Nov 28 06:52:34 crc systemd[1]: Starting Kubernetes Kubelet...
Nov 28 06:52:34 crc restorecon[4817]: Relabeled /var/lib/kubelet/config.json from system_u:object_r:unlabeled_t:s0 to system_u:object_r:container_var_lib_t:s0
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/device-plugins not reset as customized by admin to system_u:object_r:container_file_t:s0
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/device-plugins/kubelet.sock not reset as customized by admin to system_u:object_r:container_file_t:s0
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/volumes/kubernetes.io~configmap/nginx-conf/..2025_02_23_05_40_35.4114275528/nginx.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/containers/networking-console-plugin/22e96971 not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/containers/networking-console-plugin/21c98286 not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/containers/networking-console-plugin/0f1869e1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c215,c682
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/setup/46889d52 not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/setup/5b6a5969 not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c963
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/setup/6c7921f5 not reset as customized by admin to system_u:object_r:container_file_t:s0:c215,c682
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/4804f443 not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/2a46b283 not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/a6b5573e not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/4f88ee5b not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/5a4eee4b not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c963
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/cd87c521 not reset as customized by admin to system_u:object_r:container_file_t:s0:c215,c682
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle/..2025_02_23_05_33_42.2574241751 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle/..2025_02_23_05_33_42.2574241751/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/38602af4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/1483b002 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/0346718b not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/d3ed4ada not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/3bb473a5 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/8cd075a9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/00ab4760 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/54a21c09 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c589,c726
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/70478888 not reset as customized by admin to system_u:object_r:container_file_t:s0:c176,c499
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/43802770 not reset as customized by admin to system_u:object_r:container_file_t:s0:c176,c499
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/955a0edc not reset as customized by admin to system_u:object_r:container_file_t:s0:c176,c499
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/bca2d009 not reset as customized by admin to system_u:object_r:container_file_t:s0:c140,c1009
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/b295f9bd not reset as customized by admin to system_u:object_r:container_file_t:s0:c589,c726
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy/..2025_02_23_05_21_22.3617465230 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy/..2025_02_23_05_21_22.3617465230/cnibincopy.sh not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy/cnibincopy.sh not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist/..2025_02_23_05_21_22.2050650026 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist/..2025_02_23_05_21_22.2050650026/allowlist.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist/allowlist.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/egress-router-binary-copy/bc46ea27 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/egress-router-binary-copy/5731fc1b not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/egress-router-binary-copy/5e1b2a3c not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/cni-plugins/943f0936 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/cni-plugins/3f764ee4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/cni-plugins/8695e3f9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/bond-cni-plugin/aed7aa86 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/bond-cni-plugin/c64d7448 not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/bond-cni-plugin/0ba16bd2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/routeoverride-cni/207a939f not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/routeoverride-cni/54aa8cdb not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/routeoverride-cni/1f5fa595 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni-bincopy/bf9c8153 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni-bincopy/47fba4ea not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni-bincopy/7ae55ce9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni/7906a268 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni/ce43fa69 not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni/7fc7ea3a not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/kube-multus-additional-cni-plugins/d8c38b7d not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/kube-multus-additional-cni-plugins/9ef015fb not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/kube-multus-additional-cni-plugins/b9db6a41 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c432,c991
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/network-metrics-daemon/b1733d79 not reset as customized by admin to system_u:object_r:container_file_t:s0:c476,c820
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/network-metrics-daemon/afccd338 not reset as customized by admin to system_u:object_r:container_file_t:s0:c272,c818
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/network-metrics-daemon/9df0a185 not reset as customized by admin to system_u:object_r:container_file_t:s0:c432,c991
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/kube-rbac-proxy/18938cf8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c476,c820
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/kube-rbac-proxy/7ab4eb23 not reset as customized by admin to system_u:object_r:container_file_t:s0:c272,c818
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/kube-rbac-proxy/56930be6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c432,c991
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/env-overrides not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/env-overrides/..2025_02_23_05_21_35.630010865 not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/env-overrides/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config/..2025_02_23_05_21_35.1088506337 not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config/..2025_02_23_05_21_35.1088506337/ovnkube.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config/ovnkube.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/kube-rbac-proxy/0d8e3722 not reset as customized by admin to system_u:object_r:container_file_t:s0:c89,c211
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/kube-rbac-proxy/d22b2e76 not reset as customized by admin to system_u:object_r:container_file_t:s0:c382,c850
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/kube-rbac-proxy/e036759f not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/2734c483 not reset as customized by admin to system_u:object_r:container_file_t:s0:c89,c211
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/57878fe7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c89,c211
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/3f3c2e58 not reset as customized by admin to system_u:object_r:container_file_t:s0:c89,c211
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/375bec3e not reset as customized by admin to system_u:object_r:container_file_t:s0:c382,c850
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/7bc41e08 not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/containers/download-server/48c7a72d not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/containers/download-server/4b66701f not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/containers/download-server/a5a1c202 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/..2025_02_23_05_21_40.3350632666 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/..2025_02_23_05_21_40.3350632666/additional-cert-acceptance-cond.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/..2025_02_23_05_21_40.3350632666/additional-pod-admission-cond.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/additional-cert-acceptance-cond.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/additional-pod-admission-cond.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/env-overrides not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/env-overrides/..2025_02_23_05_21_40.1388695756 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/env-overrides/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/webhook/26f3df5b not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/webhook/6d8fb21d not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/webhook/50e94777 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/208473b3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/ec9e08ba not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/3b787c39 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/208eaed5 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/93aa3a2b not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/3c697968 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/3b6479f0-333b-4a96-9adf-2099afdc2447/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/3b6479f0-333b-4a96-9adf-2099afdc2447/containers/network-check-target-container/ba950ec9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/3b6479f0-333b-4a96-9adf-2099afdc2447/containers/network-check-target-container/cb5cdb37 not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/3b6479f0-333b-4a96-9adf-2099afdc2447/containers/network-check-target-container/f2df9827 not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images/..2025_02_23_05_22_30.473230615 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images/..2025_02_23_05_22_30.473230615/images.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images/images.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config/..2025_02_24_06_22_02.1904938450 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config/..2025_02_24_06_22_02.1904938450/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/machine-config-operator/fedaa673 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/machine-config-operator/9ca2df95 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/machine-config-operator/b2d7460e not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/kube-rbac-proxy/2207853c not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/kube-rbac-proxy/241c1c29 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/kube-rbac-proxy/2d910eaf not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca/..2025_02_23_05_23_49.3726007728 not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca/..2025_02_23_05_23_49.3726007728/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca/..2025_02_23_05_23_49.841175008 not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca/..2025_02_23_05_23_49.841175008/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.843437178 not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.843437178/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/c6c0f2e7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c263,c871
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/399edc97 not reset as customized by admin to system_u:object_r:container_file_t:s0:c263,c871
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/8049f7cc not reset as customized by admin to system_u:object_r:container_file_t:s0:c263,c871
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/0cec5484 not reset as customized by admin to system_u:object_r:container_file_t:s0:c263,c871
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/312446d0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c406,c828
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/8e56a35d not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.133159589 not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.133159589/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/containers/kube-controller-manager-operator/2d30ddb9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c380,c909
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/containers/kube-controller-manager-operator/eca8053d not reset as customized by admin to system_u:object_r:container_file_t:s0:c380,c909
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/containers/kube-controller-manager-operator/c3a25c9a not reset as customized by admin to system_u:object_r:container_file_t:s0:c168,c522
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/containers/kube-controller-manager-operator/b9609c22 not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c968,c969
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/dns-operator/e8b0eca9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c106,c418
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/dns-operator/b36a9c3f not reset as customized by admin to system_u:object_r:container_file_t:s0:c529,c711
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/dns-operator/38af7b07 not reset as customized by admin to system_u:object_r:container_file_t:s0:c968,c969
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/kube-rbac-proxy/ae821620 not reset as customized by admin to system_u:object_r:container_file_t:s0:c106,c418
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/kube-rbac-proxy/baa23338 not reset as customized by admin to system_u:object_r:container_file_t:s0:c529,c711
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/kube-rbac-proxy/2c534809 not reset as customized by admin to system_u:object_r:container_file_t:s0:c968,c969
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3532625537 not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3532625537/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/containers/kube-scheduler-operator-container/59b29eae not reset as customized by admin to system_u:object_r:container_file_t:s0:c338,c381
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/containers/kube-scheduler-operator-container/c91a8e4f not reset as customized by admin to system_u:object_r:container_file_t:s0:c338,c381
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/containers/kube-scheduler-operator-container/4d87494a not reset as customized by admin to system_u:object_r:container_file_t:s0:c442,c857
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/containers/kube-scheduler-operator-container/1e33ca63 not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/kube-rbac-proxy/8dea7be2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/kube-rbac-proxy/d0b04a99 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/kube-rbac-proxy/d84f01e7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/package-server-manager/4109059b not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/package-server-manager/a7258a3e not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/package-server-manager/05bdf2b6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/f3261b51 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/315d045e not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/5fdcf278 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/d053f757 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/c2850dc7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca/..2025_02_23_05_22_30.2390596521 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca/..2025_02_23_05_22_30.2390596521/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/fcfb0b2b not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/c7ac9b7d not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/fa0c0d52 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/c609b6ba not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/2be6c296 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/89a32653 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/4eb9afeb not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/13af6efa not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/containers/olm-operator/b03f9724 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/containers/olm-operator/e3d105cc not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/containers/olm-operator/3aed4d83 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1906041176 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1906041176/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/containers/kube-storage-version-migrator-operator/0765fa6e not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/containers/kube-storage-version-migrator-operator/2cefc627 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/containers/kube-storage-version-migrator-operator/3dcc6345 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/containers/kube-storage-version-migrator-operator/365af391 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-SelfManagedHA-Default.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-SelfManagedHA-TechPreviewNoUpgrade.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-SelfManagedHA-DevPreviewNoUpgrade.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-Hypershift-TechPreviewNoUpgrade.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-Hypershift-DevPreviewNoUpgrade.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-Hypershift-Default.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-api/b1130c0f not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-api/236a5913 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-api/b9432e26 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/5ddb0e3f not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/986dc4fd not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/8a23ff9a not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/9728ae68 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/665f31d0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1255385357 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1255385357/operator-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config/operator-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle/..2025_02_23_05_23_57.573792656 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle/..2025_02_23_05_23_57.573792656/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_23_05_22_30.3254245399 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_23_05_22_30.3254245399/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/136c9b42 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/98a1575b not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/cac69136 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/5deb77a7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/2ae53400 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3608339744 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3608339744/operator-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config/operator-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/containers/service-ca-operator/e46f2326 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/containers/service-ca-operator/dc688d3c not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/containers/service-ca-operator/3497c3cd not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/containers/service-ca-operator/177eb008 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3819292994 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3819292994/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/containers/openshift-apiserver-operator/af5a2afa not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/containers/openshift-apiserver-operator/d780cb1f not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/containers/openshift-apiserver-operator/49b0f374 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/containers/openshift-apiserver-operator/26fbb125 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_22_30.3244779536 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_22_30.3244779536/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Nov 28 06:52:34 crc restorecon[4817]:
/var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/cf14125a not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/b7f86972 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/e51d739c not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/88ba6a69 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/669a9acf not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/5cd51231 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/75349ec7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/15c26839 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/45023dcd not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/2bb66a50 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/kube-rbac-proxy/64d03bdd not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/kube-rbac-proxy/ab8e7ca0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/kube-rbac-proxy/bb9be25f not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_22_30.2034221258 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_22_30.2034221258/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 06:52:34 crc restorecon[4817]: 
/var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/containers/cluster-image-registry-operator/9a0b61d3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/containers/cluster-image-registry-operator/d471b9d2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/containers/cluster-image-registry-operator/8cb76b8e not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/containers/catalog-operator/11a00840 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/containers/catalog-operator/ec355a92 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/containers/catalog-operator/992f735e not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1782968797 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1782968797/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 28 06:52:34 crc 
restorecon[4817]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/d59cdbbc not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/72133ff0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/c56c834c not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/d13724c7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/0a498258 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/containers/machine-config-server/fa471982 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/containers/machine-config-server/fc900d92 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/containers/machine-config-server/fa7d68da not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/migrator/4bacf9b4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/migrator/424021b1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/migrator/fc2e31a3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/graceful-termination/f51eefac not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/graceful-termination/c8997f2f not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/graceful-termination/7481f599 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Nov 28 06:52:34 crc restorecon[4817]: 
/var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle/..2025_02_23_05_22_49.2255460704 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle/..2025_02_23_05_22_49.2255460704/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/containers/service-ca-controller/fdafea19 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/containers/service-ca-controller/d0e1c571 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/containers/service-ca-controller/ee398915 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/containers/service-ca-controller/682bb6b8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/setup/a3e67855 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/setup/a989f289 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/setup/915431bd not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-ensure-env-vars/7796fdab not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-ensure-env-vars/dcdb5f19 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Nov 28 06:52:34 crc restorecon[4817]: 
/var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-ensure-env-vars/a3aaa88c not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-resources-copy/5508e3e6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-resources-copy/160585de not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-resources-copy/e99f8da3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcdctl/8bc85570 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcdctl/a5861c91 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcdctl/84db1135 not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd/9e1a6043 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd/c1aba1c2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd/d55ccd6d not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-metrics/971cc9f6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-metrics/8f2e3dcf not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-metrics/ceb35e9c not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-readyz/1c192745 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-readyz/5209e501 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-readyz/f83de4df not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-rev/e7b978ac not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Nov 28 06:52:34 crc 
restorecon[4817]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-rev/c64304a1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-rev/5384386b not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c268,c620 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/containers/multus-admission-controller/cce3e3ff not reset as customized by admin to system_u:object_r:container_file_t:s0:c435,c756 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/containers/multus-admission-controller/8fb75465 not reset as customized by admin to system_u:object_r:container_file_t:s0:c268,c620 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/containers/kube-rbac-proxy/740f573e not reset as customized by admin to system_u:object_r:container_file_t:s0:c435,c756 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/containers/kube-rbac-proxy/32fd1134 not reset as customized by admin to system_u:object_r:container_file_t:s0:c268,c620 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c24 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/containers/serve-healthcheck-canary/0a861bd3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c24 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/containers/serve-healthcheck-canary/80363026 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c24 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/containers/serve-healthcheck-canary/bfa952a8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c24 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config/..2025_02_23_05_33_31.2122464563 not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config/..2025_02_23_05_33_31.2122464563/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config/config-file.yaml not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c129,c158 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/config/..2025_02_23_05_33_31.333075221 not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/kube-rbac-proxy/793bf43d not reset as customized by admin to system_u:object_r:container_file_t:s0:c381,c387 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/kube-rbac-proxy/7db1bb6e not reset as customized by admin to system_u:object_r:container_file_t:s0:c142,c438 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/kube-rbac-proxy/4f6a0368 not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/machine-approver-controller/c12c7d86 not reset as customized by admin to system_u:object_r:container_file_t:s0:c381,c387 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/machine-approver-controller/36c4a773 not reset as customized by admin to system_u:object_r:container_file_t:s0:c142,c438 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/machine-approver-controller/4c1e98ae not reset as customized by admin to system_u:object_r:container_file_t:s0:c142,c438 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/machine-approver-controller/a4c8115c not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/setup/7db1802e not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver/a008a7ab not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver-cert-syncer/2c836bac not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver-cert-regeneration-controller/0ce62299 not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c97,c980 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver-insecure-readyz/945d2457 not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver-check-endpoints/7d5c1dd8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/utilities not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/utilities/copy-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/3scale-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/3scale-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/advanced-cluster-management not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/advanced-cluster-management/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-broker-rhel8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-broker-rhel8/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-online not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-online/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-streams not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-streams/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-streams-console not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-streams-console/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq7-interconnect-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq7-interconnect-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ansible-automation-platform-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ansible-automation-platform-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ansible-cloud-addons-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ansible-cloud-addons-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicast-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicast-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-registry-3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-registry-3/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/authorino-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/authorino-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aws-load-balancer-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aws-load-balancer-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bamoe-businessautomation-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bamoe-businessautomation-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bamoe-kogito-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bamoe-kogito-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bpfman-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bpfman-operator/index.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/businessautomation-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/businessautomation-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cephcsi-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cephcsi-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cincinnati-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cincinnati-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-kube-descheduler-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-kube-descheduler-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-logging not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-logging/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-observability-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-observability-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/compliance-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/compliance-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/container-security-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/container-security-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/costmanagement-metrics-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/costmanagement-metrics-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cryostat-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cryostat-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datagrid not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datagrid/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devspaces not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devspaces/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devworkspace-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devworkspace-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dpu-network-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dpu-network-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eap not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eap/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/elasticsearch-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/elasticsearch-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/external-dns-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/external-dns-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fence-agents-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fence-agents-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/file-integrity-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/file-integrity-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-apicurito not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-apicurito/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-console not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-console/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-online not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-online/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gatekeeper-operator-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gatekeeper-operator-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jaeger-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jaeger-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jws-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jws-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kernel-module-management not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kernel-module-management/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kernel-module-management-hub not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kernel-module-management-hub/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kiali-ossm not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kiali-ossm/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubevirt-hyperconverged not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubevirt-hyperconverged/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/logic-operator-rhel8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/logic-operator-rhel8/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lvms-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lvms-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/machine-deletion-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/machine-deletion-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mcg-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mcg-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mta-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mta-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtr-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtr-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtv-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtv-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-engine not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-engine/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netobserv-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netobserv-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-healthcheck-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-healthcheck-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-maintenance-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-maintenance-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-observability-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-observability-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocs-client-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocs-client-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocs-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocs-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-csi-addons-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-csi-addons-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-multicluster-orchestrator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-multicluster-orchestrator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-prometheus-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-prometheus-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odr-cluster-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odr-cluster-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odr-hub-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odr-hub-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-cert-manager-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-cert-manager-operator/bundle-v1.15.0.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-cert-manager-operator/channel.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-cert-manager-operator/package.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-custom-metrics-autoscaler-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-custom-metrics-autoscaler-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-gitops-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-gitops-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-pipelines-operator-rh not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-pipelines-operator-rh/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-secondary-scheduler-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-secondary-scheduler-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opentelemetry-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opentelemetry-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/quay-bridge-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/quay-bridge-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/quay-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/quay-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/recipe not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/recipe/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/red-hat-camel-k not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/red-hat-camel-k/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/red-hat-hawtio-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/red-hat-hawtio-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redhat-oadp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redhat-oadp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rh-service-binding-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rh-service-binding-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhacs-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhacs-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhbk-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhbk-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhdh not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhdh/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhods-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhods-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhods-prometheus-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhods-prometheus-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhpam-kogito-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhpam-kogito-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhsso-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhsso-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rook-ceph-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rook-ceph-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/run-once-duration-override-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/run-once-duration-override-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sandboxed-containers-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sandboxed-containers-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/security-profiles-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/security-profiles-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/self-node-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/self-node-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/serverless-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/serverless-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/service-registry-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/service-registry-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/servicemeshoperator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/servicemeshoperator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/servicemeshoperator3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/servicemeshoperator3/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/skupper-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/skupper-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/submariner not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/submariner/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tang-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tang-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tempo-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tempo-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trustee-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trustee-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/volsync-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/volsync-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/web-terminal not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/web-terminal/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/db.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/index.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/main.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/overflow.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/digest not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-utilities/bc8d0691 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-utilities/6b76097a not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-utilities/34d1af30 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-content/312ba61c not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-content/645d5dd1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-content/16e825f0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/registry-server/4cf51fc9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/registry-server/2a23d348 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/registry-server/075dbd49 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..2025_02_24_06_09_13.3521195566 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..2025_02_24_06_09_13.3521195566/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..2025_02_24_06_09_13.3521195566/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..2025_02_24_06_09_13.3521195566/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/containers/node-ca/dd585ddd not reset as customized by admin to system_u:object_r:container_file_t:s0:c377,c642
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/containers/node-ca/17ebd0ab not reset as customized by admin to system_u:object_r:container_file_t:s0:c338,c343
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/containers/node-ca/005579f4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca/..2025_02_23_05_23_11.449897510 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca/..2025_02_23_05_23_11.449897510/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_23_05_23_11.1287037894 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies/..2025_02_23_05_23_11.1301053334 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies/..2025_02_23_05_23_11.1301053334/policy.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies/policy.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/fix-audit-permissions/bf5f3b9c not reset as customized by admin to system_u:object_r:container_file_t:s0:c49,c263
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/fix-audit-permissions/af276eb7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c701
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/fix-audit-permissions/ea28e322 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/oauth-apiserver/692e6683 not reset as customized by admin to system_u:object_r:container_file_t:s0:c49,c263
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/oauth-apiserver/871746a7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c701
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/oauth-apiserver/4eb2e958 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config/..2025_02_24_06_09_06.2875086261 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config/..2025_02_24_06_09_06.2875086261/console-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config/console-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_24_06_09_06.286118152 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_24_06_09_06.286118152/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert/..2025_02_24_06_09_06.3865795478 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert/..2025_02_24_06_09_06.3865795478/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca/..2025_02_24_06_09_06.584414814 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca/..2025_02_24_06_09_06.584414814/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/containers/console/ca9b62da not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/containers/console/0edd6fce not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837 not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837/openshift-controller-manager.client-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837/openshift-controller-manager.openshift-global-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837/openshift-controller-manager.serving-cert.secret not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/openshift-controller-manager.client-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/openshift-controller-manager.openshift-global-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/openshift-controller-manager.serving-cert.secret not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca/..2025_02_24_06_20_07.1071801880 not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca/..2025_02_24_06_20_07.1071801880/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles/..2025_02_24_06_20_07.2494444877 not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles/..2025_02_24_06_20_07.2494444877/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/containers/controller-manager/89b4555f not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume/..2025_02_23_05_23_22.4071100442 not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume/..2025_02_23_05_23_22.4071100442/Corefile not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume/Corefile not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/dns/655fcd71 not reset as customized by admin to system_u:object_r:container_file_t:s0:c457,c841
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/dns/0d43c002 not reset as customized by admin to system_u:object_r:container_file_t:s0:c55,c1022
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/dns/e68efd17 not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/kube-rbac-proxy/9acf9b65 not reset as customized by admin to system_u:object_r:container_file_t:s0:c457,c841
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/kube-rbac-proxy/5ae3ff11 not reset as customized by admin to system_u:object_r:container_file_t:s0:c55,c1022
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/kube-rbac-proxy/1e59206a not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/44663579-783b-4372-86d6-acf235a62d72/containers/dns-node-resolver/27af16d1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c304,c1017
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/44663579-783b-4372-86d6-acf235a62d72/containers/dns-node-resolver/7918e729 not reset as customized by admin to system_u:object_r:container_file_t:s0:c853,c893
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/44663579-783b-4372-86d6-acf235a62d72/containers/dns-node-resolver/5d976d0e not reset as customized by admin to system_u:object_r:container_file_t:s0:c585,c981
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config/..2025_02_23_05_38_56.1112187283 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config/..2025_02_23_05_38_56.1112187283/controller-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config/controller-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_38_56.2839772658 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_38_56.2839772658/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/trusted-ca/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/containers/console-operator/d7f55cbb not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/containers/console-operator/f0812073 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/containers/console-operator/1a56cbeb not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/containers/console-operator/7fdd437e not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/containers/console-operator/cdfb5652 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/etcd-serving-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/etcd-serving-ca/..2025_02_24_06_17_29.3844392896 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/etcd-serving-ca/..2025_02_24_06_17_29.3844392896/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/etcd-serving-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/etcd-serving-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/config/..2025_02_24_06_17_29.848549803 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/config/..2025_02_24_06_17_29.848549803/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/audit not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/audit/..2025_02_24_06_17_29.780046231 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/audit/..2025_02_24_06_17_29.780046231/policy.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/audit/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/audit/policy.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/..2025_02_24_06_17_29.2926008347 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/..2025_02_24_06_17_29.2926008347/image-registry.openshift-image-registry.svc..5000 not reset
as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/..2025_02_24_06_17_29.2926008347/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/..2025_02_24_06_17_29.2926008347/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_24_06_17_29.2729721485 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_24_06_17_29.2729721485/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/trusted-ca-bundle/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/containers/fix-audit-permissions/fb93119e not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 28 06:52:34 crc restorecon[4817]: 
/var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/containers/openshift-apiserver/f1e8fc0e not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/containers/openshift-apiserver-check-endpoints/218511f3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/volumes/kubernetes.io~empty-dir/tmpfs not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/volumes/kubernetes.io~empty-dir/tmpfs/k8s-webhook-server not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/volumes/kubernetes.io~empty-dir/tmpfs/k8s-webhook-server/serving-certs not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/containers/packageserver/ca8af7b3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/containers/packageserver/72cc8a75 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/containers/packageserver/6e8a3760 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes/kubernetes.io~configmap/service-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes/kubernetes.io~configmap/service-ca/..2025_02_23_05_27_30.557428972 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes/kubernetes.io~configmap/service-ca/..2025_02_23_05_27_30.557428972/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes/kubernetes.io~configmap/service-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes/kubernetes.io~configmap/service-ca/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/containers/cluster-version-operator/4c3455c0 not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c5,c6 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/containers/cluster-version-operator/2278acb0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/containers/cluster-version-operator/4b453e4f not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/containers/cluster-version-operator/3ec09bda not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_24_06_25_03.422633132 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_24_06_25_03.422633132/anchors not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_24_06_25_03.422633132/anchors/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca/anchors not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/..2025_02_24_06_25_03.3594477318 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/..2025_02_24_06_25_03.3594477318/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/..2025_02_24_06_25_03.3594477318/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 06:52:34 crc restorecon[4817]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/..2025_02_24_06_25_03.3594477318/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/edk2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/edk2/cacerts.bin not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/java not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/java/cacerts not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/openssl not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/openssl/ca-bundle.trust.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/tls-ca-bundle.pem not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c10,c16 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/email-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/objsign-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2ae6433e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fde84897.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/75680d2e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/openshift-service-serving-signer_1740288168.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/facfc4fa.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8f5a969c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CFCA_EV_ROOT.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9ef4a08a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ingress-operator_1740288202.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2f332aed.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 06:52:34 crc restorecon[4817]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/248c8271.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8d10a21f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ACCVRAIZ1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a94d09e5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3c9a4d3b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/40193066.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AC_RAIZ_FNMT-RCM.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cd8c0d63.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b936d1c6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CA_Disig_Root_R2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4fd49c6c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AC_RAIZ_FNMT-RCM_SERVIDORES_SEGUROS.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b81b93f0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 06:52:34 crc restorecon[4817]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5f9a69fa.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certigna.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b30d5fda.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ANF_Secure_Server_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b433981b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/93851c9e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9282e51c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e7dd1bc4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Actalis_Authentication_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/930ac5d2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5f47b495.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e113c810.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5931b5bc.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 06:52:34 crc restorecon[4817]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AffirmTrust_Commercial.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2b349938.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e48193cf.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/302904dd.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a716d4ed.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AffirmTrust_Networking.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/93bc0acc.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/86212b19.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certigna_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AffirmTrust_Premium.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b727005e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dbc54cab.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f51bb24c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 06:52:34 crc restorecon[4817]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c28a8a30.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AffirmTrust_Premium_ECC.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9c8dfbd4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ccc52f49.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cb1c3204.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Amazon_Root_CA_1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ce5e74ef.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fd08c599.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certum_Trusted_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Amazon_Root_CA_2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6d41d539.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fb5fa911.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e35234b1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 06:52:34 crc restorecon[4817]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Amazon_Root_CA_3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8cb5ee0f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7a7c655d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f8fc53da.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Amazon_Root_CA_4.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/de6d66f3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d41b5e2a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/41a3f684.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1df5a75f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Atos_TrustedRoot_2011.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e36a6752.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b872f2b4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9576d26b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/228f89db.0 
not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Atos_TrustedRoot_Root_CA_ECC_TLS_2021.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fb717492.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2d21b73c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0b1b94ef.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/595e996b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Atos_TrustedRoot_Root_CA_RSA_TLS_2021.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9b46e03d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/128f4b91.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Buypass_Class_3_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/81f2d2b1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Autoridad_de_Certificacion_Firmaprofesional_CIF_A62634068.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3bde41ac.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d16a5865.0 not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c10,c16 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certum_EC-384_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/BJCA_Global_Root_CA1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0179095f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ffa7f1eb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9482e63a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d4dae3dd.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/BJCA_Global_Root_CA2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3e359ba6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7e067d03.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/95aff9e3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d7746a63.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Baltimore_CyberTrust_Root.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/653b494a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 06:52:34 crc restorecon[4817]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3ad48a91.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certum_Trusted_Network_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Buypass_Class_2_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/54657681.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/82223c44.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e8de2f56.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2d9dafe4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d96b65e2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ee64a828.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/COMODO_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/40547a79.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5a3f0ff8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7a780d93.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 06:52:34 crc restorecon[4817]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/34d996fb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/COMODO_ECC_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/eed8c118.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/89c02a45.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certainly_Root_R1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b1159c4c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/COMODO_RSA_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d6325660.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d4c339cb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8312c4c1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certainly_Root_E1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8508e720.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5fdd185d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 06:52:34 crc restorecon[4817]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/48bec511.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/69105f4f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign.1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0b9bc432.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certum_Trusted_Network_CA_2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GTS_Root_R3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/32888f65.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CommScope_Public_Trust_ECC_Root-01.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6b03dec0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/219d9499.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CommScope_Public_Trust_ECC_Root-02.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5acf816d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cbf06781.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CommScope_Public_Trust_RSA_Root-01.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GTS_Root_R4.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dc99f41e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CommScope_Public_Trust_RSA_Root-02.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign.3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AAA_Certificate_Services.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/985c1f52.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8794b4e3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/D-TRUST_BR_Root_CA_1_2020.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e7c037b4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ef954a4e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/D-TRUST_EV_Root_CA_1_2020.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2add47b6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/90c5a3c8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/D-TRUST_Root_Class_3_CA_2_2009.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b0f3e76e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/53a1b57a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/D-TRUST_Root_Class_3_CA_2_EV_2009.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Assured_ID_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5ad8a5d6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/68dd7389.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Assured_ID_Root_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9d04f354.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8d6437c3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/062cdee6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/bd43e1dd.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Assured_ID_Root_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7f3d5d1d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c491639e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign_Root_E46.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Global_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3513523f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/399e7759.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/feffd413.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d18e9066.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Global_Root_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/607986c7.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c90bc37d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1b0f7e5c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1e08bfd1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Global_Root_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dd8e9d41.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ed39abd0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a3418fda.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/bc3f2570.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_High_Assurance_EV_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/244b5494.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/81b9768f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign.2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4be590e0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_TLS_ECC_P384_Root_G5.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9846683b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/252252d2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1e8e7201.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ISRG_Root_X1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_TLS_RSA4096_Root_G5.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d52c538d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c44cc0c0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign_Root_R46.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Trusted_Root_G4.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/75d1b2ed.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a2c66da8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GTS_Root_R2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ecccd8db.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Entrust.net_Certification_Authority__2048_.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/aee5f10d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3e7271e8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b0e59380.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4c3982f2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Entrust_Root_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6b99d060.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/bf64f35b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0a775a30.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/002c0b4f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cc450945.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Entrust_Root_Certification_Authority_-_EC1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/106f3e4d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b3fb433b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4042bcee.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Entrust_Root_Certification_Authority_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/02265526.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/455f1b52.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0d69c7e1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9f727ac7.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Entrust_Root_Certification_Authority_-_G4.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5e98733a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f0cd152c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dc4d6a89.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6187b673.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/FIRMAPROFESIONAL_CA_ROOT-A_WEB.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ba8887ce.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/068570d1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f081611a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/48a195d8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GDCA_TrustAUTH_R5_ROOT.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0f6fa695.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ab59055e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b92fd57f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GLOBALTRUST_2020.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fa5da96b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1ec40989.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7719f463.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GTS_Root_R1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1001acf7.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f013ecaf.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/626dceaf.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c559d742.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1d3472b9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9479c8c3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a81e292b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4bfab552.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Go_Daddy_Class_2_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Sectigo_Public_Server_Authentication_Root_E46.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Go_Daddy_Root_Certificate_Authority_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e071171e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/57bcb2da.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/HARICA_TLS_ECC_Root_CA_2021.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ab5346f4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5046c355.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/HARICA_TLS_RSA_Root_CA_2021.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/865fbdf9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/da0cfd1d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/85cde254.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Hellenic_Academic_and_Research_Institutions_ECC_RootCA_2015.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cbb3f32b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SecureSign_RootCA11.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Hellenic_Academic_and_Research_Institutions_RootCA_2015.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5860aaa6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/31188b5e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/HiPKI_Root_CA_-_G1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c7f1359b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5f15c80c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Hongkong_Post_Root_CA_3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/09789157.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ISRG_Root_X2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/18856ac4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1e09d511.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/IdenTrust_Commercial_Root_CA_1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cf701eeb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d06393bb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/IdenTrust_Public_Sector_Root_CA_1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/10531352.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Izenpe.com.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SecureTrust_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b0ed035a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Microsec_e-Szigno_Root_CA_2009.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8160b96c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e8651083.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2c63f966.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Security_Communication_RootCA2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Microsoft_ECC_Root_Certificate_Authority_2017.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8d89cda1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/01419da9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_TLS_RSA_Root_CA_2022.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b7a5b843.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Microsoft_RSA_Root_Certificate_Authority_2017.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/bf53fb88.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9591a472.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3afde786.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SwissSign_Gold_CA_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/NAVER_Global_Root_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3fb36b73.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d39b0a2c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a89d74c2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cd58d51e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b7db1890.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/NetLock_Arany__Class_Gold__F__tan__s__tv__ny.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/988a38cb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/60afe812.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f39fc864.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5443e9e3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/OISTE_WISeKey_Global_Root_GB_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e73d606e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dfc0fe80.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b66938e9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1e1eab7c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/OISTE_WISeKey_Global_Root_GC_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/773e07ad.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3c899c73.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d59297b8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ddcda989.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_1_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/749e9e03.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/52b525c7.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Security_Communication_RootCA3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d7e8dc79.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7a819ef2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/08063a00.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6b483515.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_2_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/064e0aa9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1f58a078.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6f7454b3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7fa05551.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/76faf6c0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9339512a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f387163d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ee37c333.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_3_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e18bfb83.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e442e424.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fe8a2cd8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/23f4c490.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5cd81ad7.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_EV_Root_Certification_Authority_ECC.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f0c70a8d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7892ad52.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SZAFIR_ROOT_CA2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4f316efb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_EV_Root_Certification_Authority_RSA_R2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/06dc52d5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/583d0756.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Sectigo_Public_Server_Authentication_Root_R46.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_Root_Certification_Authority_ECC.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0bf05006.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/88950faa.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9046744a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3c860d51.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_Root_Certification_Authority_RSA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6fa5da56.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/33ee480d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Secure_Global_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/63a2c897.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_TLS_ECC_Root_CA_2022.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/bdacca6f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ff34af3f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dbff3a01.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Security_Communication_ECC_RootCA1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/emSign_Root_CA_-_C1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Starfield_Class_2_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/406c9bb1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Starfield_Root_Certificate_Authority_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/emSign_ECC_Root_CA_-_C3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Starfield_Services_Root_Certificate_Authority_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SwissSign_Silver_CA_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/99e1b953.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/T-TeleSec_GlobalRoot_Class_2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/vTrus_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/T-TeleSec_GlobalRoot_Class_3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/14bc7599.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TUBITAK_Kamu_SM_SSL_Kok_Sertifikasi_-_Surum_1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TWCA_Global_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7a3adc42.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TWCA_Root_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f459871d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Telekom_Security_TLS_ECC_Root_2020.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/emSign_Root_CA_-_G1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Telekom_Security_TLS_RSA_Root_2023.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TeliaSonera_Root_CA_v1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Telia_Root_CA_v2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8f103249.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f058632f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ca-certificates.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TrustAsia_Global_Root_CA_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9bf03295.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/98aaf404.0 not reset as customized by admin to
system_u:object_r:container_file_t:s0:c10,c16 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TrustAsia_Global_Root_CA_G4.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1cef98f5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/073bfcc5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2923b3f9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Trustwave_Global_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f249de83.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/edcbddb5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/emSign_ECC_Root_CA_-_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Trustwave_Global_ECC_P256_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9b5697b0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1ae85e5e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b74d2bd5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 
06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Trustwave_Global_ECC_P384_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d887a5bb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9aef356c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TunTrust_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fd64f3fc.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e13665f9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/UCA_Extended_Validation_Root.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0f5dc4f3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/da7377f6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/UCA_Global_G2_Root.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c01eb047.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/304d27c3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ed858448.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 06:52:34 crc restorecon[4817]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/USERTrust_ECC_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f30dd6ad.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/04f60c28.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/vTrus_ECC_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/USERTrust_RSA_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fc5a8f99.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/35105088.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ee532fd5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/XRamp_Global_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/706f604c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/76579174.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/certSIGN_ROOT_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8d86cdd1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 06:52:34 crc restorecon[4817]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/882de061.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/certSIGN_ROOT_CA_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5f618aec.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a9d40e02.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e-Szigno_Root_CA_2017.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e868b802.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/83e9984f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ePKI_Root_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ca6e4ad9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9d6523ce.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4b718d9b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/869fbf79.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/containers/registry/f8d22bdb not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 06:52:34 crc 
restorecon[4817]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator/6e8bbfac not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator/54dd7996 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator/a4f1bb05 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator-watch/207129da not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator-watch/c1df39e1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator-watch/15b8f1cd not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config/..2025_02_23_05_27_49.3523263858 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config/..2025_02_23_05_27_49.3523263858/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images/..2025_02_23_05_27_49.3256605594 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images/..2025_02_23_05_27_49.3256605594/images.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 28 06:52:34 crc restorecon[4817]: 
/var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images/images.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/kube-rbac-proxy/77bd6913 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/kube-rbac-proxy/2382c1b1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/kube-rbac-proxy/704ce128 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/machine-api-operator/70d16fe0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/machine-api-operator/bfb95535 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/machine-api-operator/57a8e8e2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config/..2025_02_23_05_27_49.3413793711 not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config/..2025_02_23_05_27_49.3413793711/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/containers/kube-apiserver-operator/1b9d3e5e not reset as customized by admin to system_u:object_r:container_file_t:s0:c107,c917 Nov 28 06:52:34 crc restorecon[4817]: 
/var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/containers/kube-apiserver-operator/fddb173c not reset as customized by admin to system_u:object_r:container_file_t:s0:c202,c983 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/containers/kube-apiserver-operator/95d3c6c4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/9d751cbb-f2e2-430d-9754-c882a5e924a5/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/9d751cbb-f2e2-430d-9754-c882a5e924a5/containers/check-endpoints/bfb5fff5 not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/9d751cbb-f2e2-430d-9754-c882a5e924a5/containers/check-endpoints/2aef40aa not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/9d751cbb-f2e2-430d-9754-c882a5e924a5/containers/check-endpoints/c0391cad not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager/1119e69d not reset as customized by admin to system_u:object_r:container_file_t:s0:c776,c1007 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager/660608b4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager/8220bd53 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/cluster-policy-controller/85f99d5c not reset as customized by admin to system_u:object_r:container_file_t:s0:c776,c1007 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/cluster-policy-controller/4b0225f6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager-cert-syncer/9c2a3394 not reset as customized by admin to system_u:object_r:container_file_t:s0:c776,c1007 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager-cert-syncer/e820b243 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager-recovery-controller/1ca52ea0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c776,c1007 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager-recovery-controller/e6988e45 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928 Nov 28 06:52:34 crc restorecon[4817]: 
/var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config/..2025_02_24_06_09_21.2517297950 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config/..2025_02_24_06_09_21.2517297950/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/machine-config-controller/6655f00b not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/machine-config-controller/98bc3986 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/machine-config-controller/08e3458a not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/kube-rbac-proxy/2a191cb0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/kube-rbac-proxy/6c4eeefb not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/kube-rbac-proxy/f61a549c not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/hostpath-provisioner/24891863 not reset as customized by admin to system_u:object_r:container_file_t:s0:c37,c572 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/hostpath-provisioner/fbdfd89c not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/liveness-probe/9b63b3bc not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c37,c572 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/liveness-probe/8acde6d6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/node-driver-registrar/59ecbba3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/csi-provisioner/685d4be3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.341639300 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.341639300/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.341639300/openshift-route-controller-manager.client-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.341639300/openshift-route-controller-manager.serving-cert.secret not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/openshift-route-controller-manager.client-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/openshift-route-controller-manager.serving-cert.secret not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca/..2025_02_24_06_20_07.2950937851 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 28 06:52:34 crc restorecon[4817]: 
/var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca/..2025_02_24_06_20_07.2950937851/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/containers/route-controller-manager/feaea55e not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/abinitio-runtime-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/abinitio-runtime-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/accuknox-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/accuknox-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aci-containers-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aci-containers-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aikit-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aikit-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/airlock-microgateway not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/airlock-microgateway/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ako-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ako-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alloy not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alloy/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anchore-engine not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anchore-engine/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzo-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzo-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzograph-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzograph-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzounstructured-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 
06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzounstructured-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/appdynamics-cloud-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/appdynamics-cloud-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/appdynamics-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/appdynamics-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aqua-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aqua-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cass-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cass-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ccm-node-agent-dcap-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ccm-node-agent-dcap-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ccm-node-agent-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ccm-node-agent-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cfm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cfm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cilium not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cilium/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cilium-enterprise not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cilium-enterprise/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloud-native-postgresql not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloud-native-postgresql/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudbees-ci not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudbees-ci/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudera-streams-messaging-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudera-streams-messaging-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudnative-pg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudnative-pg/catalog.json not reset as customized by admin 
to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cnfv-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cnfv-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/conjur-follower-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/conjur-follower-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/coroot-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/coroot-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/crunchy-postgres-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/crunchy-postgres-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cte-k8s-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cte-k8s-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dell-csm-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dell-csm-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/digitalai-deploy-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/digitalai-deploy-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/digitalai-release-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/digitalai-release-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/edb-hcp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/edb-hcp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eginnovations-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eginnovations-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/elasticsearch-eck-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/elasticsearch-eck-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/falcon-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/falcon-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/federatorai-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/federatorai-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fujitsu-enterprise-postgres-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fujitsu-enterprise-postgres-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/function-mesh not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/function-mesh/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/harness-gitops-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/harness-gitops-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hazelcast-platform-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hazelcast-platform-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hcp-terraform-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hcp-terraform-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hpe-ezmeral-csi-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hpe-ezmeral-csi-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-application-gateway-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-application-gateway-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-block-csi-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-block-csi-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-access-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-access-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-directory-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-directory-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-dr-manager not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-dr-manager/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-licensing-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-licensing-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-sds-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-sds-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infrastructure-asset-orchestrator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infrastructure-asset-orchestrator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/instana-agent-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/instana-agent-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/intel-device-plugins-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/intel-device-plugins-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/intel-kubernetes-power-manager not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/intel-kubernetes-power-manager/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/iomesh-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/iomesh-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-openshift-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-openshift-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k8s-triliovault not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k8s-triliovault/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-ati-updates not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-ati-updates/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-framework not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-framework/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-ingress not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-ingress/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-licensing not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-licensing/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-sso not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-sso/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-keycloak-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-keycloak-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-load-core not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-load-core/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-loadcore-agents not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-loadcore-agents/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-nats-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-nats-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-nimbusmosaic-dusim not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-nimbusmosaic-dusim/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-rest-api-browser-v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-rest-api-browser-v1/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-appsec not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-appsec/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-core not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-core/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-db/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-diagnostics not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-diagnostics/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-logging not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-logging/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-migration not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-migration/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-msg-broker not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-msg-broker/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-notifications not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-notifications/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-stats-dashboards not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-stats-dashboards/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-storage not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-storage/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-test-core not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-test-core/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-ui not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-ui/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-websocket-service not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-websocket-service/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kong-gateway-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kong-gateway-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubearmor-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubearmor-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubecost-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubecost-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubemq-operator-marketplace not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubemq-operator-marketplace/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lenovo-locd-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lenovo-locd-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marketplace-games-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marketplace-games-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/memcached-operator-ogaye not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/memcached-operator-ogaye/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/memory-machine-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/memory-machine-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/model-builder-for-vision-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/model-builder-for-vision-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-atlas-kubernetes not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-atlas-kubernetes/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-enterprise not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-enterprise/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netapp-spark-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netapp-spark-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netscaler-adm-agent-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netscaler-adm-agent-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netscaler-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netscaler-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-certified-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-certified-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-repository-ha-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-repository-ha-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nginx-ingress-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nginx-ingress-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pcc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pcc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nim-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nim-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nxiq-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nxiq-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nxrm-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nxrm-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odigos-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odigos-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/open-liberty-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/open-liberty-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshiftartifactoryha-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshiftartifactoryha-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshiftxray-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshiftxray-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/operator-certification-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/operator-certification-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ovms-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ovms-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pachyderm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pachyderm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pmem-csi-operator-os not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pmem-csi-operator-os/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/portworx-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/portworx-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometurbo-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometurbo-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pubsubplus-eventbroker-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pubsubplus-eventbroker-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-enterprise-operator-cert not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-enterprise-operator-cert/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/runtime-component-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/runtime-component-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/runtime-fabric-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/runtime-fabric-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sanstoragecsi-operator-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sanstoragecsi-operator-bundle/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/silicom-sts-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/silicom-sts-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/smilecdr-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/smilecdr-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sriov-fec not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sriov-fec/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stackable-commons-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stackable-commons-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stackable-zookeeper-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stackable-zookeeper-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-tsc-client-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-tsc-client-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tawon-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tawon-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tigera-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tigera-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/timemachine-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/timemachine-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vault-secrets-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vault-secrets-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vcp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vcp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/webotx-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/webotx-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/xcrypt-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/xcrypt-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/zabbix-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/zabbix-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/db.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/index.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/main.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/overflow.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/digest not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/utilities not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/utilities/copy-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-utilities/63709497 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-utilities/d966b7fd not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-utilities/f5773757 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-content/81c9edb9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-content/57bf57ee not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-content/86f5e6aa not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/registry-server/0aabe31d not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/registry-server/d2af85c2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/registry-server/09d157d9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/db.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/index.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/main.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/overflow.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/digest not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/3scale-community-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/3scale-community-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-acm-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-acm-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-acmpca-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-acmpca-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-apigateway-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-apigateway-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-apigatewayv2-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-apigatewayv2-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-applicationautoscaling-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-applicationautoscaling-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-athena-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-athena-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudfront-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudfront-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudtrail-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudtrail-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudwatch-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudwatch-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudwatchlogs-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudwatchlogs-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-documentdb-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-documentdb-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-dynamodb-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-dynamodb-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ec2-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ec2-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ecr-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ecr-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ecs-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ecs-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-efs-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-efs-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-eks-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-eks-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-elasticache-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-elasticache-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-elbv2-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-elbv2-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-emrcontainers-controller not reset 
as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-emrcontainers-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-eventbridge-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-eventbridge-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-iam-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-iam-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kafka-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kafka-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-keyspaces-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-keyspaces-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kinesis-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kinesis-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kms-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kms-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-lambda-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-lambda-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-memorydb-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-memorydb-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-mq-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-mq-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-networkfirewall-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-networkfirewall-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-opensearchservice-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-opensearchservice-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-organizations-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-organizations-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-pipes-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-pipes-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-prometheusservice-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-prometheusservice-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-rds-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-rds-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-recyclebin-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-recyclebin-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-route53-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-route53-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-route53resolver-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-route53resolver-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-s3-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-s3-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sagemaker-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sagemaker-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-secretsmanager-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-secretsmanager-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ses-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ses-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sfn-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sfn-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sns-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sns-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sqs-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sqs-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ssm-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ssm-controller/catalog.json not 
reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-wafv2-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-wafv2-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aerospike-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aerospike-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/airflow-helm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/airflow-helm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alloydb-omni-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alloydb-omni-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alvearie-imaging-ingestion not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alvearie-imaging-ingestion/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amd-gpu-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amd-gpu-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/analytics-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/analytics-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/annotationlab not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/annotationlab/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicast-community-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicast-community-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-api-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-api-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-registry not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-registry/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurito not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurito/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apimatic-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apimatic-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/application-services-metering-operator not 
reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/application-services-metering-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aqua not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aqua/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/argocd-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/argocd-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/assisted-service-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/assisted-service-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/authorino-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/authorino-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/automotive-infra not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/automotive-infra/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aws-efs-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aws-efs-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/awss3-operator-registry not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/awss3-operator-registry/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/azure-service-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/azure-service-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/beegfs-csi-driver-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/beegfs-csi-driver-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bpfman-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bpfman-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/camel-k not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/camel-k/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/camel-karavan-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/camel-karavan-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cass-operator-community not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cass-operator-community/catalog.json not reset as 
customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cert-manager not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cert-manager/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cert-utils-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cert-utils-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-aas-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-aas-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-impairment-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-impairment-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-manager not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-manager/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/codeflare-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/codeflare-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-kubevirt-hyperconverged not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-kubevirt-hyperconverged/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-trivy-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-trivy-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-windows-machine-config-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-windows-machine-config-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/customized-user-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/customized-user-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cxl-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cxl-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dapr-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dapr-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datatrucker-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datatrucker-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dbaas-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dbaas-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/debezium-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/debezium-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dell-csm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dell-csm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/deployment-validation-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/deployment-validation-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devopsinabox not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devopsinabox/catalog.json not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dns-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dns-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eclipse-amlen-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eclipse-amlen-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eclipse-che not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eclipse-che/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ecr-secret-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ecr-secret-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/edp-keycloak-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/edp-keycloak-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eginnovations-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eginnovations-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/egressip-ipam-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/egressip-ipam-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ember-csi-community-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ember-csi-community-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/etcd not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/etcd/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eventing-kogito not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eventing-kogito/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/external-secrets-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/external-secrets-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/falcon-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/falcon-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fence-agents-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fence-agents-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flink-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flink-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flux not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flux/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k8gb not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k8gb/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fossul-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fossul-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/github-arc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/github-arc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gitops-primer not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gitops-primer/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gitwebhook-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gitwebhook-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/global-load-balancer-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/global-load-balancer-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/grafana-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/grafana-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/group-sync-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/group-sync-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hawtio-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hawtio-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hazelcast-platform-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hazelcast-platform-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hedvig-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hedvig-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hive-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hive-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/horreum-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/horreum-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hyperfoil-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hyperfoil-bundle/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-block-csi-operator-community not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-block-csi-operator-community/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-access-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-access-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-spectrum-scale-csi-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-spectrum-scale-csi-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibmcloud-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibmcloud-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infinispan not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infinispan/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/integrity-shield-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/integrity-shield-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ipfs-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ipfs-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/istio-workspace-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/istio-workspace-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jaeger not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jaeger/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kaoto-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kaoto-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keda not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keda/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keepalived-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keepalived-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keycloak-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keycloak-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keycloak-permissions-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keycloak-permissions-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/klusterlet not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/klusterlet/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kogito-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kogito-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/koku-metrics-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/koku-metrics-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/konveyor-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/konveyor-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/korrel8r not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/korrel8r/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kuadrant-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kuadrant-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kube-green not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kube-green/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubecost not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubecost/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubernetes-imagepuller-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubernetes-imagepuller-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/l5-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/l5-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/layer7-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/layer7-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lbconfig-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lbconfig-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lib-bucket-provisioner not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lib-bucket-provisioner/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/limitador-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/limitador-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/logging-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/logging-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-helm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-helm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/machine-deletion-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/machine-deletion-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mariadb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mariadb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marin3r not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marin3r/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mercury-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mercury-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/microcks not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/microcks/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-atlas-kubernetes not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-atlas-kubernetes/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/move2kube-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/move2kube-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multi-nic-cni-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multi-nic-cni-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-global-hub-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-global-hub-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-operators-subscription not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-operators-subscription/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/must-gather-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/must-gather-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/namespace-configuration-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/namespace-configuration-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ncn-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ncn-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ndmspc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ndmspc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netobserv-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netobserv-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-community-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-community-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-operator-m88i not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-operator-m88i/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nfs-provisioner-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nfs-provisioner-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nlp-server not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nlp-server/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-discovery-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-discovery-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-healthcheck-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-healthcheck-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-maintenance-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-maintenance-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nsm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nsm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/oadp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/oadp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/observability-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/observability-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/oci-ccm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/oci-ccm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odoo-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odoo-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opendatahub-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opendatahub-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openebs not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openebs/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-nfd-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-nfd-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-node-upgrade-mutex-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-node-upgrade-mutex-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-qiskit-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-qiskit-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opentelemetry-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opentelemetry-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/patch-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/patch-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/patterns-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/patterns-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pcc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pcc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pelorus-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pelorus-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/percona-xtradb-cluster-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/percona-xtradb-cluster-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/portworx-essentials not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/portworx-essentials/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/postgresql not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/postgresql/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/proactive-node-scaling-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/proactive-node-scaling-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/project-quay not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/project-quay/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometheus not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometheus/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometheus-exporter-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometheus-exporter-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometurbo not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometurbo/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pubsubplus-eventbroker-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pubsubplus-eventbroker-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pulp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pulp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rabbitmq-cluster-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rabbitmq-cluster-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rabbitmq-messaging-topology-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rabbitmq-messaging-topology-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/reportportal-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/reportportal-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/resource-locker-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/resource-locker-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhoas-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhoas-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ripsaw not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ripsaw/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sailoperator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sailoperator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-commerce-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-commerce-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-data-intelligence-observer-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-data-intelligence-observer-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-hana-express-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-hana-express-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/seldon-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/seldon-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/self-node-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/self-node-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/service-binding-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/service-binding-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/shipwright-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/shipwright-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sigstore-helm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sigstore-helm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/silicom-sts-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/silicom-sts-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/skupper-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/skupper-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/snapscheduler not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/snapscheduler/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/snyk-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/snyk-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/socmmd not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/socmmd/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sonar-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sonar-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sosivio not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sosivio/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sonataflow-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sonataflow-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sosreport-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sosreport-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/spark-helm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/spark-helm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/special-resource-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/special-resource-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stolostron not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stolostron/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stolostron-engine not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stolostron-engine/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/strimzi-kafka-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/strimzi-kafka-operator/catalog.json not reset as
customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/syndesis not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/syndesis/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tagger not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tagger/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tempo-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tempo-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tf-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tf-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tidb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tidb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trident-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trident-operator/catalog.json not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trustify-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trustify-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ucs-ci-solutions-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ucs-ci-solutions-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/universal-crossplane not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/universal-crossplane/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/varnish-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/varnish-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vault-config-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vault-config-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/verticadb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/verticadb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/volume-expander-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/volume-expander-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/wandb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/wandb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/windup-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/windup-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/yaks not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/yaks/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/utilities not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/utilities/copy-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-utilities/c0fe7256 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-utilities/c30319e4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-utilities/e6b1dd45 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-content/2bb643f0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-content/920de426 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-content/70fa1e87 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/registry-server/a1c12a2f not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/registry-server/9442e6c7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/registry-server/5b45ec72 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/abot-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/abot-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aerospike-kubernetes-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aerospike-kubernetes-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aikit-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aikit-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzo-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzo-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: 
/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzograph-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzograph-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzounstructured-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzounstructured-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudbees-ci-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudbees-ci-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/crunchy-postgres-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/crunchy-postgres-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: 
/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/entando-k8s-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/entando-k8s-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flux not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flux/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/instana-agent-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/instana-agent-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/iomesh-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/iomesh-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: 
/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-paygo-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-paygo-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-term-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-term-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubemq-operator-marketplace-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubemq-operator-marketplace-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/linstor-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/linstor-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marketplace-games-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: 
/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marketplace-games-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/model-builder-for-vision-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/model-builder-for-vision-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-certified-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-certified-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ovms-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ovms-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pachyderm-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pachyderm-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-enterprise-operator-cert-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-enterprise-operator-cert-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/seldon-deploy-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/seldon-deploy-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: 
/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/starburst-enterprise-helm-operator-paygo-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/starburst-enterprise-helm-operator-paygo-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/starburst-enterprise-helm-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/starburst-enterprise-helm-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/timemachine-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/timemachine-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vfunction-server-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vfunction-server-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/xcrypt-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/xcrypt-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/yugabyte-platform-operator-bundle-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: 
/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/yugabyte-platform-operator-bundle-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/zabbix-operator-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/zabbix-operator-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/db.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/index.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/main.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/overflow.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/digest not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/utilities not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: 
/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/utilities/copy-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-utilities/3c9f3a59 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-utilities/1091c11b not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-utilities/9a6821c6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-content/ec0c35e2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-content/517f37e7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-content/6214fe78 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/registry-server/ba189c8b not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/registry-server/351e4f31 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/registry-server/c0f219ff not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/wait-for-host-port/8069f607 not reset as customized by admin to system_u:object_r:container_file_t:s0:c378,c723 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/wait-for-host-port/559c3d82 not reset as customized by admin to system_u:object_r:container_file_t:s0:c133,c223 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/wait-for-host-port/605ad488 not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler/148df488 not reset as customized by admin to system_u:object_r:container_file_t:s0:c378,c723 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler/3bf6dcb4 not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c133,c223 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler/022a2feb not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-cert-syncer/938c3924 not reset as customized by admin to system_u:object_r:container_file_t:s0:c378,c723 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-cert-syncer/729fe23e not reset as customized by admin to system_u:object_r:container_file_t:s0:c133,c223 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-cert-syncer/1fd5cbd4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-recovery-controller/a96697e1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c378,c723 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-recovery-controller/e155ddca not reset as customized by admin to system_u:object_r:container_file_t:s0:c133,c223 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-recovery-controller/10dd0e0f not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle/..2025_02_24_06_09_35.3018472960 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle/..2025_02_24_06_09_35.3018472960/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 28 06:52:34 crc restorecon[4817]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 28 06:52:35 crc restorecon[4817]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 28 06:52:35 crc restorecon[4817]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies/..2025_02_24_06_09_35.4262376737 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 28 06:52:35 crc restorecon[4817]: 
/var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies/..2025_02_24_06_09_35.4262376737/audit.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 28 06:52:35 crc restorecon[4817]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 28 06:52:35 crc restorecon[4817]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies/audit.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 28 06:52:35 crc restorecon[4817]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 28 06:52:35 crc restorecon[4817]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig/..2025_02_24_06_09_35.2630275752 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 28 06:52:35 crc restorecon[4817]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig/..2025_02_24_06_09_35.2630275752/v4-0-config-system-cliconfig not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 28 06:52:35 crc restorecon[4817]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 28 06:52:35 crc restorecon[4817]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig/v4-0-config-system-cliconfig not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 28 06:52:35 crc restorecon[4817]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 28 06:52:35 crc restorecon[4817]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca/..2025_02_24_06_09_35.2376963788 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 28 06:52:35 crc restorecon[4817]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca/..2025_02_24_06_09_35.2376963788/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 28 06:52:35 crc restorecon[4817]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 28 06:52:35 crc restorecon[4817]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 28 06:52:35 crc restorecon[4817]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/etc-hosts not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c682,c947 Nov 28 06:52:35 crc restorecon[4817]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/containers/oauth-openshift/6f2c8392 not reset as customized by admin to system_u:object_r:container_file_t:s0:c267,c588 Nov 28 06:52:35 crc restorecon[4817]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/containers/oauth-openshift/bd241ad9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 28 06:52:35 crc restorecon[4817]: /var/lib/kubelet/plugins not reset as customized by admin to system_u:object_r:container_file_t:s0 Nov 28 06:52:35 crc restorecon[4817]: /var/lib/kubelet/plugins/csi-hostpath not reset as customized by admin to system_u:object_r:container_file_t:s0 Nov 28 06:52:35 crc restorecon[4817]: /var/lib/kubelet/plugins/csi-hostpath/csi.sock not reset as customized by admin to system_u:object_r:container_file_t:s0 Nov 28 06:52:35 crc restorecon[4817]: /var/lib/kubelet/plugins/kubernetes.io not reset as customized by admin to system_u:object_r:container_file_t:s0 Nov 28 06:52:35 crc restorecon[4817]: /var/lib/kubelet/plugins/kubernetes.io/csi not reset as customized by admin to system_u:object_r:container_file_t:s0 Nov 28 06:52:35 crc restorecon[4817]: /var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner not reset as customized by admin to system_u:object_r:container_file_t:s0 Nov 28 06:52:35 crc restorecon[4817]: /var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983 not reset as customized by admin to system_u:object_r:container_file_t:s0 Nov 28 06:52:35 crc restorecon[4817]: /var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983/globalmount not reset as customized by admin to system_u:object_r:container_file_t:s0 Nov 28 06:52:35 crc restorecon[4817]: /var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983/vol_data.json not reset as customized by admin to system_u:object_r:container_file_t:s0 Nov 28 06:52:35 crc restorecon[4817]: /var/lib/kubelet/plugins_registry not reset as customized by admin to system_u:object_r:container_file_t:s0 Nov 28 06:52:35 crc restorecon[4817]: Relabeled /var/usrlocal/bin/kubenswrapper from system_u:object_r:bin_t:s0 to system_u:object_r:kubelet_exec_t:s0 Nov 28 06:52:35 crc kubenswrapper[4922]: Flag --container-runtime-endpoint has been deprecated, This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information. Nov 28 06:52:35 crc kubenswrapper[4922]: Flag --minimum-container-ttl-duration has been deprecated, Use --eviction-hard or --eviction-soft instead. Will be removed in a future version. Nov 28 06:52:35 crc kubenswrapper[4922]: Flag --volume-plugin-dir has been deprecated, This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information. Nov 28 06:52:35 crc kubenswrapper[4922]: Flag --register-with-taints has been deprecated, This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information. 
Nov 28 06:52:35 crc kubenswrapper[4922]: Flag --pod-infra-container-image has been deprecated, will be removed in a future release. Image garbage collector will get sandbox image information from CRI.
Nov 28 06:52:35 crc kubenswrapper[4922]: Flag --system-reserved has been deprecated, This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information.
Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.199551 4922 server.go:211] "--pod-infra-container-image will not be pruned by the image garbage collector in kubelet and should also be set in the remote runtime"
Nov 28 06:52:35 crc kubenswrapper[4922]: W1128 06:52:35.202184 4922 feature_gate.go:330] unrecognized feature gate: OVNObservability
Nov 28 06:52:35 crc kubenswrapper[4922]: W1128 06:52:35.202202 4922 feature_gate.go:330] unrecognized feature gate: NodeDisruptionPolicy
Nov 28 06:52:35 crc kubenswrapper[4922]: W1128 06:52:35.202207 4922 feature_gate.go:330] unrecognized feature gate: NutanixMultiSubnets
Nov 28 06:52:35 crc kubenswrapper[4922]: W1128 06:52:35.202211 4922 feature_gate.go:330] unrecognized feature gate: SetEIPForNLBIngressController
Nov 28 06:52:35 crc kubenswrapper[4922]: W1128 06:52:35.202228 4922 feature_gate.go:330] unrecognized feature gate: UpgradeStatus
Nov 28 06:52:35 crc kubenswrapper[4922]: W1128 06:52:35.202233 4922 feature_gate.go:351] Setting deprecated feature gate KMSv1=true. It will be removed in a future release.
Nov 28 06:52:35 crc kubenswrapper[4922]: W1128 06:52:35.202238 4922 feature_gate.go:330] unrecognized feature gate: VSphereControlPlaneMachineSet
Nov 28 06:52:35 crc kubenswrapper[4922]: W1128 06:52:35.202243 4922 feature_gate.go:330] unrecognized feature gate: Example
Nov 28 06:52:35 crc kubenswrapper[4922]: W1128 06:52:35.202247 4922 feature_gate.go:330] unrecognized feature gate: PinnedImages
Nov 28 06:52:35 crc kubenswrapper[4922]: W1128 06:52:35.202252 4922 feature_gate.go:330] unrecognized feature gate: GatewayAPI
Nov 28 06:52:35 crc kubenswrapper[4922]: W1128 06:52:35.202257 4922 feature_gate.go:330] unrecognized feature gate: PlatformOperators
Nov 28 06:52:35 crc kubenswrapper[4922]: W1128 06:52:35.202261 4922 feature_gate.go:330] unrecognized feature gate: PrivateHostedZoneAWS
Nov 28 06:52:35 crc kubenswrapper[4922]: W1128 06:52:35.202265 4922 feature_gate.go:330] unrecognized feature gate: MachineAPIMigration
Nov 28 06:52:35 crc kubenswrapper[4922]: W1128 06:52:35.202270 4922 feature_gate.go:330] unrecognized feature gate: MinimumKubeletVersion
Nov 28 06:52:35 crc kubenswrapper[4922]: W1128 06:52:35.202274 4922 feature_gate.go:330] unrecognized feature gate: VSphereMultiVCenters
Nov 28 06:52:35 crc kubenswrapper[4922]: W1128 06:52:35.202279 4922 feature_gate.go:330] unrecognized feature gate: VSphereMultiNetworks
Nov 28 06:52:35 crc kubenswrapper[4922]: W1128 06:52:35.202283 4922 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAWS
Nov 28 06:52:35 crc kubenswrapper[4922]: W1128 06:52:35.202286 4922 feature_gate.go:330] unrecognized feature gate: NewOLM
Nov 28 06:52:35 crc kubenswrapper[4922]: W1128 06:52:35.202290 4922 feature_gate.go:330] unrecognized feature gate: ClusterMonitoringConfig
Nov 28 06:52:35 crc kubenswrapper[4922]: W1128 06:52:35.202293 4922 feature_gate.go:330] unrecognized feature gate: MultiArchInstallGCP
Nov 28 06:52:35 crc kubenswrapper[4922]: W1128 06:52:35.202298 4922 feature_gate.go:353] Setting GA feature gate CloudDualStackNodeIPs=true. It will be removed in a future release.
Nov 28 06:52:35 crc kubenswrapper[4922]: W1128 06:52:35.202303 4922 feature_gate.go:330] unrecognized feature gate: PersistentIPsForVirtualization
Nov 28 06:52:35 crc kubenswrapper[4922]: W1128 06:52:35.202307 4922 feature_gate.go:330] unrecognized feature gate: AWSClusterHostedDNS
Nov 28 06:52:35 crc kubenswrapper[4922]: W1128 06:52:35.202311 4922 feature_gate.go:330] unrecognized feature gate: GCPClusterHostedDNS
Nov 28 06:52:35 crc kubenswrapper[4922]: W1128 06:52:35.202315 4922 feature_gate.go:330] unrecognized feature gate: MachineAPIProviderOpenStack
Nov 28 06:52:35 crc kubenswrapper[4922]: W1128 06:52:35.202318 4922 feature_gate.go:330] unrecognized feature gate: NetworkSegmentation
Nov 28 06:52:35 crc kubenswrapper[4922]: W1128 06:52:35.202322 4922 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstall
Nov 28 06:52:35 crc kubenswrapper[4922]: W1128 06:52:35.202326 4922 feature_gate.go:330] unrecognized feature gate: VSphereDriverConfiguration
Nov 28 06:52:35 crc kubenswrapper[4922]: W1128 06:52:35.202331 4922 feature_gate.go:330] unrecognized feature gate: InsightsOnDemandDataGather
Nov 28 06:52:35 crc kubenswrapper[4922]: W1128 06:52:35.202335 4922 feature_gate.go:330] unrecognized feature gate: HardwareSpeed
Nov 28 06:52:35 crc kubenswrapper[4922]: W1128 06:52:35.202339 4922 feature_gate.go:330] unrecognized feature gate: AutomatedEtcdBackup
Nov 28 06:52:35 crc kubenswrapper[4922]: W1128 06:52:35.202342 4922 feature_gate.go:330] unrecognized feature gate: ConsolePluginContentSecurityPolicy
Nov 28 06:52:35 crc kubenswrapper[4922]: W1128 06:52:35.202348 4922 feature_gate.go:353] Setting GA feature gate ValidatingAdmissionPolicy=true. It will be removed in a future release.
Nov 28 06:52:35 crc kubenswrapper[4922]: W1128 06:52:35.202353 4922 feature_gate.go:330] unrecognized feature gate: NetworkLiveMigration
Nov 28 06:52:35 crc kubenswrapper[4922]: W1128 06:52:35.202357 4922 feature_gate.go:330] unrecognized feature gate: OnClusterBuild
Nov 28 06:52:35 crc kubenswrapper[4922]: W1128 06:52:35.202361 4922 feature_gate.go:330] unrecognized feature gate: BootcNodeManagement
Nov 28 06:52:35 crc kubenswrapper[4922]: W1128 06:52:35.202365 4922 feature_gate.go:330] unrecognized feature gate: AlibabaPlatform
Nov 28 06:52:35 crc kubenswrapper[4922]: W1128 06:52:35.202369 4922 feature_gate.go:330] unrecognized feature gate: AdminNetworkPolicy
Nov 28 06:52:35 crc kubenswrapper[4922]: W1128 06:52:35.202373 4922 feature_gate.go:330] unrecognized feature gate: IngressControllerDynamicConfigurationManager
Nov 28 06:52:35 crc kubenswrapper[4922]: W1128 06:52:35.202376 4922 feature_gate.go:330] unrecognized feature gate: VSphereStaticIPs
Nov 28 06:52:35 crc kubenswrapper[4922]: W1128 06:52:35.202380 4922 feature_gate.go:330] unrecognized feature gate: ImageStreamImportMode
Nov 28 06:52:35 crc kubenswrapper[4922]: W1128 06:52:35.202383 4922 feature_gate.go:330] unrecognized feature gate: InsightsConfig
Nov 28 06:52:35 crc kubenswrapper[4922]: W1128 06:52:35.202387 4922 feature_gate.go:330] unrecognized feature gate: VolumeGroupSnapshot
Nov 28 06:52:35 crc kubenswrapper[4922]: W1128 06:52:35.202391 4922 feature_gate.go:330] unrecognized feature gate: MetricsCollectionProfiles
Nov 28 06:52:35 crc kubenswrapper[4922]: W1128 06:52:35.202394 4922 feature_gate.go:330] unrecognized feature gate: EtcdBackendQuota
Nov 28 06:52:35 crc kubenswrapper[4922]: W1128 06:52:35.202398 4922 feature_gate.go:330] unrecognized feature gate: RouteAdvertisements
Nov 28 06:52:35 crc kubenswrapper[4922]: W1128 06:52:35.202402 4922 feature_gate.go:353] Setting GA feature gate DisableKubeletCloudCredentialProviders=true. It will be removed in a future release.
Nov 28 06:52:35 crc kubenswrapper[4922]: W1128 06:52:35.202407 4922 feature_gate.go:330] unrecognized feature gate: InsightsRuntimeExtractor
Nov 28 06:52:35 crc kubenswrapper[4922]: W1128 06:52:35.202410 4922 feature_gate.go:330] unrecognized feature gate: ExternalOIDC
Nov 28 06:52:35 crc kubenswrapper[4922]: W1128 06:52:35.202414 4922 feature_gate.go:330] unrecognized feature gate: BareMetalLoadBalancer
Nov 28 06:52:35 crc kubenswrapper[4922]: W1128 06:52:35.202418 4922 feature_gate.go:330] unrecognized feature gate: ChunkSizeMiB
Nov 28 06:52:35 crc kubenswrapper[4922]: W1128 06:52:35.202421 4922 feature_gate.go:330] unrecognized feature gate: MixedCPUsAllocation
Nov 28 06:52:35 crc kubenswrapper[4922]: W1128 06:52:35.202424 4922 feature_gate.go:330] unrecognized feature gate: SigstoreImageVerification
Nov 28 06:52:35 crc kubenswrapper[4922]: W1128 06:52:35.202427 4922 feature_gate.go:330] unrecognized feature gate: CSIDriverSharedResource
Nov 28 06:52:35 crc kubenswrapper[4922]: W1128 06:52:35.202431 4922 feature_gate.go:330] unrecognized feature gate: MachineConfigNodes
Nov 28 06:52:35 crc kubenswrapper[4922]: W1128 06:52:35.202434 4922 feature_gate.go:330] unrecognized feature gate: SignatureStores
Nov 28 06:52:35 crc kubenswrapper[4922]: W1128 06:52:35.202438 4922 feature_gate.go:330] unrecognized feature gate: InsightsConfigAPI
Nov 28 06:52:35 crc kubenswrapper[4922]: W1128 06:52:35.202441 4922 feature_gate.go:330] unrecognized feature gate: ManagedBootImagesAWS
Nov 28 06:52:35 crc kubenswrapper[4922]: W1128 06:52:35.202444 4922 feature_gate.go:330] unrecognized feature gate: BuildCSIVolumes
Nov 28 06:52:35 crc kubenswrapper[4922]: W1128 06:52:35.202448 4922 feature_gate.go:330] unrecognized feature gate: GCPLabelsTags
Nov 28 06:52:35 crc kubenswrapper[4922]: W1128 06:52:35.202452 4922 feature_gate.go:330] unrecognized feature gate: AWSEFSDriverVolumeMetrics
Nov 28 06:52:35 crc kubenswrapper[4922]: W1128 06:52:35.202455 4922 feature_gate.go:330] unrecognized feature gate: DNSNameResolver
Nov 28 06:52:35 crc kubenswrapper[4922]: W1128 06:52:35.202458 4922 feature_gate.go:330] unrecognized feature gate: ManagedBootImages
Nov 28 06:52:35 crc kubenswrapper[4922]: W1128 06:52:35.202462 4922 feature_gate.go:330] unrecognized feature gate: OpenShiftPodSecurityAdmission
Nov 28 06:52:35 crc kubenswrapper[4922]: W1128 06:52:35.202465 4922 feature_gate.go:330] unrecognized feature gate: AdditionalRoutingCapabilities
Nov 28 06:52:35 crc kubenswrapper[4922]: W1128 06:52:35.202469 4922 feature_gate.go:330] unrecognized feature gate: AzureWorkloadIdentity
Nov 28 06:52:35 crc kubenswrapper[4922]: W1128 06:52:35.202473 4922 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstallIBMCloud
Nov 28 06:52:35 crc kubenswrapper[4922]: W1128 06:52:35.202476 4922 feature_gate.go:330] unrecognized feature gate: IngressControllerLBSubnetsAWS
Nov 28 06:52:35 crc kubenswrapper[4922]: W1128 06:52:35.202479 4922 feature_gate.go:330] unrecognized feature gate: MachineAPIOperatorDisableMachineHealthCheckController
Nov 28 06:52:35 crc kubenswrapper[4922]: W1128 06:52:35.202483 4922 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAzure
Nov 28 06:52:35 crc kubenswrapper[4922]: W1128 06:52:35.202486 4922 feature_gate.go:330] unrecognized feature gate: NetworkDiagnosticsConfig
Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.202718 4922 flags.go:64] FLAG: --address="0.0.0.0"
Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.202729 4922 flags.go:64] FLAG: --allowed-unsafe-sysctls="[]"
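The storm of feature_gate.go:330 warnings above is the kubelet parsing a feature-gate list that includes OpenShift-specific gates it does not register itself: unknown names are warned about and skipped, known ones are applied (the real logic lives in k8s.io/component-base/featuregate). A rough stdlib-only sketch of that parse-and-warn behavior; the known map below is a tiny illustrative subset, not the kubelet's actual registry:

    package main

    import (
        "fmt"
        "strconv"
        "strings"
    )

    // known stands in for the gates the kubelet registers; anything absent
    // from it draws an "unrecognized feature gate" warning, as in the log.
    var known = map[string]bool{ // name -> default value
        "CloudDualStackNodeIPs":     true,
        "KMSv1":                     false,
        "ValidatingAdmissionPolicy": true,
    }

    func apply(spec string, enabled map[string]bool) {
        for _, kv := range strings.Split(spec, ",") {
            name, val, ok := strings.Cut(kv, "=")
            if !ok {
                continue // malformed entry, ignore in this sketch
            }
            b, err := strconv.ParseBool(val)
            if err != nil {
                fmt.Printf("invalid value for feature gate %s: %v\n", name, err)
                continue
            }
            if _, found := known[name]; !found {
                fmt.Printf("W unrecognized feature gate: %s\n", name)
                continue
            }
            enabled[name] = b
        }
    }

    func main() {
        enabled := map[string]bool{}
        apply("KMSv1=true,GatewayAPI=true,CloudDualStackNodeIPs=true", enabled)
        fmt.Printf("feature gates: %v\n", enabled) // GatewayAPI warned and dropped
    }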
Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.202735 4922 flags.go:64] FLAG: --anonymous-auth="true"
Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.202740 4922 flags.go:64] FLAG: --application-metrics-count-limit="100"
Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.202746 4922 flags.go:64] FLAG: --authentication-token-webhook="false"
Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.202750 4922 flags.go:64] FLAG: --authentication-token-webhook-cache-ttl="2m0s"
Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.202755 4922 flags.go:64] FLAG: --authorization-mode="AlwaysAllow"
Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.202763 4922 flags.go:64] FLAG: --authorization-webhook-cache-authorized-ttl="5m0s"
Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.202767 4922 flags.go:64] FLAG: --authorization-webhook-cache-unauthorized-ttl="30s"
Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.202771 4922 flags.go:64] FLAG: --boot-id-file="/proc/sys/kernel/random/boot_id"
Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.202776 4922 flags.go:64] FLAG: --bootstrap-kubeconfig="/etc/kubernetes/kubeconfig"
Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.202780 4922 flags.go:64] FLAG: --cert-dir="/var/lib/kubelet/pki"
Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.202784 4922 flags.go:64] FLAG: --cgroup-driver="cgroupfs"
Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.202788 4922 flags.go:64] FLAG: --cgroup-root=""
Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.202792 4922 flags.go:64] FLAG: --cgroups-per-qos="true"
Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.202796 4922 flags.go:64] FLAG: --client-ca-file=""
Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.202800 4922 flags.go:64] FLAG: --cloud-config=""
Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.202804 4922 flags.go:64] FLAG: --cloud-provider=""
Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.202808 4922 flags.go:64] FLAG: --cluster-dns="[]"
Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.202812 4922 flags.go:64] FLAG: --cluster-domain=""
Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.202816 4922 flags.go:64] FLAG: --config="/etc/kubernetes/kubelet.conf"
Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.202820 4922 flags.go:64] FLAG: --config-dir=""
Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.202824 4922 flags.go:64] FLAG: --container-hints="/etc/cadvisor/container_hints.json"
Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.202828 4922 flags.go:64] FLAG: --container-log-max-files="5"
Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.202834 4922 flags.go:64] FLAG: --container-log-max-size="10Mi"
Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.202838 4922 flags.go:64] FLAG: --container-runtime-endpoint="/var/run/crio/crio.sock"
Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.202842 4922 flags.go:64] FLAG: --containerd="/run/containerd/containerd.sock"
Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.202846 4922 flags.go:64] FLAG: --containerd-namespace="k8s.io"
Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.202850 4922 flags.go:64] FLAG: --contention-profiling="false"
Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.202854 4922 flags.go:64] FLAG: --cpu-cfs-quota="true"
Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.202857 4922 flags.go:64] FLAG: --cpu-cfs-quota-period="100ms"
Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.202862 4922 flags.go:64] FLAG: --cpu-manager-policy="none"
Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.202866 4922 flags.go:64] FLAG: --cpu-manager-policy-options=""
Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.202870 4922 flags.go:64] FLAG: --cpu-manager-reconcile-period="10s"
Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.202874 4922 flags.go:64] FLAG: --enable-controller-attach-detach="true"
Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.202878 4922 flags.go:64] FLAG: --enable-debugging-handlers="true"
Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.202883 4922 flags.go:64] FLAG: --enable-load-reader="false"
Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.202887 4922 flags.go:64] FLAG: --enable-server="true"
Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.202891 4922 flags.go:64] FLAG: --enforce-node-allocatable="[pods]"
Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.202898 4922 flags.go:64] FLAG: --event-burst="100"
Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.202902 4922 flags.go:64] FLAG: --event-qps="50"
Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.202906 4922 flags.go:64] FLAG: --event-storage-age-limit="default=0"
Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.202910 4922 flags.go:64] FLAG: --event-storage-event-limit="default=0"
Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.202915 4922 flags.go:64] FLAG: --eviction-hard=""
Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.202920 4922 flags.go:64] FLAG: --eviction-max-pod-grace-period="0"
Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.202924 4922 flags.go:64] FLAG: --eviction-minimum-reclaim=""
Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.202928 4922 flags.go:64] FLAG: --eviction-pressure-transition-period="5m0s"
Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.202932 4922 flags.go:64] FLAG: --eviction-soft=""
Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.202937 4922 flags.go:64] FLAG: --eviction-soft-grace-period=""
Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.202941 4922 flags.go:64] FLAG: --exit-on-lock-contention="false"
Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.202945 4922 flags.go:64] FLAG: --experimental-allocatable-ignore-eviction="false"
Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.202949 4922 flags.go:64] FLAG: --experimental-mounter-path=""
Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.202953 4922 flags.go:64] FLAG: --fail-cgroupv1="false"
Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.202957 4922 flags.go:64] FLAG: --fail-swap-on="true"
Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.202961 4922 flags.go:64] FLAG: --feature-gates=""
Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.202966 4922 flags.go:64] FLAG: --file-check-frequency="20s"
Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.202970 4922 flags.go:64] FLAG: --global-housekeeping-interval="1m0s"
Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.202974 4922 flags.go:64] FLAG: --hairpin-mode="promiscuous-bridge"
Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.202978 4922 flags.go:64] FLAG: --healthz-bind-address="127.0.0.1"
Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.202982 4922 flags.go:64] FLAG: --healthz-port="10248"
Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.202986 4922 flags.go:64] FLAG: --help="false"
Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.202990 4922 flags.go:64] FLAG: --hostname-override=""
Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.202994 4922 flags.go:64] FLAG: --housekeeping-interval="10s"
Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.202998 4922 flags.go:64] FLAG: --http-check-frequency="20s"
Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.203002 4922 flags.go:64] FLAG: --image-credential-provider-bin-dir=""
Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.203006 4922 flags.go:64] FLAG: --image-credential-provider-config=""
Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.203010 4922 flags.go:64] FLAG: --image-gc-high-threshold="85"
Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.203014 4922 flags.go:64] FLAG: --image-gc-low-threshold="80"
Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.203019 4922 flags.go:64] FLAG: --image-service-endpoint=""
Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.203023 4922 flags.go:64] FLAG: --kernel-memcg-notification="false"
Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.203027 4922 flags.go:64] FLAG: --kube-api-burst="100"
Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.203032 4922 flags.go:64] FLAG: --kube-api-content-type="application/vnd.kubernetes.protobuf"
Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.203036 4922 flags.go:64] FLAG: --kube-api-qps="50"
Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.203041 4922 flags.go:64] FLAG: --kube-reserved=""
Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.203045 4922 flags.go:64] FLAG: --kube-reserved-cgroup=""
Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.203049 4922 flags.go:64] FLAG: --kubeconfig="/var/lib/kubelet/kubeconfig"
Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.203053 4922 flags.go:64] FLAG: --kubelet-cgroups=""
Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.203056 4922 flags.go:64] FLAG: --local-storage-capacity-isolation="true"
Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.203061 4922 flags.go:64] FLAG: --lock-file=""
Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.203064 4922 flags.go:64] FLAG: --log-cadvisor-usage="false"
Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.203068 4922 flags.go:64] FLAG: --log-flush-frequency="5s"
Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.203072 4922 flags.go:64] FLAG: --log-json-info-buffer-size="0"
Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.203078 4922 flags.go:64] FLAG: --log-json-split-stream="false"
Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.203082 4922 flags.go:64] FLAG: --log-text-info-buffer-size="0"
Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.203086 4922 flags.go:64] FLAG: --log-text-split-stream="false"
Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.203090 4922 flags.go:64] FLAG: --logging-format="text"
Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.203094 4922 flags.go:64] FLAG: --machine-id-file="/etc/machine-id,/var/lib/dbus/machine-id"
Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.203098 4922 flags.go:64] FLAG: --make-iptables-util-chains="true"
Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.203103 4922 flags.go:64] FLAG: --manifest-url=""
Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.203107 4922 flags.go:64] FLAG: --manifest-url-header=""
Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.203112 4922 flags.go:64] FLAG: --max-housekeeping-interval="15s"
Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.203116 4922 flags.go:64] FLAG: --max-open-files="1000000"
Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.203121 4922 flags.go:64] FLAG: --max-pods="110"
Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.203125 4922 flags.go:64] FLAG: --maximum-dead-containers="-1"
Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.203129 4922 flags.go:64] FLAG: --maximum-dead-containers-per-container="1"
Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.203134 4922 flags.go:64] FLAG: --memory-manager-policy="None"
Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.203138 4922 flags.go:64] FLAG: --minimum-container-ttl-duration="6m0s"
Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.203142 4922 flags.go:64] FLAG: --minimum-image-ttl-duration="2m0s"
Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.203146 4922 flags.go:64] FLAG: --node-ip="192.168.126.11"
Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.203150 4922 flags.go:64] FLAG: --node-labels="node-role.kubernetes.io/control-plane=,node-role.kubernetes.io/master=,node.openshift.io/os_id=rhcos"
Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.203158 4922 flags.go:64] FLAG: --node-status-max-images="50"
Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.203162 4922 flags.go:64] FLAG: --node-status-update-frequency="10s"
Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.203166 4922 flags.go:64] FLAG: --oom-score-adj="-999"
Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.203171 4922 flags.go:64] FLAG: --pod-cidr=""
Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.203175 4922 flags.go:64] FLAG: --pod-infra-container-image="quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:33549946e22a9ffa738fd94b1345f90921bc8f92fa6137784cb33c77ad806f9d"
Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.203181 4922 flags.go:64] FLAG: --pod-manifest-path=""
Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.203185 4922 flags.go:64] FLAG: --pod-max-pids="-1"
Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.203189 4922 flags.go:64] FLAG: --pods-per-core="0"
Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.203193 4922 flags.go:64] FLAG: --port="10250"
Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.203198 4922 flags.go:64] FLAG: --protect-kernel-defaults="false"
Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.203202 4922 flags.go:64] FLAG: --provider-id=""
Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.203205 4922 flags.go:64] FLAG: --qos-reserved=""
Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.203209 4922 flags.go:64] FLAG: --read-only-port="10255"
Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.203229 4922 flags.go:64] FLAG: --register-node="true"
Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.203234 4922 flags.go:64] FLAG: --register-schedulable="true"
Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.203238 4922 flags.go:64] FLAG: --register-with-taints="node-role.kubernetes.io/master=:NoSchedule"
Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.203245 4922 flags.go:64] FLAG: --registry-burst="10"
Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.203248 4922 flags.go:64] FLAG: --registry-qps="5"
Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.203253 4922 flags.go:64] FLAG: --reserved-cpus=""
Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.203257 4922 flags.go:64] FLAG: --reserved-memory=""
Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.203265 4922 flags.go:64] FLAG: --resolv-conf="/etc/resolv.conf"
Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.203269 4922 flags.go:64] FLAG: --root-dir="/var/lib/kubelet"
Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.203272 4922 flags.go:64] FLAG: --rotate-certificates="false"
Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.203276 4922 flags.go:64] FLAG: --rotate-server-certificates="false"
Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.203280 4922 flags.go:64] FLAG: --runonce="false"
Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.203284 4922 flags.go:64] FLAG: --runtime-cgroups="/system.slice/crio.service"
Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.203288 4922 flags.go:64] FLAG: --runtime-request-timeout="2m0s"
Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.203292 4922 flags.go:64] FLAG: --seccomp-default="false"
Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.203296 4922 flags.go:64] FLAG: --serialize-image-pulls="true"
Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.203300 4922 flags.go:64] FLAG: --storage-driver-buffer-duration="1m0s"
Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.203304 4922 flags.go:64] FLAG: --storage-driver-db="cadvisor"
Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.203308 4922 flags.go:64] FLAG: --storage-driver-host="localhost:8086"
Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.203312 4922 flags.go:64] FLAG: --storage-driver-password="root"
Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.203316 4922 flags.go:64] FLAG: --storage-driver-secure="false"
Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.203320 4922 flags.go:64] FLAG: --storage-driver-table="stats"
Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.203325 4922 flags.go:64] FLAG: --storage-driver-user="root"
Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.203329 4922 flags.go:64] FLAG: --streaming-connection-idle-timeout="4h0m0s"
Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.203333 4922 flags.go:64] FLAG: --sync-frequency="1m0s"
Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.203337 4922 flags.go:64] FLAG: --system-cgroups=""
Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.203341 4922 flags.go:64] FLAG: --system-reserved="cpu=200m,ephemeral-storage=350Mi,memory=350Mi"
Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.203347 4922 flags.go:64] FLAG: --system-reserved-cgroup=""
Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.203351 4922 flags.go:64] FLAG: --tls-cert-file=""
Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.203355 4922 flags.go:64] FLAG: --tls-cipher-suites="[]"
Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.203359 4922 flags.go:64] FLAG: --tls-min-version=""
Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.203363 4922 flags.go:64] FLAG: --tls-private-key-file=""
Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.203368 4922 flags.go:64] FLAG: --topology-manager-policy="none"
Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.203372 4922 flags.go:64] FLAG: --topology-manager-policy-options=""
Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.203376 4922 flags.go:64] FLAG: --topology-manager-scope="container"
Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.203380 4922 flags.go:64] FLAG: --v="2"
Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.203385 4922 flags.go:64] FLAG: --version="false"
Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.203391 4922 flags.go:64] FLAG: --vmodule=""
Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.203395 4922 flags.go:64] FLAG: --volume-plugin-dir="/etc/kubernetes/kubelet-plugins/volume/exec"
Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.203400 4922 flags.go:64] FLAG: --volume-stats-agg-period="1m0s"
Nov 28 06:52:35 crc kubenswrapper[4922]: W1128 06:52:35.203516 4922 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstall
Nov 28 06:52:35 crc kubenswrapper[4922]: W1128 06:52:35.203521 4922 feature_gate.go:330] unrecognized feature gate: ClusterMonitoringConfig
Nov 28 06:52:35 crc kubenswrapper[4922]: W1128 06:52:35.203525 4922 feature_gate.go:330] unrecognized feature gate: AlibabaPlatform
Nov 28 06:52:35 crc kubenswrapper[4922]: W1128 06:52:35.203529 4922 feature_gate.go:330] unrecognized feature gate: BareMetalLoadBalancer
Nov 28 06:52:35 crc kubenswrapper[4922]: W1128 06:52:35.203532 4922 feature_gate.go:330] unrecognized feature gate: Example
Nov 28 06:52:35 crc kubenswrapper[4922]: W1128 06:52:35.203537 4922 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAzure
Nov 28 06:52:35 crc kubenswrapper[4922]: W1128 06:52:35.203541 4922 feature_gate.go:330] unrecognized feature gate: AutomatedEtcdBackup
Nov 28 06:52:35 crc kubenswrapper[4922]: W1128 06:52:35.203544 4922 feature_gate.go:330] unrecognized feature gate: InsightsConfig
Nov 28 06:52:35 crc kubenswrapper[4922]: W1128 06:52:35.203547 4922 feature_gate.go:330] unrecognized feature gate: RouteAdvertisements
Nov 28 06:52:35 crc kubenswrapper[4922]: W1128 06:52:35.203551 4922 feature_gate.go:330] unrecognized feature gate: PinnedImages
Nov 28 06:52:35 crc kubenswrapper[4922]: W1128 06:52:35.203554 4922 feature_gate.go:330] unrecognized feature gate: MixedCPUsAllocation
Nov 28 06:52:35 crc kubenswrapper[4922]: W1128 06:52:35.203558 4922 feature_gate.go:330] unrecognized feature gate: SigstoreImageVerification
Nov 28 06:52:35 crc kubenswrapper[4922]: W1128 06:52:35.203561 4922 feature_gate.go:330] unrecognized feature gate: NetworkDiagnosticsConfig
Nov 28 06:52:35 crc kubenswrapper[4922]: W1128 06:52:35.203565 4922 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAWS
Nov 28 06:52:35 crc kubenswrapper[4922]: W1128 06:52:35.203570 4922 feature_gate.go:330] unrecognized feature gate: InsightsConfigAPI
Nov 28 06:52:35 crc kubenswrapper[4922]: W1128 06:52:35.203573 4922 feature_gate.go:330] unrecognized feature gate: MachineAPIOperatorDisableMachineHealthCheckController
Nov 28 06:52:35 crc kubenswrapper[4922]: W1128 06:52:35.203577 4922 feature_gate.go:330] unrecognized feature gate: GatewayAPI
Nov 28 06:52:35 crc kubenswrapper[4922]: W1128 06:52:35.203580 4922 feature_gate.go:330] unrecognized feature gate: GCPLabelsTags
Nov 28 06:52:35 crc kubenswrapper[4922]: W1128 06:52:35.203585 4922 feature_gate.go:353] Setting GA feature gate CloudDualStackNodeIPs=true. It will be removed in a future release.
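The flags.go:64 block above is the kubelet walking its entire flag set at startup (verbosity --v="2") and logging every flag with its effective value, defaults included. A sketch of the same pattern with the standard library's flag package; upstream uses the pflag fork, but VisitAll works the same way:

    package main

    import (
        "flag"
        "log"
    )

    func main() {
        // Two illustrative flags; the kubelet registers well over a hundred.
        nodeIP := flag.String("node-ip", "", "IP address of the node")
        maxPods := flag.Int("max-pods", 110, "maximum number of pods")
        flag.Parse()

        // Log every registered flag as `FLAG: --name="value"`, matching the
        // startup dump in the log above (VisitAll includes unset defaults).
        flag.VisitAll(func(f *flag.Flag) {
            log.Printf("FLAG: --%s=%q", f.Name, f.Value.String())
        })

        _, _ = nodeIP, maxPods
    }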
Nov 28 06:52:35 crc kubenswrapper[4922]: W1128 06:52:35.203889 4922 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstallIBMCloud
Nov 28 06:52:35 crc kubenswrapper[4922]: W1128 06:52:35.203896 4922 feature_gate.go:330] unrecognized feature gate: OVNObservability
Nov 28 06:52:35 crc kubenswrapper[4922]: W1128 06:52:35.203900 4922 feature_gate.go:330] unrecognized feature gate: SetEIPForNLBIngressController
Nov 28 06:52:35 crc kubenswrapper[4922]: W1128 06:52:35.203904 4922 feature_gate.go:330] unrecognized feature gate: NetworkLiveMigration
Nov 28 06:52:35 crc kubenswrapper[4922]: W1128 06:52:35.203907 4922 feature_gate.go:330] unrecognized feature gate: ChunkSizeMiB
Nov 28 06:52:35 crc kubenswrapper[4922]: W1128 06:52:35.203911 4922 feature_gate.go:330] unrecognized feature gate: PrivateHostedZoneAWS
Nov 28 06:52:35 crc kubenswrapper[4922]: W1128 06:52:35.203915 4922 feature_gate.go:330] unrecognized feature gate: VSphereMultiNetworks
Nov 28 06:52:35 crc kubenswrapper[4922]: W1128 06:52:35.203918 4922 feature_gate.go:330] unrecognized feature gate: VSphereMultiVCenters
Nov 28 06:52:35 crc kubenswrapper[4922]: W1128 06:52:35.203922 4922 feature_gate.go:330] unrecognized feature gate: AWSClusterHostedDNS
Nov 28 06:52:35 crc kubenswrapper[4922]: W1128 06:52:35.203988 4922 feature_gate.go:330] unrecognized feature gate: VolumeGroupSnapshot
Nov 28 06:52:35 crc kubenswrapper[4922]: W1128 06:52:35.204322 4922 feature_gate.go:351] Setting deprecated feature gate KMSv1=true. It will be removed in a future release.
Nov 28 06:52:35 crc kubenswrapper[4922]: W1128 06:52:35.204329 4922 feature_gate.go:330] unrecognized feature gate: MultiArchInstallGCP
Nov 28 06:52:35 crc kubenswrapper[4922]: W1128 06:52:35.204333 4922 feature_gate.go:330] unrecognized feature gate: PersistentIPsForVirtualization
Nov 28 06:52:35 crc kubenswrapper[4922]: W1128 06:52:35.204337 4922 feature_gate.go:330] unrecognized feature gate: ManagedBootImagesAWS
Nov 28 06:52:35 crc kubenswrapper[4922]: W1128 06:52:35.204341 4922 feature_gate.go:330] unrecognized feature gate: NewOLM
Nov 28 06:52:35 crc kubenswrapper[4922]: W1128 06:52:35.204346 4922 feature_gate.go:330] unrecognized feature gate: OpenShiftPodSecurityAdmission
Nov 28 06:52:35 crc kubenswrapper[4922]: W1128 06:52:35.204350 4922 feature_gate.go:330] unrecognized feature gate: VSphereControlPlaneMachineSet
Nov 28 06:52:35 crc kubenswrapper[4922]: W1128 06:52:35.204354 4922 feature_gate.go:330] unrecognized feature gate: CSIDriverSharedResource
Nov 28 06:52:35 crc kubenswrapper[4922]: W1128 06:52:35.204358 4922 feature_gate.go:330] unrecognized feature gate: MinimumKubeletVersion
Nov 28 06:52:35 crc kubenswrapper[4922]: W1128 06:52:35.204362 4922 feature_gate.go:330] unrecognized feature gate: ImageStreamImportMode
Nov 28 06:52:35 crc kubenswrapper[4922]: W1128 06:52:35.204366 4922 feature_gate.go:330] unrecognized feature gate: MachineAPIProviderOpenStack
Nov 28 06:52:35 crc kubenswrapper[4922]: W1128 06:52:35.204370 4922 feature_gate.go:330] unrecognized feature gate: GCPClusterHostedDNS
Nov 28 06:52:35 crc kubenswrapper[4922]: W1128 06:52:35.204378 4922 feature_gate.go:330] unrecognized feature gate: BootcNodeManagement
Nov 28 06:52:35 crc kubenswrapper[4922]: W1128 06:52:35.204382 4922 feature_gate.go:330] unrecognized feature gate: BuildCSIVolumes
Nov 28 06:52:35 crc kubenswrapper[4922]: W1128 06:52:35.204385 4922 feature_gate.go:330] unrecognized feature gate: AWSEFSDriverVolumeMetrics
Nov 28 06:52:35 crc kubenswrapper[4922]: W1128 06:52:35.204391 4922 feature_gate.go:353] Setting GA feature gate DisableKubeletCloudCredentialProviders=true. It will be removed in a future release.
Nov 28 06:52:35 crc kubenswrapper[4922]: W1128 06:52:35.204395 4922 feature_gate.go:330] unrecognized feature gate: VSphereDriverConfiguration
Nov 28 06:52:35 crc kubenswrapper[4922]: W1128 06:52:35.204401 4922 feature_gate.go:330] unrecognized feature gate: ExternalOIDC
Nov 28 06:52:35 crc kubenswrapper[4922]: W1128 06:52:35.204406 4922 feature_gate.go:330] unrecognized feature gate: ManagedBootImages
Nov 28 06:52:35 crc kubenswrapper[4922]: W1128 06:52:35.204410 4922 feature_gate.go:330] unrecognized feature gate: InsightsOnDemandDataGather
Nov 28 06:52:35 crc kubenswrapper[4922]: W1128 06:52:35.204414 4922 feature_gate.go:330] unrecognized feature gate: AdditionalRoutingCapabilities
Nov 28 06:52:35 crc kubenswrapper[4922]: W1128 06:52:35.204419 4922 feature_gate.go:330] unrecognized feature gate: MachineAPIMigration
Nov 28 06:52:35 crc kubenswrapper[4922]: W1128 06:52:35.204422 4922 feature_gate.go:330] unrecognized feature gate: VSphereStaticIPs
Nov 28 06:52:35 crc kubenswrapper[4922]: W1128 06:52:35.204426 4922 feature_gate.go:330] unrecognized feature gate: AdminNetworkPolicy
Nov 28 06:52:35 crc kubenswrapper[4922]: W1128 06:52:35.204434 4922 feature_gate.go:353] Setting GA feature gate ValidatingAdmissionPolicy=true. It will be removed in a future release.
Nov 28 06:52:35 crc kubenswrapper[4922]: W1128 06:52:35.204438 4922 feature_gate.go:330] unrecognized feature gate: OnClusterBuild
Nov 28 06:52:35 crc kubenswrapper[4922]: W1128 06:52:35.204442 4922 feature_gate.go:330] unrecognized feature gate: AzureWorkloadIdentity
Nov 28 06:52:35 crc kubenswrapper[4922]: W1128 06:52:35.204447 4922 feature_gate.go:330] unrecognized feature gate: HardwareSpeed
Nov 28 06:52:35 crc kubenswrapper[4922]: W1128 06:52:35.204451 4922 feature_gate.go:330] unrecognized feature gate: ConsolePluginContentSecurityPolicy
Nov 28 06:52:35 crc kubenswrapper[4922]: W1128 06:52:35.204454 4922 feature_gate.go:330] unrecognized feature gate: NodeDisruptionPolicy
Nov 28 06:52:35 crc kubenswrapper[4922]: W1128 06:52:35.204458 4922 feature_gate.go:330] unrecognized feature gate: IngressControllerDynamicConfigurationManager
Nov 28 06:52:35 crc kubenswrapper[4922]: W1128 06:52:35.204462 4922 feature_gate.go:330] unrecognized feature gate: DNSNameResolver
Nov 28 06:52:35 crc kubenswrapper[4922]: W1128 06:52:35.204467 4922 feature_gate.go:330] unrecognized feature gate: NutanixMultiSubnets
Nov 28 06:52:35 crc kubenswrapper[4922]: W1128 06:52:35.204471 4922 feature_gate.go:330] unrecognized feature gate: UpgradeStatus
Nov 28 06:52:35 crc kubenswrapper[4922]: W1128 06:52:35.204474 4922 feature_gate.go:330] unrecognized feature gate: InsightsRuntimeExtractor
Nov 28 06:52:35 crc kubenswrapper[4922]: W1128 06:52:35.204505 4922 feature_gate.go:330] unrecognized feature gate: SignatureStores
Nov 28 06:52:35 crc kubenswrapper[4922]: W1128 06:52:35.204509 4922 feature_gate.go:330] unrecognized feature gate: MetricsCollectionProfiles
Nov 28 06:52:35 crc kubenswrapper[4922]: W1128 06:52:35.204516 4922 feature_gate.go:330] unrecognized feature gate: NetworkSegmentation
Nov 28 06:52:35 crc kubenswrapper[4922]: W1128 06:52:35.204520 4922 feature_gate.go:330] unrecognized feature gate: EtcdBackendQuota
Nov 28 06:52:35 crc kubenswrapper[4922]: W1128 06:52:35.204523 4922 feature_gate.go:330] unrecognized feature gate: IngressControllerLBSubnetsAWS
Nov 28 06:52:35 crc kubenswrapper[4922]: W1128 06:52:35.204527 4922 feature_gate.go:330] unrecognized feature gate: MachineConfigNodes
Nov 28 06:52:35 crc kubenswrapper[4922]: W1128 06:52:35.205798 4922 feature_gate.go:330] unrecognized feature gate: PlatformOperators
Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.205806 4922 feature_gate.go:386] feature gates: {map[CloudDualStackNodeIPs:true DisableKubeletCloudCredentialProviders:true DynamicResourceAllocation:false EventedPLEG:false KMSv1:true MaxUnavailableStatefulSet:false NodeSwap:false ProcMountType:false RouteExternalCertificate:false ServiceAccountTokenNodeBinding:false TranslateStreamCloseWebsocketRequests:false UserNamespacesPodSecurityStandards:false UserNamespacesSupport:false ValidatingAdmissionPolicy:true VolumeAttributesClass:false]}
Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.217147 4922 server.go:491] "Kubelet version" kubeletVersion="v1.31.5"
Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.217198 4922 server.go:493] "Golang settings" GOGC="" GOMAXPROCS="" GOTRACEBACK=""
Nov 28 06:52:35 crc kubenswrapper[4922]: W1128 06:52:35.217355 4922 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstall
Nov 28 06:52:35 crc kubenswrapper[4922]: W1128 06:52:35.217370 4922 feature_gate.go:330] unrecognized feature gate: SigstoreImageVerification
Nov 28 06:52:35 crc kubenswrapper[4922]: W1128 06:52:35.217379 4922 feature_gate.go:330] unrecognized feature gate: OVNObservability
Nov 28 06:52:35 crc kubenswrapper[4922]: W1128 06:52:35.217388 4922 feature_gate.go:330] unrecognized feature gate: GatewayAPI
Nov 28 06:52:35 crc kubenswrapper[4922]: W1128 06:52:35.217397 4922 feature_gate.go:330] unrecognized feature gate: NewOLM
Nov 28 06:52:35 crc kubenswrapper[4922]: W1128 06:52:35.217407 4922 feature_gate.go:330] unrecognized feature gate: AlibabaPlatform
Nov 28 06:52:35 crc kubenswrapper[4922]: W1128 06:52:35.217415 4922 feature_gate.go:330] unrecognized feature gate: MetricsCollectionProfiles
Nov 28 06:52:35 crc kubenswrapper[4922]: W1128 06:52:35.217423 4922 feature_gate.go:330] unrecognized feature gate: MachineAPIOperatorDisableMachineHealthCheckController
Nov 28 06:52:35 crc kubenswrapper[4922]: W1128 06:52:35.217432 4922 feature_gate.go:330] unrecognized feature gate: NetworkDiagnosticsConfig
Nov 28 06:52:35 crc kubenswrapper[4922]: W1128 06:52:35.217440 4922 feature_gate.go:330] unrecognized feature gate: VSphereControlPlaneMachineSet
Nov 28 06:52:35 crc kubenswrapper[4922]: W1128 06:52:35.217448 4922 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAWS
Nov 28 06:52:35 crc kubenswrapper[4922]: W1128 06:52:35.217456 4922 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstallIBMCloud
Nov 28 06:52:35 crc kubenswrapper[4922]: W1128 06:52:35.217464 4922 feature_gate.go:330] unrecognized feature gate: Example
Nov 28 06:52:35 crc kubenswrapper[4922]: W1128 06:52:35.217472 4922 feature_gate.go:330] unrecognized feature gate: HardwareSpeed
Nov 28 06:52:35 crc kubenswrapper[4922]: W1128 06:52:35.217481 4922 feature_gate.go:330] unrecognized feature gate: SignatureStores
Nov 28 06:52:35 crc kubenswrapper[4922]: W1128 06:52:35.217489 4922 feature_gate.go:330] unrecognized feature gate: GCPLabelsTags
Nov 28 06:52:35 crc kubenswrapper[4922]: W1128 06:52:35.217497 4922 feature_gate.go:330] unrecognized feature gate: AWSEFSDriverVolumeMetrics
Nov 28 06:52:35 crc kubenswrapper[4922]: W1128 06:52:35.217504 4922 feature_gate.go:330] unrecognized feature gate: NodeDisruptionPolicy
Nov 28 06:52:35 crc kubenswrapper[4922]: W1128 06:52:35.217513 4922 feature_gate.go:330] unrecognized feature gate: InsightsRuntimeExtractor
Nov 28 06:52:35 crc kubenswrapper[4922]: W1128 06:52:35.217520 4922 feature_gate.go:330] unrecognized feature gate: MixedCPUsAllocation
Nov 28 06:52:35 crc kubenswrapper[4922]: W1128 06:52:35.217528 4922 feature_gate.go:330] unrecognized feature gate: AdditionalRoutingCapabilities
Nov 28 06:52:35 crc kubenswrapper[4922]: W1128 06:52:35.217536 4922 feature_gate.go:330] unrecognized feature gate: InsightsConfig
Nov 28 06:52:35 crc kubenswrapper[4922]: W1128 06:52:35.217544 4922 feature_gate.go:330] unrecognized feature gate: EtcdBackendQuota
Nov 28 06:52:35 crc kubenswrapper[4922]: W1128 06:52:35.217551 4922 feature_gate.go:330] unrecognized feature gate: DNSNameResolver
Nov 28 06:52:35 crc kubenswrapper[4922]: W1128 06:52:35.217559 4922 feature_gate.go:330] unrecognized feature gate: ConsolePluginContentSecurityPolicy
Nov 28 06:52:35 crc kubenswrapper[4922]: W1128 06:52:35.217567 4922 feature_gate.go:330] unrecognized feature gate: AdminNetworkPolicy
Nov 28 06:52:35 crc kubenswrapper[4922]: W1128 06:52:35.217577 4922 feature_gate.go:353] Setting GA feature gate ValidatingAdmissionPolicy=true. It will be removed in a future release.
Nov 28 06:52:35 crc kubenswrapper[4922]: W1128 06:52:35.217591 4922 feature_gate.go:330] unrecognized feature gate: CSIDriverSharedResource
Nov 28 06:52:35 crc kubenswrapper[4922]: W1128 06:52:35.217601 4922 feature_gate.go:330] unrecognized feature gate: AutomatedEtcdBackup
Nov 28 06:52:35 crc kubenswrapper[4922]: W1128 06:52:35.217611 4922 feature_gate.go:330] unrecognized feature gate: IngressControllerLBSubnetsAWS
Nov 28 06:52:35 crc kubenswrapper[4922]: W1128 06:52:35.217620 4922 feature_gate.go:330] unrecognized feature gate: UpgradeStatus
Nov 28 06:52:35 crc kubenswrapper[4922]: W1128 06:52:35.217629 4922 feature_gate.go:330] unrecognized feature gate: MachineAPIMigration
Nov 28 06:52:35 crc kubenswrapper[4922]: W1128 06:52:35.217639 4922 feature_gate.go:330] unrecognized feature gate: NetworkLiveMigration
Nov 28 06:52:35 crc kubenswrapper[4922]: W1128 06:52:35.217648 4922 feature_gate.go:330] unrecognized feature gate: VolumeGroupSnapshot
Nov 28 06:52:35 crc kubenswrapper[4922]: W1128 06:52:35.217656 4922 feature_gate.go:330] unrecognized feature gate: BuildCSIVolumes
Nov 28 06:52:35 crc kubenswrapper[4922]: W1128 06:52:35.217665 4922 feature_gate.go:330] unrecognized feature gate: OpenShiftPodSecurityAdmission
Nov 28 06:52:35 crc kubenswrapper[4922]: W1128 06:52:35.217673 4922 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAzure
Nov 28 06:52:35 crc kubenswrapper[4922]: W1128 06:52:35.217682 4922 feature_gate.go:330] unrecognized feature gate: BootcNodeManagement
Nov 28 06:52:35 crc kubenswrapper[4922]: W1128 06:52:35.217689 4922 feature_gate.go:330] unrecognized feature gate: ChunkSizeMiB
Nov 28 06:52:35 crc kubenswrapper[4922]: W1128 06:52:35.217697 4922 feature_gate.go:330] unrecognized feature gate: InsightsConfigAPI
Nov 28 06:52:35 crc kubenswrapper[4922]: W1128 06:52:35.217705 4922 feature_gate.go:330] unrecognized feature gate: AWSClusterHostedDNS
Nov 28 06:52:35 crc kubenswrapper[4922]: W1128 06:52:35.217713 4922 feature_gate.go:330] unrecognized feature gate: VSphereDriverConfiguration
Nov 28 06:52:35 crc kubenswrapper[4922]: W1128 06:52:35.217723 4922 feature_gate.go:353] Setting GA feature gate DisableKubeletCloudCredentialProviders=true. It will be removed in a future release.
Nov 28 06:52:35 crc kubenswrapper[4922]: W1128 06:52:35.217733 4922 feature_gate.go:330] unrecognized feature gate: PlatformOperators
Nov 28 06:52:35 crc kubenswrapper[4922]: W1128 06:52:35.217742 4922 feature_gate.go:330] unrecognized feature gate: MinimumKubeletVersion
Nov 28 06:52:35 crc kubenswrapper[4922]: W1128 06:52:35.217750 4922 feature_gate.go:330] unrecognized feature gate: ManagedBootImages
Nov 28 06:52:35 crc kubenswrapper[4922]: W1128 06:52:35.217759 4922 feature_gate.go:330] unrecognized feature gate: VSphereMultiNetworks
Nov 28 06:52:35 crc kubenswrapper[4922]: W1128 06:52:35.217766 4922 feature_gate.go:330] unrecognized feature gate: MultiArchInstallGCP
Nov 28 06:52:35 crc kubenswrapper[4922]: W1128 06:52:35.217774 4922 feature_gate.go:330] unrecognized feature gate: RouteAdvertisements
Nov 28 06:52:35 crc kubenswrapper[4922]: W1128 06:52:35.217781 4922 feature_gate.go:330] unrecognized feature gate: PinnedImages
Nov 28 06:52:35 crc kubenswrapper[4922]: W1128 06:52:35.217789 4922 feature_gate.go:330] unrecognized feature gate: ImageStreamImportMode
Nov 28 06:52:35 crc kubenswrapper[4922]: W1128 06:52:35.217797 4922 feature_gate.go:330] unrecognized feature gate: GCPClusterHostedDNS
Nov 28 06:52:35 crc kubenswrapper[4922]: W1128 06:52:35.217805 4922 feature_gate.go:330] unrecognized feature gate: InsightsOnDemandDataGather
Nov 28 06:52:35 crc kubenswrapper[4922]: W1128 06:52:35.217812 4922 feature_gate.go:330] unrecognized feature gate: IngressControllerDynamicConfigurationManager
Nov 28 06:52:35 crc kubenswrapper[4922]: W1128 06:52:35.217820 4922 feature_gate.go:330] unrecognized feature gate: SetEIPForNLBIngressController
Nov 28 06:52:35 crc kubenswrapper[4922]: W1128 06:52:35.217830 4922 feature_gate.go:351] Setting deprecated feature gate KMSv1=true. It will be removed in a future release.
Nov 28 06:52:35 crc kubenswrapper[4922]: W1128 06:52:35.217841 4922 feature_gate.go:353] Setting GA feature gate CloudDualStackNodeIPs=true. It will be removed in a future release.
Nov 28 06:52:35 crc kubenswrapper[4922]: W1128 06:52:35.217852 4922 feature_gate.go:330] unrecognized feature gate: BareMetalLoadBalancer
Nov 28 06:52:35 crc kubenswrapper[4922]: W1128 06:52:35.217861 4922 feature_gate.go:330] unrecognized feature gate: ManagedBootImagesAWS
Nov 28 06:52:35 crc kubenswrapper[4922]: W1128 06:52:35.217870 4922 feature_gate.go:330] unrecognized feature gate: AzureWorkloadIdentity
Nov 28 06:52:35 crc kubenswrapper[4922]: W1128 06:52:35.217879 4922 feature_gate.go:330] unrecognized feature gate: PrivateHostedZoneAWS
Nov 28 06:52:35 crc kubenswrapper[4922]: W1128 06:52:35.217888 4922 feature_gate.go:330] unrecognized feature gate: MachineAPIProviderOpenStack
Nov 28 06:52:35 crc kubenswrapper[4922]: W1128 06:52:35.217896 4922 feature_gate.go:330] unrecognized feature gate: NetworkSegmentation
Nov 28 06:52:35 crc kubenswrapper[4922]: W1128 06:52:35.217904 4922 feature_gate.go:330] unrecognized feature gate: VSphereStaticIPs
Nov 28 06:52:35 crc kubenswrapper[4922]: W1128 06:52:35.217912 4922 feature_gate.go:330] unrecognized feature gate: ExternalOIDC
Nov 28 06:52:35 crc kubenswrapper[4922]: W1128 06:52:35.217920 4922 feature_gate.go:330] unrecognized feature gate: MachineConfigNodes
Nov 28 06:52:35 crc kubenswrapper[4922]: W1128 06:52:35.217927 4922 feature_gate.go:330] unrecognized feature gate: NutanixMultiSubnets
Nov 28 06:52:35 crc kubenswrapper[4922]: W1128 06:52:35.217936 4922 feature_gate.go:330] unrecognized feature gate: VSphereMultiVCenters
Nov 28 06:52:35 crc kubenswrapper[4922]: W1128 06:52:35.217944 4922 feature_gate.go:330] unrecognized feature gate: ClusterMonitoringConfig
Nov 28 06:52:35 crc kubenswrapper[4922]: W1128 06:52:35.217951 4922 feature_gate.go:330] unrecognized feature gate: PersistentIPsForVirtualization
Nov 28 06:52:35 crc kubenswrapper[4922]: W1128 06:52:35.217959 4922 feature_gate.go:330] unrecognized feature gate: OnClusterBuild
Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.217972 4922 feature_gate.go:386] feature gates: {map[CloudDualStackNodeIPs:true DisableKubeletCloudCredentialProviders:true DynamicResourceAllocation:false EventedPLEG:false KMSv1:true MaxUnavailableStatefulSet:false NodeSwap:false ProcMountType:false RouteExternalCertificate:false ServiceAccountTokenNodeBinding:false TranslateStreamCloseWebsocketRequests:false UserNamespacesPodSecurityStandards:false UserNamespacesSupport:false ValidatingAdmissionPolicy:true VolumeAttributesClass:false]}
Nov 28 06:52:35 crc kubenswrapper[4922]: W1128 06:52:35.218208 4922 feature_gate.go:330] unrecognized feature gate: GatewayAPI
Nov 28 06:52:35 crc kubenswrapper[4922]: W1128 06:52:35.218245 4922 feature_gate.go:330] unrecognized feature gate: MultiArchInstallGCP
Nov 28 06:52:35 crc kubenswrapper[4922]: W1128 06:52:35.218254 4922 feature_gate.go:330] unrecognized feature gate: ExternalOIDC
Nov 28 06:52:35 crc kubenswrapper[4922]: W1128 06:52:35.218262 4922 feature_gate.go:330] unrecognized feature gate: NetworkSegmentation
Nov 28 06:52:35 crc kubenswrapper[4922]: W1128 06:52:35.218271 4922 feature_gate.go:330] unrecognized feature gate: PersistentIPsForVirtualization
Nov 28 06:52:35 crc kubenswrapper[4922]: W1128 06:52:35.218281 4922 feature_gate.go:353] Setting GA feature gate ValidatingAdmissionPolicy=true. It will be removed in a future release.
Nov 28 06:52:35 crc kubenswrapper[4922]: W1128 06:52:35.218293 4922 feature_gate.go:330] unrecognized feature gate: UpgradeStatus
Nov 28 06:52:35 crc kubenswrapper[4922]: W1128 06:52:35.218302 4922 feature_gate.go:330] unrecognized feature gate: PrivateHostedZoneAWS
Nov 28 06:52:35 crc kubenswrapper[4922]: W1128 06:52:35.218310 4922 feature_gate.go:330] unrecognized feature gate: SigstoreImageVerification
Nov 28 06:52:35 crc kubenswrapper[4922]: W1128 06:52:35.218319 4922 feature_gate.go:330] unrecognized feature gate: MachineAPIProviderOpenStack
Nov 28 06:52:35 crc kubenswrapper[4922]: W1128 06:52:35.218331 4922 feature_gate.go:330] unrecognized feature gate: BootcNodeManagement
Nov 28 06:52:35 crc kubenswrapper[4922]: W1128 06:52:35.218339 4922 feature_gate.go:330] unrecognized feature gate: InsightsRuntimeExtractor
Nov 28 06:52:35 crc kubenswrapper[4922]: W1128 06:52:35.218348 4922 feature_gate.go:330] unrecognized feature gate: BareMetalLoadBalancer
Nov 28 06:52:35 crc kubenswrapper[4922]: W1128 06:52:35.218357 4922 feature_gate.go:330] unrecognized feature gate: NewOLM
Nov 28 06:52:35 crc kubenswrapper[4922]: W1128 06:52:35.218365 4922 feature_gate.go:330] unrecognized feature gate: VolumeGroupSnapshot
Nov 28 06:52:35 crc kubenswrapper[4922]: W1128 06:52:35.218374 4922 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAzure
Nov 28 06:52:35 crc kubenswrapper[4922]: W1128 06:52:35.218382 4922 feature_gate.go:330] unrecognized feature gate: ClusterMonitoringConfig
Nov 28 06:52:35 crc kubenswrapper[4922]: W1128 06:52:35.218390 4922 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAWS
Nov 28 06:52:35 crc kubenswrapper[4922]: W1128 06:52:35.218400 4922 feature_gate.go:330] unrecognized feature gate: EtcdBackendQuota
Nov 28 06:52:35 crc kubenswrapper[4922]: W1128 06:52:35.218408 4922 feature_gate.go:330] unrecognized feature gate: RouteAdvertisements
Nov 28 06:52:35 crc kubenswrapper[4922]: W1128 06:52:35.218419 4922 feature_gate.go:353] Setting GA feature gate CloudDualStackNodeIPs=true. It will be removed in a future release.
Nov 28 06:52:35 crc kubenswrapper[4922]: W1128 06:52:35.218429 4922 feature_gate.go:330] unrecognized feature gate: InsightsConfig
Nov 28 06:52:35 crc kubenswrapper[4922]: W1128 06:52:35.218438 4922 feature_gate.go:330] unrecognized feature gate: Example
Nov 28 06:52:35 crc kubenswrapper[4922]: W1128 06:52:35.218446 4922 feature_gate.go:330] unrecognized feature gate: InsightsOnDemandDataGather
Nov 28 06:52:35 crc kubenswrapper[4922]: W1128 06:52:35.218455 4922 feature_gate.go:330] unrecognized feature gate: VSphereStaticIPs
Nov 28 06:52:35 crc kubenswrapper[4922]: W1128 06:52:35.218464 4922 feature_gate.go:330] unrecognized feature gate: MetricsCollectionProfiles
Nov 28 06:52:35 crc kubenswrapper[4922]: W1128 06:52:35.218472 4922 feature_gate.go:330] unrecognized feature gate: IngressControllerDynamicConfigurationManager
Nov 28 06:52:35 crc kubenswrapper[4922]: W1128 06:52:35.218480 4922 feature_gate.go:330] unrecognized feature gate: VSphereMultiNetworks
Nov 28 06:52:35 crc kubenswrapper[4922]: W1128 06:52:35.218488 4922 feature_gate.go:330] unrecognized feature gate: GCPLabelsTags
Nov 28 06:52:35 crc kubenswrapper[4922]: W1128 06:52:35.218495 4922 feature_gate.go:330] unrecognized feature gate: BuildCSIVolumes
Nov 28 06:52:35 crc kubenswrapper[4922]: W1128 06:52:35.218504 4922 feature_gate.go:330] unrecognized feature gate: MachineConfigNodes
Nov 28 06:52:35 crc kubenswrapper[4922]: W1128 06:52:35.218512 4922 feature_gate.go:330] unrecognized feature gate: AlibabaPlatform
Nov 28 06:52:35 crc kubenswrapper[4922]: W1128 06:52:35.218519 4922 feature_gate.go:330] unrecognized feature gate: NetworkLiveMigration
Nov 28 06:52:35 crc kubenswrapper[4922]: W1128 06:52:35.218527 4922 feature_gate.go:330] unrecognized feature gate: VSphereDriverConfiguration
Nov 28 06:52:35 crc kubenswrapper[4922]: W1128 06:52:35.218535 4922 feature_gate.go:330] unrecognized feature gate: NutanixMultiSubnets
Nov 28 06:52:35 crc kubenswrapper[4922]: W1128 06:52:35.218543 4922 feature_gate.go:330] unrecognized feature gate: AzureWorkloadIdentity
Nov 28 06:52:35 crc kubenswrapper[4922]: W1128 06:52:35.218552 4922 feature_gate.go:330] unrecognized feature gate: NodeDisruptionPolicy
Nov 28 06:52:35 crc kubenswrapper[4922]: W1128 06:52:35.218560 4922 feature_gate.go:330] unrecognized feature gate: OpenShiftPodSecurityAdmission
Nov 28 06:52:35 crc kubenswrapper[4922]: W1128 06:52:35.218568 4922 feature_gate.go:330] unrecognized feature gate: IngressControllerLBSubnetsAWS
Nov 28 06:52:35 crc kubenswrapper[4922]: W1128 06:52:35.218575 4922 feature_gate.go:330] unrecognized feature gate: CSIDriverSharedResource
Nov 28 06:52:35 crc kubenswrapper[4922]: W1128 06:52:35.218584 4922 feature_gate.go:330] unrecognized feature gate: HardwareSpeed
Nov 28 06:52:35 crc kubenswrapper[4922]: W1128 06:52:35.218592 4922 feature_gate.go:330] unrecognized feature gate: ConsolePluginContentSecurityPolicy
Nov 28 06:52:35 crc kubenswrapper[4922]: W1128 06:52:35.218600 4922 feature_gate.go:330] unrecognized feature gate: DNSNameResolver
Nov 28 06:52:35 crc kubenswrapper[4922]: W1128 06:52:35.218608 4922 feature_gate.go:330] unrecognized feature gate: ImageStreamImportMode
Nov 28 06:52:35 crc kubenswrapper[4922]: W1128 06:52:35.218616 4922 feature_gate.go:330] unrecognized feature gate: PlatformOperators
Nov 28 06:52:35 crc kubenswrapper[4922]: W1128 06:52:35.218623 4922 feature_gate.go:330] unrecognized feature gate: MachineAPIOperatorDisableMachineHealthCheckController
Nov 28 06:52:35 crc kubenswrapper[4922]: W1128 06:52:35.218635 4922 feature_gate.go:330] unrecognized feature gate: InsightsConfigAPI
Nov 28 06:52:35 crc kubenswrapper[4922]: W1128 06:52:35.218644 4922 feature_gate.go:330] unrecognized feature gate: MachineAPIMigration
Nov 28 06:52:35 crc kubenswrapper[4922]: W1128 06:52:35.218652 4922 feature_gate.go:330] unrecognized feature gate: OnClusterBuild
Nov 28 06:52:35 crc kubenswrapper[4922]: W1128 06:52:35.218660 4922 feature_gate.go:330] unrecognized feature gate: ManagedBootImagesAWS
Nov 28 06:52:35 crc kubenswrapper[4922]: W1128 06:52:35.218669 4922 feature_gate.go:330] unrecognized feature gate: AdditionalRoutingCapabilities
Nov 28 06:52:35 crc kubenswrapper[4922]: W1128 06:52:35.218677 4922 feature_gate.go:330] unrecognized feature gate: ManagedBootImages
Nov 28 06:52:35 crc kubenswrapper[4922]: W1128 06:52:35.218686 4922 feature_gate.go:330] unrecognized feature gate: PinnedImages
Nov 28 06:52:35 crc kubenswrapper[4922]: W1128 06:52:35.218694 4922 feature_gate.go:330] unrecognized feature gate: AWSEFSDriverVolumeMetrics
Nov 28 06:52:35 crc kubenswrapper[4922]: W1128 06:52:35.218702 4922 feature_gate.go:330] unrecognized feature gate: AutomatedEtcdBackup
Nov 28 06:52:35 crc kubenswrapper[4922]: W1128 06:52:35.218710 4922 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstall
Nov 28 06:52:35 crc kubenswrapper[4922]: W1128 06:52:35.218718 4922 feature_gate.go:330] unrecognized feature gate: SetEIPForNLBIngressController
Nov 28 06:52:35 crc kubenswrapper[4922]: W1128 06:52:35.218726 4922 feature_gate.go:330] unrecognized feature gate: VSphereMultiVCenters
Nov 28 06:52:35 crc kubenswrapper[4922]: W1128 06:52:35.218734 4922 feature_gate.go:330] unrecognized feature gate: NetworkDiagnosticsConfig
Nov 28 06:52:35 crc kubenswrapper[4922]: W1128 06:52:35.218742 4922 feature_gate.go:330] unrecognized feature gate: AdminNetworkPolicy
Nov 28 06:52:35 crc kubenswrapper[4922]: W1128 06:52:35.218750 4922 feature_gate.go:330] unrecognized feature gate: VSphereControlPlaneMachineSet
Nov 28 06:52:35 crc kubenswrapper[4922]: W1128 06:52:35.218758 4922 feature_gate.go:330] unrecognized feature gate: ChunkSizeMiB
Nov 28 06:52:35 crc kubenswrapper[4922]: W1128 06:52:35.218769 4922 feature_gate.go:353] Setting GA feature gate DisableKubeletCloudCredentialProviders=true. It will be removed in a future release.
Nov 28 06:52:35 crc kubenswrapper[4922]: W1128 06:52:35.218779 4922 feature_gate.go:330] unrecognized feature gate: AWSClusterHostedDNS
Nov 28 06:52:35 crc kubenswrapper[4922]: W1128 06:52:35.218789 4922 feature_gate.go:330] unrecognized feature gate: SignatureStores
Nov 28 06:52:35 crc kubenswrapper[4922]: W1128 06:52:35.218798 4922 feature_gate.go:330] unrecognized feature gate: MixedCPUsAllocation
Nov 28 06:52:35 crc kubenswrapper[4922]: W1128 06:52:35.218808 4922 feature_gate.go:351] Setting deprecated feature gate KMSv1=true. It will be removed in a future release.
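The server.go:493 "Golang settings" entry in the block above reports GOGC, GOMAXPROCS, and GOTRACEBACK as empty, meaning the Go runtime defaults are in effect. A small sketch of how a Go process can report those same knobs; the printing format here is illustrative, not the kubelet's:

    package main

    import (
        "fmt"
        "os"
        "runtime"
    )

    func main() {
        // Empty strings mean the runtime defaults apply, as in the log line.
        fmt.Printf("Golang settings GOGC=%q GOMAXPROCS=%q GOTRACEBACK=%q\n",
            os.Getenv("GOGC"), os.Getenv("GOMAXPROCS"), os.Getenv("GOTRACEBACK"))
        // The effective parallelism regardless of the env var:
        fmt.Println("effective GOMAXPROCS:", runtime.GOMAXPROCS(0))
    }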
Nov 28 06:52:35 crc kubenswrapper[4922]: W1128 06:52:35.218818 4922 feature_gate.go:330] unrecognized feature gate: MinimumKubeletVersion
Nov 28 06:52:35 crc kubenswrapper[4922]: W1128 06:52:35.218827 4922 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstallIBMCloud
Nov 28 06:52:35 crc kubenswrapper[4922]: W1128 06:52:35.218835 4922 feature_gate.go:330] unrecognized feature gate: GCPClusterHostedDNS
Nov 28 06:52:35 crc kubenswrapper[4922]: W1128 06:52:35.218843 4922 feature_gate.go:330] unrecognized feature gate: OVNObservability
Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.218856 4922 feature_gate.go:386] feature gates: {map[CloudDualStackNodeIPs:true DisableKubeletCloudCredentialProviders:true DynamicResourceAllocation:false EventedPLEG:false KMSv1:true MaxUnavailableStatefulSet:false NodeSwap:false ProcMountType:false RouteExternalCertificate:false ServiceAccountTokenNodeBinding:false TranslateStreamCloseWebsocketRequests:false UserNamespacesPodSecurityStandards:false UserNamespacesSupport:false ValidatingAdmissionPolicy:true VolumeAttributesClass:false]}
Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.219484 4922 server.go:940] "Client rotation is on, will bootstrap in background"
Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.223911 4922 bootstrap.go:85] "Current kubeconfig file contents are still valid, no bootstrap necessary"
Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.224031 4922 certificate_store.go:130] Loading cert/key pair from "/var/lib/kubelet/pki/kubelet-client-current.pem".
Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.224849 4922 server.go:997] "Starting client certificate rotation"
Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.224876 4922 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Certificate rotation is enabled
Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.225140 4922 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Certificate expiration is 2026-02-24 05:52:08 +0000 UTC, rotation deadline is 2025-12-01 01:44:16.649574585 +0000 UTC
Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.225378 4922 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Waiting 66h51m41.424202988s for next certificate rotation
Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.232790 4922 dynamic_cafile_content.go:123] "Loaded a new CA Bundle and Verifier" name="client-ca-bundle::/etc/kubernetes/kubelet-ca.crt"
Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.235201 4922 dynamic_cafile_content.go:161] "Starting controller" name="client-ca-bundle::/etc/kubernetes/kubelet-ca.crt"
Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.246028 4922 log.go:25] "Validated CRI v1 runtime API"
Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.267508 4922 log.go:25] "Validated CRI v1 image API"
Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.269892 4922 server.go:1437] "Using cgroup driver setting received from the CRI runtime" cgroupDriver="systemd"
Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.273868 4922 fs.go:133] Filesystem UUIDs: map[0b076daa-c26a-46d2-b3a6-72a8dbc6e257:/dev/vda4 2025-11-28-06-43-13-00:/dev/sr0 7B77-95E7:/dev/vda2 de0497b0-db1b-465a-b278-03db02455c71:/dev/vda3]
Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.273926 4922 fs.go:134] Filesystem partitions: map[/dev/shm:{mountpoint:/dev/shm major:0 minor:22 fsType:tmpfs blockSize:0} /dev/vda3:{mountpoint:/boot major:252 minor:3 fsType:ext4 blockSize:0} /dev/vda4:{mountpoint:/var major:252 minor:4 fsType:xfs blockSize:0} /run:{mountpoint:/run major:0 minor:24 fsType:tmpfs blockSize:0} /run/user/1000:{mountpoint:/run/user/1000 major:0 minor:42 fsType:tmpfs blockSize:0} /tmp:{mountpoint:/tmp major:0 minor:30 fsType:tmpfs blockSize:0} /var/lib/etcd:{mountpoint:/var/lib/etcd major:0 minor:41 fsType:tmpfs blockSize:0}]
Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.303797 4922 manager.go:217] Machine: {Timestamp:2025-11-28 06:52:35.301201327 +0000 UTC m=+0.221596989 CPUVendorID:AuthenticAMD NumCores:12 NumPhysicalCores:1 NumSockets:12 CpuFrequency:2799998 MemoryCapacity:33654128640 SwapCapacity:0 MemoryByType:map[] NVMInfo:{MemoryModeCapacity:0 AppDirectModeCapacity:0 AvgPowerBudget:0} HugePages:[{PageSize:1048576 NumPages:0} {PageSize:2048 NumPages:0}] MachineID:21801e6708c44f15b81395eb736a7cec SystemUUID:a83ea3b2-2af6-4e19-83ef-b63bfe4faed4 BootID:3ea1e6bb-61b7-453d-b9cb-290e4e3cdf54 Filesystems:[{Device:/tmp DeviceMajor:0 DeviceMinor:30 Capacity:16827064320 Type:vfs Inodes:1048576 HasInodes:true} {Device:/dev/vda3 DeviceMajor:252 DeviceMinor:3 Capacity:366869504 Type:vfs Inodes:98304 HasInodes:true} {Device:/run/user/1000 DeviceMajor:0 DeviceMinor:42 Capacity:3365412864 Type:vfs Inodes:821634 HasInodes:true} {Device:/var/lib/etcd DeviceMajor:0 DeviceMinor:41 Capacity:1073741824 Type:vfs Inodes:4108170 HasInodes:true} {Device:/dev/shm DeviceMajor:0 DeviceMinor:22 Capacity:16827064320 Type:vfs Inodes:4108170 HasInodes:true} {Device:/run DeviceMajor:0 DeviceMinor:24 Capacity:6730825728 Type:vfs Inodes:819200 HasInodes:true} {Device:/dev/vda4 DeviceMajor:252 DeviceMinor:4 Capacity:85292941312 Type:vfs Inodes:41679680 HasInodes:true}] DiskMap:map[252:0:{Name:vda Major:252 Minor:0 Size:214748364800 Scheduler:none}] NetworkDevices:[{Name:br-ex MacAddress:fa:16:3e:98:f8:50 Speed:0 Mtu:1500} {Name:br-int MacAddress:d6:39:55:2e:22:71 Speed:0 Mtu:1400} {Name:ens3 MacAddress:fa:16:3e:98:f8:50 Speed:-1 Mtu:1500} {Name:ens7 MacAddress:fa:16:3e:69:7c:99 Speed:-1 Mtu:1500} {Name:ens7.20 MacAddress:52:54:00:60:f8:96 Speed:-1 Mtu:1496} {Name:ens7.21 MacAddress:52:54:00:e5:6b:fb Speed:-1 Mtu:1496} {Name:ens7.22 MacAddress:52:54:00:4e:f9:b5 Speed:-1 Mtu:1496} {Name:ens7.23 MacAddress:52:54:00:ca:2e:b8 Speed:-1 Mtu:1496} {Name:ens7.44 MacAddress:52:54:00:67:87:6f Speed:-1 Mtu:1496} {Name:eth10 MacAddress:7e:17:c6:9c:a9:16 Speed:0 Mtu:1500} {Name:ovn-k8s-mp0 MacAddress:0a:58:0a:d9:00:02 Speed:0 Mtu:1400} {Name:ovs-system MacAddress:e6:3d:84:cc:d3:a5 Speed:0 Mtu:1500}] Topology:[{Id:0 Memory:33654128640 HugePages:[{PageSize:1048576 NumPages:0} {PageSize:2048 NumPages:0}] Cores:[{Id:0 Threads:[0] Caches:[{Id:0 Size:32768 Type:Data Level:1} {Id:0 Size:32768 Type:Instruction Level:1} {Id:0 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:0 Size:16777216 Type:Unified Level:3}] SocketID:0 BookID: DrawerID:} {Id:0 Threads:[1] Caches:[{Id:1 Size:32768 Type:Data Level:1} {Id:1 Size:32768 Type:Instruction Level:1} {Id:1 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:1 Size:16777216 Type:Unified Level:3}] SocketID:1 BookID: DrawerID:} {Id:0 Threads:[10] Caches:[{Id:10 Size:32768 Type:Data Level:1} {Id:10 Size:32768 Type:Instruction Level:1} {Id:10 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:10 Size:16777216 Type:Unified Level:3}] SocketID:10 BookID: DrawerID:} {Id:0 Threads:[11] Caches:[{Id:11 Size:32768 Type:Data Level:1} {Id:11 Size:32768 Type:Instruction Level:1} {Id:11 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:11 Size:16777216 Type:Unified Level:3}] SocketID:11 BookID: DrawerID:} {Id:0 Threads:[2] Caches:[{Id:2 Size:32768 Type:Data Level:1} {Id:2 Size:32768 Type:Instruction Level:1} {Id:2 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:2 Size:16777216 Type:Unified Level:3}] SocketID:2 BookID: DrawerID:} {Id:0 Threads:[3] Caches:[{Id:3 Size:32768 Type:Data Level:1} {Id:3 Size:32768 Type:Instruction Level:1} {Id:3 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:3 Size:16777216 Type:Unified Level:3}] SocketID:3 BookID: DrawerID:} {Id:0 Threads:[4] Caches:[{Id:4 Size:32768 Type:Data Level:1} {Id:4 Size:32768 Type:Instruction Level:1} {Id:4 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:4 Size:16777216 Type:Unified Level:3}] SocketID:4 BookID: DrawerID:} {Id:0 Threads:[5] Caches:[{Id:5 Size:32768 Type:Data Level:1} {Id:5 Size:32768 Type:Instruction Level:1} {Id:5 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:5 Size:16777216 Type:Unified Level:3}] SocketID:5 BookID: DrawerID:} {Id:0 Threads:[6] Caches:[{Id:6 Size:32768 Type:Data Level:1} {Id:6 Size:32768 Type:Instruction Level:1} {Id:6 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:6 Size:16777216 Type:Unified Level:3}] SocketID:6 BookID: DrawerID:} {Id:0 Threads:[7] Caches:[{Id:7 Size:32768 Type:Data Level:1} {Id:7 Size:32768 Type:Instruction Level:1} {Id:7 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:7 Size:16777216 Type:Unified Level:3}] SocketID:7 BookID: DrawerID:} {Id:0 Threads:[8] Caches:[{Id:8 Size:32768 Type:Data Level:1} {Id:8 Size:32768 Type:Instruction Level:1} {Id:8 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:8 Size:16777216 Type:Unified Level:3}] SocketID:8 BookID: DrawerID:} {Id:0 Threads:[9] Caches:[{Id:9 Size:32768 Type:Data Level:1} {Id:9 Size:32768 Type:Instruction Level:1} {Id:9 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:9 Size:16777216 Type:Unified Level:3}] SocketID:9 BookID: DrawerID:}] Caches:[] Distances:[10]}] CloudProvider:Unknown InstanceType:Unknown InstanceID:None}
Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.304286 4922 manager_no_libpfm.go:29] cAdvisor is build without cgo and/or libpfm support. Perf event counters are not available.
Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.304612 4922 manager.go:233] Version: {KernelVersion:5.14.0-427.50.2.el9_4.x86_64 ContainerOsVersion:Red Hat Enterprise Linux CoreOS 418.94.202502100215-0 DockerVersion: DockerAPIVersion: CadvisorVersion: CadvisorRevision:}
Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.305559 4922 swap_util.go:113] "Swap is on" /proc/swaps contents="Filename\t\t\t\tType\t\tSize\t\tUsed\t\tPriority"
Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.305988 4922 container_manager_linux.go:267] "Container manager verified user specified cgroup-root exists" cgroupRoot=[]
Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.306067 4922 container_manager_linux.go:272] "Creating Container Manager object based on Node Config" nodeConfig={"NodeName":"crc","RuntimeCgroupsName":"/system.slice/crio.service","SystemCgroupsName":"/system.slice","KubeletCgroupsName":"","KubeletOOMScoreAdj":-999,"ContainerRuntime":"","CgroupsPerQOS":true,"CgroupRoot":"/","CgroupDriver":"systemd","KubeletRootDir":"/var/lib/kubelet","ProtectKernelDefaults":true,"KubeReservedCgroupName":"","SystemReservedCgroupName":"","ReservedSystemCPUs":{},"EnforceNodeAllocatable":{"pods":{}},"KubeReserved":null,"SystemReserved":{"cpu":"200m","ephemeral-storage":"350Mi","memory":"350Mi"},"HardEvictionThresholds":[{"Signal":"imagefs.inodesFree","Operator":"LessThan","Value":{"Quantity":null,"Percentage":0.05},"GracePeriod":0,"MinReclaim":null},{"Signal":"memory.available","Operator":"LessThan","Value":{"Quantity":"100Mi","Percentage":0},"GracePeriod":0,"MinReclaim":null},{"Signal":"nodefs.available","Operator":"LessThan","Value":{"Quantity":null,"Percentage":0.1},"GracePeriod":0,"MinReclaim":null},{"Signal":"nodefs.inodesFree","Operator":"LessThan","Value":{"Quantity":null,"Percentage":0.05},"GracePeriod":0,"MinReclaim":null},{"Signal":"imagefs.available","Operator":"LessThan","Value":{"Quantity":null,"Percentage":0.15},"GracePeriod":0,"MinReclaim":null}],"QOSReserved":{},"CPUManagerPolicy":"none","CPUManagerPolicyOptions":null,"TopologyManagerScope":"container","CPUManagerReconcilePeriod":10000000000,"ExperimentalMemoryManagerPolicy":"None","ExperimentalMemoryManagerReservedMemory":null,"PodPidsLimit":4096,"EnforceCPULimits":true,"CPUCFSQuotaPeriod":100000000,"TopologyManagerPolicy":"none","TopologyManagerPolicyOptions":null,"CgroupVersion":2}
Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.306597 4922 topology_manager.go:138] "Creating topology manager with none policy"
Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.306624 4922 container_manager_linux.go:303] "Creating device plugin manager"
Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.307087 4922 manager.go:142] "Creating Device Plugin manager" path="/var/lib/kubelet/device-plugins/kubelet.sock"
Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.307155 4922 server.go:66] "Creating device plugin registration server" version="v1beta1" socket="/var/lib/kubelet/device-plugins/kubelet.sock"
Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.307726 4922 state_mem.go:36] "Initialized new in-memory state store"
Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.307912 4922 server.go:1245] "Using root directory" path="/var/lib/kubelet"
Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.309251 4922 kubelet.go:418] "Attempting to sync node with API server"
Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.309299 4922 kubelet.go:313] "Adding static pod path" path="/etc/kubernetes/manifests"
Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.309364 4922 file.go:69] "Watching path" path="/etc/kubernetes/manifests"
Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.309393 4922 kubelet.go:324] "Adding apiserver pod source"
Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.309418 4922 apiserver.go:42] "Waiting for node sync before watching apiserver pods"
Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.312289 4922 kuberuntime_manager.go:262] "Container runtime initialized" containerRuntime="cri-o" version="1.31.5-4.rhaos4.18.gitdad78d5.el9" apiVersion="v1"
Nov 28 06:52:35 crc kubenswrapper[4922]: W1128 06:52:35.312670 4922 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Node: Get "https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0": dial tcp 38.102.83.143:6443: connect: connection refused
Nov 28 06:52:35 crc kubenswrapper[4922]: E1128 06:52:35.312804 4922 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Node: failed to list *v1.Node: Get \"https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0\": dial tcp 38.102.83.143:6443: connect: connection refused" logger="UnhandledError"
Nov 28 06:52:35 crc kubenswrapper[4922]: W1128 06:52:35.312668 4922 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Service: Get "https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0": dial tcp 38.102.83.143:6443: connect: connection refused
Nov 28 06:52:35 crc kubenswrapper[4922]: E1128 06:52:35.312911 4922 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Service: failed to list *v1.Service: Get \"https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0\": dial tcp 38.102.83.143:6443: connect: connection refused" logger="UnhandledError"
Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.313036 4922 certificate_store.go:130] Loading cert/key pair from "/var/lib/kubelet/pki/kubelet-server-current.pem".
Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.314654 4922 kubelet.go:854] "Not starting ClusterTrustBundle informer because we are in static kubelet mode"
Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.315787 4922 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/portworx-volume"
Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.315848 4922 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/empty-dir"
Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.315863 4922 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/git-repo"
Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.315877 4922 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/host-path"
Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.315901 4922 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/nfs"
Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.315917 4922 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/secret"
Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.315933 4922 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/iscsi"
Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.315956 4922 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/downward-api"
Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.315974 4922 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/fc"
Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.315990 4922 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/configmap"
Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.316011 4922 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/projected"
Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.316025 4922 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/local-volume"
Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.316332 4922 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/csi"
Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.317248 4922 server.go:1280] "Started kubelet"
Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.317801 4922 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.102.83.143:6443: connect: connection refused
Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.317609 4922 server.go:163] "Starting to listen" address="0.0.0.0" port=10250
Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.317607 4922 ratelimit.go:55] "Setting rate limiting for endpoint" service="podresources" qps=100 burstTokens=10
Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.319061 4922 server.go:236] "Starting to serve the podresources API" endpoint="unix:/var/lib/kubelet/pod-resources/kubelet.sock"
Nov 28 06:52:35 crc systemd[1]: Started Kubernetes Kubelet.
Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.323886 4922 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate rotation is enabled
Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.323945 4922 fs_resource_analyzer.go:67] "Starting FS ResourceAnalyzer"
Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.324423 4922 volume_manager.go:287] "The desired_state_of_world populator starts"
Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.324468 4922 volume_manager.go:289] "Starting Kubelet Volume Manager"
Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.324723 4922 desired_state_of_world_populator.go:146] "Desired state populator starts to run"
Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.325164 4922 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-17 22:01:12.382954354 +0000 UTC
Nov 28 06:52:35 crc kubenswrapper[4922]: E1128 06:52:35.325290 4922 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.143:6443: connect: connection refused" interval="200ms"
Nov 28 06:52:35 crc kubenswrapper[4922]: E1128 06:52:35.325271 4922 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found"
Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.326806 4922 factory.go:55] Registering systemd factory
Nov 28 06:52:35 crc kubenswrapper[4922]: E1128 06:52:35.326201 4922 event.go:368] "Unable to write event (may retry after sleeping)" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/default/events\": dial tcp 38.102.83.143:6443: connect: connection refused" event="&Event{ObjectMeta:{crc.187c191a6d5eae7f default 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Node,Namespace:,Name:crc,UID:crc,APIVersion:,ResourceVersion:,FieldPath:,},Reason:Starting,Message:Starting kubelet.,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2025-11-28 06:52:35.317157503 +0000 UTC m=+0.237553145,LastTimestamp:2025-11-28 06:52:35.317157503 +0000 UTC m=+0.237553145,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}"
Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.326927 4922 factory.go:221] Registration of the systemd container factory successfully
Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.328307 4922 factory.go:153] Registering CRI-O factory
Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.328365 4922 factory.go:221] Registration of the crio container factory successfully
Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.328495 4922 factory.go:219] Registration of the containerd container factory failed: unable to create containerd client: containerd: cannot unix dial containerd api service: dial unix /run/containerd/containerd.sock: connect: no such file or directory
Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.328532 4922 factory.go:103] Registering Raw factory
Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.328556 4922 manager.go:1196] Started watching for new ooms in manager
Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.330116 4922 manager.go:319] Starting recovery of all containers
Nov 28 06:52:35 crc kubenswrapper[4922]: W1128 06:52:35.330132 4922 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.CSIDriver: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0": dial tcp 38.102.83.143:6443: connect: connection refused
Nov 28 06:52:35 crc kubenswrapper[4922]: E1128 06:52:35.330476 4922 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.CSIDriver: failed to list *v1.CSIDriver: Get \"https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0\": dial tcp 38.102.83.143:6443: connect: connection refused" logger="UnhandledError"
Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.331901 4922 server.go:460] "Adding debug handlers to kubelet server"
Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.351707 4922 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-serving-cert" seLinuxMountContext=""
Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.351808 4922 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b78653f-4ff9-4508-8672-245ed9b561e3" volumeName="kubernetes.io/configmap/0b78653f-4ff9-4508-8672-245ed9b561e3-service-ca" seLinuxMountContext=""
Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.351831 4922 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1386a44e-36a2-460c-96d0-0359d2b6f0f5" volumeName="kubernetes.io/secret/1386a44e-36a2-460c-96d0-0359d2b6f0f5-serving-cert" seLinuxMountContext=""
Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.351850 4922 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-trusted-ca-bundle" seLinuxMountContext=""
Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.351870 4922 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-trusted-ca-bundle" seLinuxMountContext=""
Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.351934 4922 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-service-ca" seLinuxMountContext=""
Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.351955 4922 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" volumeName="kubernetes.io/projected/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-kube-api-access-w4xd4" seLinuxMountContext=""
Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.351973 4922 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bf126b07-da06-4140-9a57-dfd54fc6b486" volumeName="kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-bound-sa-token" seLinuxMountContext=""
Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.351996 4922 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b574797-001e-440a-8f4e-c0be86edad0f" volumeName="kubernetes.io/configmap/0b574797-001e-440a-8f4e-c0be86edad0f-mcc-auth-proxy-config" seLinuxMountContext=""
Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.352014 4922 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/secret/6ea678ab-3438-413e-bfe3-290ae7725660-ovn-node-metrics-cert" seLinuxMountContext=""
Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.352030 4922 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="87cf06ed-a83f-41a7-828d-70653580a8cb" volumeName="kubernetes.io/projected/87cf06ed-a83f-41a7-828d-70653580a8cb-kube-api-access-d6qdx" seLinuxMountContext=""
Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.352049 4922 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-trusted-ca-bundle" seLinuxMountContext=""
Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.352067 4922 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/projected/09efc573-dbb6-4249-bd59-9b87aba8dd28-kube-api-access-8tdtz" seLinuxMountContext=""
Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.352088 4922 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="22c825df-677d-4ca6-82db-3454ed06e783" volumeName="kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-auth-proxy-config" seLinuxMountContext=""
Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.352106 4922 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="44663579-783b-4372-86d6-acf235a62d72" volumeName="kubernetes.io/projected/44663579-783b-4372-86d6-acf235a62d72-kube-api-access-vt5rc" seLinuxMountContext=""
Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.352125 4922 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" volumeName="kubernetes.io/projected/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-kube-api-access-mnrrd" seLinuxMountContext=""
Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.352145 4922 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" volumeName="kubernetes.io/secret/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-serving-cert" seLinuxMountContext=""
Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.352163 4922 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="25e176fe-21b4-4974-b1ed-c8b94f112a7f" volumeName="kubernetes.io/secret/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-key" seLinuxMountContext=""
Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.352263 4922 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3ab1a177-2de0-46d9-b765-d0d0649bb42e" volumeName="kubernetes.io/secret/3ab1a177-2de0-46d9-b765-d0d0649bb42e-package-server-manager-serving-cert" seLinuxMountContext=""
Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.352282 4922 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-audit-policies" seLinuxMountContext=""
Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.352299 4922 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="57a731c4-ef35-47a8-b875-bfb08a7f8011" volumeName="kubernetes.io/projected/57a731c4-ef35-47a8-b875-bfb08a7f8011-kube-api-access-cfbct" seLinuxMountContext=""
Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.352358 4922 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-script-lib" seLinuxMountContext=""
Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.352379 4922 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="87cf06ed-a83f-41a7-828d-70653580a8cb" volumeName="kubernetes.io/secret/87cf06ed-a83f-41a7-828d-70653580a8cb-metrics-tls" seLinuxMountContext=""
Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.352399 4922 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="96b93a3a-6083-4aea-8eab-fe1aa8245ad9" volumeName="kubernetes.io/projected/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-kube-api-access-nzwt7" seLinuxMountContext=""
Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.352418 4922 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="22c825df-677d-4ca6-82db-3454ed06e783" volumeName="kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-config" seLinuxMountContext=""
Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.352477 4922 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6402fda4-df10-493c-b4e5-d0569419652d" volumeName="kubernetes.io/projected/6402fda4-df10-493c-b4e5-d0569419652d-kube-api-access-mg5zb" seLinuxMountContext=""
Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.352508 4922 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" seLinuxMountContext=""
Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.354374 4922 reconstruct.go:144] "Volume is marked device as uncertain and added into the actual state" volumeName="kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" deviceMountPath="/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983/globalmount"
Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.354426 4922 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="96b93a3a-6083-4aea-8eab-fe1aa8245ad9" volumeName="kubernetes.io/secret/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-metrics-tls" seLinuxMountContext=""
Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.354453 4922 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1386a44e-36a2-460c-96d0-0359d2b6f0f5" volumeName="kubernetes.io/configmap/1386a44e-36a2-460c-96d0-0359d2b6f0f5-config" seLinuxMountContext=""
Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.354474 4922 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3ab1a177-2de0-46d9-b765-d0d0649bb42e" volumeName="kubernetes.io/projected/3ab1a177-2de0-46d9-b765-d0d0649bb42e-kube-api-access-4d4hj" seLinuxMountContext=""
Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.354497 4922 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-serving-cert" seLinuxMountContext=""
Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.354517 4922 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" volumeName="kubernetes.io/secret/6509e943-70c6-444c-bc41-48a544e36fbd-serving-cert" seLinuxMountContext=""
Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.354534 4922 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-config" seLinuxMountContext=""
Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.354551 4922 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-config" seLinuxMountContext=""
Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.354984 4922 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/secret/8f668bae-612b-4b75-9490-919e737c6a3b-installation-pull-secrets" seLinuxMountContext=""
Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.355003 4922 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="925f1c65-6136-48ba-85aa-3a3b50560753" volumeName="kubernetes.io/secret/925f1c65-6136-48ba-85aa-3a3b50560753-ovn-control-plane-metrics-cert" seLinuxMountContext=""
Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.355022 4922 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-serving-cert" seLinuxMountContext=""
Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.355041 4922 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5b88f790-22fa-440e-b583-365168c0b23d" volumeName="kubernetes.io/secret/5b88f790-22fa-440e-b583-365168c0b23d-metrics-certs" seLinuxMountContext=""
Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.355094 4922 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe579f8-e8a6-4643-bce5-a661393c4dde" volumeName="kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-certs" seLinuxMountContext=""
Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.355112 4922 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7539238d-5fe0-46ed-884e-1c3b566537ec" volumeName="kubernetes.io/configmap/7539238d-5fe0-46ed-884e-1c3b566537ec-config" seLinuxMountContext=""
Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.355133 4922 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-client-ca" seLinuxMountContext=""
Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.355151 4922 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-proxy-ca-bundles" seLinuxMountContext=""
Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.355170 4922 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" volumeName="kubernetes.io/secret/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-serving-cert" seLinuxMountContext=""
Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.355187 4922 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-trusted-ca" seLinuxMountContext=""
Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.355206 4922 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-client" seLinuxMountContext=""
Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.355250 4922 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="22c825df-677d-4ca6-82db-3454ed06e783" volumeName="kubernetes.io/secret/22c825df-677d-4ca6-82db-3454ed06e783-machine-approver-tls" seLinuxMountContext=""
Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.355273 4922 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-oauth-config" seLinuxMountContext=""
Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.355295 4922 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="496e6271-fb68-4057-954e-a0d97a4afa3f" volumeName="kubernetes.io/configmap/496e6271-fb68-4057-954e-a0d97a4afa3f-config" seLinuxMountContext=""
Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.355313 4922 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-ocp-branding-template" seLinuxMountContext=""
Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.355331 4922 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5225d0e4-402f-4861-b410-819f433b1803" volumeName="kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-utilities" seLinuxMountContext=""
Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.355350 4922 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5441d097-087c-4d9a-baa8-b210afa90fc9" volumeName="kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-config" seLinuxMountContext=""
Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.355367 4922 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/projected/6ea678ab-3438-413e-bfe3-290ae7725660-kube-api-access-htfz6" seLinuxMountContext=""
Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.355394 4922 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7bb08738-c794-4ee8-9972-3a62ca171029" volumeName="kubernetes.io/projected/7bb08738-c794-4ee8-9972-3a62ca171029-kube-api-access-279lb" seLinuxMountContext=""
Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.355414 4922 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a0128f3a-b052-44ed-a84e-c4c8aaf17c13" volumeName="kubernetes.io/secret/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-samples-operator-tls" seLinuxMountContext=""
Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.355436 4922 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a31745f5-9847-4afe-82a5-3161cc66ca93" volumeName="kubernetes.io/secret/a31745f5-9847-4afe-82a5-3161cc66ca93-metrics-tls" seLinuxMountContext=""
Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.355456 4922 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="efdd0498-1daa-4136-9a4a-3b948c2293fc" volumeName="kubernetes.io/secret/efdd0498-1daa-4136-9a4a-3b948c2293fc-webhook-certs" seLinuxMountContext=""
Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.355515 4922 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="01ab3dd5-8196-46d0-ad33-122e2ca51def" volumeName="kubernetes.io/projected/01ab3dd5-8196-46d0-ad33-122e2ca51def-kube-api-access-w7l8j" seLinuxMountContext=""
Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.355533 4922 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-error" seLinuxMountContext=""
Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.355553 4922 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-registry-certificates" seLinuxMountContext=""
Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.355572 4922 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="e7e6199b-1264-4501-8953-767f51328d08" volumeName="kubernetes.io/configmap/e7e6199b-1264-4501-8953-767f51328d08-config" seLinuxMountContext=""
Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.355591 4922 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="ef543e1b-8068-4ea3-b32a-61027b32e95d" volumeName="kubernetes.io/secret/ef543e1b-8068-4ea3-b32a-61027b32e95d-webhook-cert" seLinuxMountContext=""
Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.355610 4922 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="fda69060-fa79-4696-b1a6-7980f124bf7c" volumeName="kubernetes.io/configmap/fda69060-fa79-4696-b1a6-7980f124bf7c-mcd-auth-proxy-config" seLinuxMountContext=""
Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.355628 4922 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-image-import-ca" seLinuxMountContext=""
Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.355657 4922 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/projected/43509403-f426-496e-be36-56cef71462f5-kube-api-access-qg5z5" seLinuxMountContext=""
Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.355676 4922 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe579f8-e8a6-4643-bce5-a661393c4dde" volumeName="kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-node-bootstrap-token" seLinuxMountContext=""
Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.355699 4922 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6731426b-95fe-49ff-bb5f-40441049fde2" volumeName="kubernetes.io/secret/6731426b-95fe-49ff-bb5f-40441049fde2-control-plane-machine-set-operator-tls" seLinuxMountContext=""
Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.355717 4922 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="efdd0498-1daa-4136-9a4a-3b948c2293fc" volumeName="kubernetes.io/projected/efdd0498-1daa-4136-9a4a-3b948c2293fc-kube-api-access-fqsjt" seLinuxMountContext=""
Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.355736 4922 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" volumeName="kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-trusted-ca-bundle" seLinuxMountContext=""
Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.355754 4922 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="d75a4c96-2883-4a0b-bab2-0fab2b6c0b49" volumeName="kubernetes.io/configmap/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-iptables-alerter-script" seLinuxMountContext=""
Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.355771 4922 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="e7e6199b-1264-4501-8953-767f51328d08" volumeName="kubernetes.io/secret/e7e6199b-1264-4501-8953-767f51328d08-serving-cert" seLinuxMountContext=""
Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.355789 4922 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" volumeName="kubernetes.io/projected/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-kube-api-access-dbsvg" seLinuxMountContext=""
Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.355808 4922 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-audit-policies" seLinuxMountContext=""
Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.355826 4922 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-serving-cert" seLinuxMountContext=""
Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.355844 4922 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="20b0d48f-5fd6-431c-a545-e3c800c7b866" volumeName="kubernetes.io/projected/20b0d48f-5fd6-431c-a545-e3c800c7b866-kube-api-access-w9rds" seLinuxMountContext=""
Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.355862 4922 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-env-overrides" seLinuxMountContext=""
Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.355882 4922 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7bb08738-c794-4ee8-9972-3a62ca171029" volumeName="kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-binary-copy" seLinuxMountContext=""
Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.355903 4922 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" volumeName="kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-utilities" seLinuxMountContext=""
Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.355919 4922 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="ef543e1b-8068-4ea3-b32a-61027b32e95d" volumeName="kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-env-overrides" seLinuxMountContext=""
Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.355939 4922 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1386a44e-36a2-460c-96d0-0359d2b6f0f5" volumeName="kubernetes.io/projected/1386a44e-36a2-460c-96d0-0359d2b6f0f5-kube-api-access" seLinuxMountContext=""
Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.355959 4922 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="496e6271-fb68-4057-954e-a0d97a4afa3f" volumeName="kubernetes.io/secret/496e6271-fb68-4057-954e-a0d97a4afa3f-serving-cert" seLinuxMountContext=""
Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.355976 4922 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5225d0e4-402f-4861-b410-819f433b1803" volumeName="kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-catalog-content" seLinuxMountContext=""
Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.355994 4922 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6402fda4-df10-493c-b4e5-d0569419652d" volumeName="kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-config" seLinuxMountContext=""
Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.356011 4922 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" volumeName="kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-service-ca-bundle" seLinuxMountContext=""
Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.356030 4922 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-stats-auth" seLinuxMountContext=""
Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.356047 4922 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" volumeName="kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-profile-collector-cert" seLinuxMountContext=""
Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.356067 4922 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-serving-ca" seLinuxMountContext=""
Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.356084 4922 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1d611f23-29be-4491-8495-bee1670e935f" volumeName="kubernetes.io/projected/1d611f23-29be-4491-8495-bee1670e935f-kube-api-access-bf2bz" seLinuxMountContext=""
Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.356102 4922 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-bound-sa-token" seLinuxMountContext=""
Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.356119 4922 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="925f1c65-6136-48ba-85aa-3a3b50560753" volumeName="kubernetes.io/projected/925f1c65-6136-48ba-85aa-3a3b50560753-kube-api-access-s4n52" seLinuxMountContext=""
Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.356137 4922 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d4552c7-cd75-42dd-8880-30dd377c49a4" volumeName="kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-config" seLinuxMountContext=""
Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.356157 4922 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a31745f5-9847-4afe-82a5-3161cc66ca93" volumeName="kubernetes.io/configmap/a31745f5-9847-4afe-82a5-3161cc66ca93-trusted-ca" seLinuxMountContext=""
Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.356177 4922 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" volumeName="kubernetes.io/projected/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-kube-api-access-6ccd8" seLinuxMountContext=""
Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.356197 4922 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="31d8b7a1-420e-4252-a5b7-eebe8a111292" volumeName="kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-images" seLinuxMountContext=""
Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.356241 4922 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="37a5e44f-9a88-4405-be8a-b645485e7312" volumeName="kubernetes.io/secret/37a5e44f-9a88-4405-be8a-b645485e7312-metrics-tls" seLinuxMountContext=""
Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.356261 4922 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/projected/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-kube-api-access-ngvvp" seLinuxMountContext=""
Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.356283 4922 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-router-certs" seLinuxMountContext=""
Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.356303 4922 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" volumeName="kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf" seLinuxMountContext=""
Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.356323 4922 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bf126b07-da06-4140-9a57-dfd54fc6b486" volumeName="kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-kube-api-access-rnphk" seLinuxMountContext=""
Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.356343 4922 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-ca" seLinuxMountContext=""
Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.356362 4922 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b78653f-4ff9-4508-8672-245ed9b561e3" volumeName="kubernetes.io/secret/0b78653f-4ff9-4508-8672-245ed9b561e3-serving-cert" seLinuxMountContext=""
Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.356382 4922 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-audit" seLinuxMountContext=""
Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.356402 4922 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" volumeName="kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-webhook-cert" seLinuxMountContext=""
Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.356421 4922 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3b6479f0-333b-4a96-9adf-2099afdc2447" volumeName="kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr" seLinuxMountContext=""
Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.356445 4922 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" volumeName="kubernetes.io/configmap/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-config" seLinuxMountContext=""
Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.356471 4922 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6312bbd-5731-4ea0-a20f-81d5a57df44a" volumeName="kubernetes.io/projected/b6312bbd-5731-4ea0-a20f-81d5a57df44a-kube-api-access-249nr" seLinuxMountContext=""
Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.356492 4922 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6312bbd-5731-4ea0-a20f-81d5a57df44a" volumeName="kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-profile-collector-cert" seLinuxMountContext=""
Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.356514 4922 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6cd30de-2eeb-49a2-ab40-9167f4560ff5" volumeName="kubernetes.io/configmap/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-trusted-ca" seLinuxMountContext=""
Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.356536 4922 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6cd30de-2eeb-49a2-ab40-9167f4560ff5" volumeName="kubernetes.io/secret/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-operator-metrics" seLinuxMountContext=""
Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.356556 4922 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bf126b07-da06-4140-9a57-dfd54fc6b486" volumeName="kubernetes.io/secret/bf126b07-da06-4140-9a57-dfd54fc6b486-image-registry-operator-tls" seLinuxMountContext=""
Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.356577 4922 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/projected/1bf7eb37-55a3-4c65-b768-a94c82151e69-kube-api-access-sb6h7" seLinuxMountContext=""
Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.356598 4922 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="20b0d48f-5fd6-431c-a545-e3c800c7b866" volumeName="kubernetes.io/secret/20b0d48f-5fd6-431c-a545-e3c800c7b866-cert" seLinuxMountContext=""
Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.356619 4922 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="496e6271-fb68-4057-954e-a0d97a4afa3f" volumeName="kubernetes.io/projected/496e6271-fb68-4057-954e-a0d97a4afa3f-kube-api-access" seLinuxMountContext=""
Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.356641 4922 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/empty-dir/8f668bae-612b-4b75-9490-919e737c6a3b-ca-trust-extracted" seLinuxMountContext=""
Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.356659 4922 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" volumeName="kubernetes.io/projected/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-kube-api-access-x4zgh" seLinuxMountContext=""
Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.356681 4922 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-service-ca" seLinuxMountContext=""
Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.356699 4922 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5441d097-087c-4d9a-baa8-b210afa90fc9" volumeName="kubernetes.io/projected/5441d097-087c-4d9a-baa8-b210afa90fc9-kube-api-access-2d4wz" seLinuxMountContext=""
Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.356717 4922 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7bb08738-c794-4ee8-9972-3a62ca171029" volumeName="kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-sysctl-allowlist" seLinuxMountContext=""
Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.356735 4922 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="01ab3dd5-8196-46d0-ad33-122e2ca51def" volumeName="kubernetes.io/configmap/01ab3dd5-8196-46d0-ad33-122e2ca51def-config" seLinuxMountContext=""
Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.356754 4922 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-client" seLinuxMountContext=""
Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.356772 4922 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1d611f23-29be-4491-8495-bee1670e935f" volumeName="kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-catalog-content" seLinuxMountContext=""
Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.356792 4922 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="37a5e44f-9a88-4405-be8a-b645485e7312" volumeName="kubernetes.io/projected/37a5e44f-9a88-4405-be8a-b645485e7312-kube-api-access-rdwmf" seLinuxMountContext=""
Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.356810 4922 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-oauth-serving-cert" seLinuxMountContext=""
Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.356830 4922 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-provider-selection" seLinuxMountContext=""
Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.356849 4922 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-login" seLinuxMountContext=""
Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.356868 4922 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6cd30de-2eeb-49a2-ab40-9167f4560ff5" volumeName="kubernetes.io/projected/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-kube-api-access-pj782" seLinuxMountContext=""
Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.356889 4922 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b78653f-4ff9-4508-8672-245ed9b561e3" volumeName="kubernetes.io/projected/0b78653f-4ff9-4508-8672-245ed9b561e3-kube-api-access" seLinuxMountContext=""
Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.356912 4922 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-session" seLinuxMountContext=""
Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.356937 4922 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="4bb40260-dbaa-4fb0-84df-5e680505d512" volumeName="kubernetes.io/projected/4bb40260-dbaa-4fb0-84df-5e680505d512-kube-api-access-2w9zh" seLinuxMountContext=""
Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.356963 4922 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5441d097-087c-4d9a-baa8-b210afa90fc9" volumeName="kubernetes.io/secret/5441d097-087c-4d9a-baa8-b210afa90fc9-serving-cert" seLinuxMountContext=""
Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.356982 4922 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5b88f790-22fa-440e-b583-365168c0b23d" volumeName="kubernetes.io/projected/5b88f790-22fa-440e-b583-365168c0b23d-kube-api-access-jkwtn" seLinuxMountContext=""
Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.357006 4922 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" volumeName="kubernetes.io/projected/6509e943-70c6-444c-bc41-48a544e36fbd-kube-api-access-6g6sz" seLinuxMountContext=""
Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.357048 4922 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="925f1c65-6136-48ba-85aa-3a3b50560753" volumeName="kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-env-overrides" seLinuxMountContext=""
Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.357067 4922 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" volumeName="kubernetes.io/secret/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-serving-cert" seLinuxMountContext=""
Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.357089 4922 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" volumeName="kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-srv-cert" seLinuxMountContext=""
Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.357112 4922 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-encryption-config" seLinuxMountContext=""
Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.357136 4922 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a31745f5-9847-4afe-82a5-3161cc66ca93" volumeName="kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-bound-sa-token" seLinuxMountContext=""
Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.357162 4922 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="e7e6199b-1264-4501-8953-767f51328d08" volumeName="kubernetes.io/projected/e7e6199b-1264-4501-8953-767f51328d08-kube-api-access" seLinuxMountContext=""
Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.357183 4922 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-serving-cert" seLinuxMountContext=""
Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.357207 4922 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b574797-001e-440a-8f4e-c0be86edad0f" volumeName="kubernetes.io/projected/0b574797-001e-440a-8f4e-c0be86edad0f-kube-api-access-lzf88" seLinuxMountContext=""
Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.357267 4922 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69"
volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-config" seLinuxMountContext="" Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.357288 4922 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1d611f23-29be-4491-8495-bee1670e935f" volumeName="kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-utilities" seLinuxMountContext="" Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.357308 4922 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" volumeName="kubernetes.io/configmap/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-config" seLinuxMountContext="" Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.357328 4922 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" volumeName="kubernetes.io/projected/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-kube-api-access-qs4fp" seLinuxMountContext="" Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.357348 4922 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" volumeName="kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-apiservice-cert" seLinuxMountContext="" Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.357369 4922 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" volumeName="kubernetes.io/configmap/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-serviceca" seLinuxMountContext="" Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.357388 4922 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-cliconfig" seLinuxMountContext="" Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.357407 4922 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="4bb40260-dbaa-4fb0-84df-5e680505d512" volumeName="kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-multus-daemon-config" seLinuxMountContext="" Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.357433 4922 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b574797-001e-440a-8f4e-c0be86edad0f" volumeName="kubernetes.io/secret/0b574797-001e-440a-8f4e-c0be86edad0f-proxy-tls" seLinuxMountContext="" Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.357454 4922 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-idp-0-file-data" seLinuxMountContext="" Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.357473 4922 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="57a731c4-ef35-47a8-b875-bfb08a7f8011" volumeName="kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-catalog-content" seLinuxMountContext="" Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.357493 4922 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6402fda4-df10-493c-b4e5-d0569419652d" 
volumeName="kubernetes.io/secret/6402fda4-df10-493c-b4e5-d0569419652d-machine-api-operator-tls" seLinuxMountContext="" Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.357511 4922 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7539238d-5fe0-46ed-884e-1c3b566537ec" volumeName="kubernetes.io/secret/7539238d-5fe0-46ed-884e-1c3b566537ec-serving-cert" seLinuxMountContext="" Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.357528 4922 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="ef543e1b-8068-4ea3-b32a-61027b32e95d" volumeName="kubernetes.io/projected/ef543e1b-8068-4ea3-b32a-61027b32e95d-kube-api-access-s2kz5" seLinuxMountContext="" Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.357545 4922 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="fda69060-fa79-4696-b1a6-7980f124bf7c" volumeName="kubernetes.io/projected/fda69060-fa79-4696-b1a6-7980f124bf7c-kube-api-access-xcgwh" seLinuxMountContext="" Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.357564 4922 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/projected/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-kube-api-access-zkvpv" seLinuxMountContext="" Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.357581 4922 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-config" seLinuxMountContext="" Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.357606 4922 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="25e176fe-21b4-4974-b1ed-c8b94f112a7f" volumeName="kubernetes.io/configmap/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-cabundle" seLinuxMountContext="" Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.357627 4922 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="25e176fe-21b4-4974-b1ed-c8b94f112a7f" volumeName="kubernetes.io/projected/25e176fe-21b4-4974-b1ed-c8b94f112a7f-kube-api-access-d4lsv" seLinuxMountContext="" Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.357645 4922 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" volumeName="kubernetes.io/empty-dir/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-tmpfs" seLinuxMountContext="" Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.357713 4922 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/projected/7583ce53-e0fe-4a16-9e4d-50516596a136-kube-api-access-xcphl" seLinuxMountContext="" Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.357733 4922 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="925f1c65-6136-48ba-85aa-3a3b50560753" volumeName="kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-ovnkube-config" seLinuxMountContext="" Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.357756 4922 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" 
volumeName="kubernetes.io/empty-dir/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-available-featuregates" seLinuxMountContext="" Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.357781 4922 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="22c825df-677d-4ca6-82db-3454ed06e783" volumeName="kubernetes.io/projected/22c825df-677d-4ca6-82db-3454ed06e783-kube-api-access-7c4vf" seLinuxMountContext="" Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.357803 4922 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="31d8b7a1-420e-4252-a5b7-eebe8a111292" volumeName="kubernetes.io/projected/31d8b7a1-420e-4252-a5b7-eebe8a111292-kube-api-access-zgdk5" seLinuxMountContext="" Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.357821 4922 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-console-config" seLinuxMountContext="" Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.357838 4922 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="4bb40260-dbaa-4fb0-84df-5e680505d512" volumeName="kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-cni-binary-copy" seLinuxMountContext="" Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.357857 4922 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="57a731c4-ef35-47a8-b875-bfb08a7f8011" volumeName="kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-utilities" seLinuxMountContext="" Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.357878 4922 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" volumeName="kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert" seLinuxMountContext="" Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.357895 4922 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6402fda4-df10-493c-b4e5-d0569419652d" volumeName="kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-images" seLinuxMountContext="" Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.357913 4922 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="d75a4c96-2883-4a0b-bab2-0fab2b6c0b49" volumeName="kubernetes.io/projected/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-kube-api-access-rczfb" seLinuxMountContext="" Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.357931 4922 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="ef543e1b-8068-4ea3-b32a-61027b32e95d" volumeName="kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-ovnkube-identity-cm" seLinuxMountContext="" Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.357948 4922 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="fda69060-fa79-4696-b1a6-7980f124bf7c" volumeName="kubernetes.io/secret/fda69060-fa79-4696-b1a6-7980f124bf7c-proxy-tls" seLinuxMountContext="" Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.357966 4922 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="01ab3dd5-8196-46d0-ad33-122e2ca51def" 
volumeName="kubernetes.io/secret/01ab3dd5-8196-46d0-ad33-122e2ca51def-serving-cert" seLinuxMountContext="" Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.357983 4922 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d4552c7-cd75-42dd-8880-30dd377c49a4" volumeName="kubernetes.io/secret/9d4552c7-cd75-42dd-8880-30dd377c49a4-serving-cert" seLinuxMountContext="" Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.358000 4922 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-service-ca" seLinuxMountContext="" Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.358024 4922 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7539238d-5fe0-46ed-884e-1c3b566537ec" volumeName="kubernetes.io/projected/7539238d-5fe0-46ed-884e-1c3b566537ec-kube-api-access-tk88c" seLinuxMountContext="" Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.358049 4922 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/configmap/c03ee662-fb2f-4fc4-a2c1-af487c19d254-service-ca-bundle" seLinuxMountContext="" Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.358072 4922 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d" volumeName="kubernetes.io/projected/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d-kube-api-access-x2m85" seLinuxMountContext="" Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.358096 4922 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-encryption-config" seLinuxMountContext="" Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.358122 4922 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-serving-ca" seLinuxMountContext="" Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.358145 4922 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="31d8b7a1-420e-4252-a5b7-eebe8a111292" volumeName="kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-auth-proxy-config" seLinuxMountContext="" Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.358165 4922 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/secret/7583ce53-e0fe-4a16-9e4d-50516596a136-serving-cert" seLinuxMountContext="" Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.358183 4922 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-registry-tls" seLinuxMountContext="" Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.358200 4922 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" 
volumeName="kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-kube-api-access-kfwg7" seLinuxMountContext="" Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.358250 4922 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6312bbd-5731-4ea0-a20f-81d5a57df44a" volumeName="kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-srv-cert" seLinuxMountContext="" Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.358279 4922 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5225d0e4-402f-4861-b410-819f433b1803" volumeName="kubernetes.io/projected/5225d0e4-402f-4861-b410-819f433b1803-kube-api-access-9xfj7" seLinuxMountContext="" Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.358306 4922 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe579f8-e8a6-4643-bce5-a661393c4dde" volumeName="kubernetes.io/projected/5fe579f8-e8a6-4643-bce5-a661393c4dde-kube-api-access-fcqwp" seLinuxMountContext="" Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.358330 4922 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d4552c7-cd75-42dd-8880-30dd377c49a4" volumeName="kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-trusted-ca" seLinuxMountContext="" Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.358348 4922 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bd23aa5c-e532-4e53-bccf-e79f130c5ae8" volumeName="kubernetes.io/projected/bd23aa5c-e532-4e53-bccf-e79f130c5ae8-kube-api-access-jhbk2" seLinuxMountContext="" Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.358369 4922 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bf126b07-da06-4140-9a57-dfd54fc6b486" volumeName="kubernetes.io/configmap/bf126b07-da06-4140-9a57-dfd54fc6b486-trusted-ca" seLinuxMountContext="" Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.358393 4922 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49ef4625-1d3a-4a9f-b595-c2433d32326d" volumeName="kubernetes.io/projected/49ef4625-1d3a-4a9f-b595-c2433d32326d-kube-api-access-pjr6v" seLinuxMountContext="" Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.358415 4922 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" volumeName="kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-config" seLinuxMountContext="" Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.358434 4922 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" volumeName="kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-catalog-content" seLinuxMountContext="" Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.358455 4922 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/projected/c03ee662-fb2f-4fc4-a2c1-af487c19d254-kube-api-access-v47cf" seLinuxMountContext="" Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.358473 4922 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" 
volumeName="kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-client" seLinuxMountContext="" Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.358493 4922 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="31d8b7a1-420e-4252-a5b7-eebe8a111292" volumeName="kubernetes.io/secret/31d8b7a1-420e-4252-a5b7-eebe8a111292-proxy-tls" seLinuxMountContext="" Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.358512 4922 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" volumeName="kubernetes.io/projected/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-kube-api-access-wxkg8" seLinuxMountContext="" Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.358530 4922 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5441d097-087c-4d9a-baa8-b210afa90fc9" volumeName="kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-client-ca" seLinuxMountContext="" Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.358548 4922 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6731426b-95fe-49ff-bb5f-40441049fde2" volumeName="kubernetes.io/projected/6731426b-95fe-49ff-bb5f-40441049fde2-kube-api-access-x7zkh" seLinuxMountContext="" Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.358567 4922 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="87cf06ed-a83f-41a7-828d-70653580a8cb" volumeName="kubernetes.io/configmap/87cf06ed-a83f-41a7-828d-70653580a8cb-config-volume" seLinuxMountContext="" Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.358586 4922 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d4552c7-cd75-42dd-8880-30dd377c49a4" volumeName="kubernetes.io/projected/9d4552c7-cd75-42dd-8880-30dd377c49a4-kube-api-access-pcxfs" seLinuxMountContext="" Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.358612 4922 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d751cbb-f2e2-430d-9754-c882a5e924a5" volumeName="kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl" seLinuxMountContext="" Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.358635 4922 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a0128f3a-b052-44ed-a84e-c4c8aaf17c13" volumeName="kubernetes.io/projected/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-kube-api-access-gf66m" seLinuxMountContext="" Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.358658 4922 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-trusted-ca-bundle" seLinuxMountContext="" Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.358677 4922 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a31745f5-9847-4afe-82a5-3161cc66ca93" volumeName="kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-kube-api-access-lz9wn" seLinuxMountContext="" Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.358695 4922 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" 
volumeName="kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-default-certificate" seLinuxMountContext="" Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.358712 4922 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-metrics-certs" seLinuxMountContext="" Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.358732 4922 reconstruct.go:97] "Volume reconstruction finished" Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.358747 4922 reconciler.go:26] "Reconciler: start to sync state" Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.372670 4922 manager.go:324] Recovery completed Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.390135 4922 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.392630 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.392703 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.392723 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.394280 4922 cpu_manager.go:225] "Starting CPU manager" policy="none" Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.394311 4922 cpu_manager.go:226] "Reconciling" reconcilePeriod="10s" Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.394334 4922 state_mem.go:36] "Initialized new in-memory state store" Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.394280 4922 kubelet_network_linux.go:50] "Initialized iptables rules." protocol="IPv4" Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.397100 4922 kubelet_network_linux.go:50] "Initialized iptables rules." 
protocol="IPv6" Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.397149 4922 status_manager.go:217] "Starting to sync pod status with apiserver" Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.397185 4922 kubelet.go:2335] "Starting kubelet main sync loop" Nov 28 06:52:35 crc kubenswrapper[4922]: E1128 06:52:35.397254 4922 kubelet.go:2359] "Skipping pod synchronization" err="[container runtime status check may not have completed yet, PLEG is not healthy: pleg has yet to be successful]" Nov 28 06:52:35 crc kubenswrapper[4922]: W1128 06:52:35.399504 4922 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.RuntimeClass: Get "https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0": dial tcp 38.102.83.143:6443: connect: connection refused Nov 28 06:52:35 crc kubenswrapper[4922]: E1128 06:52:35.399644 4922 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.RuntimeClass: failed to list *v1.RuntimeClass: Get \"https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0\": dial tcp 38.102.83.143:6443: connect: connection refused" logger="UnhandledError" Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.407520 4922 policy_none.go:49] "None policy: Start" Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.408264 4922 memory_manager.go:170] "Starting memorymanager" policy="None" Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.408314 4922 state_mem.go:35] "Initializing new in-memory state store" Nov 28 06:52:35 crc kubenswrapper[4922]: E1128 06:52:35.426423 4922 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.463403 4922 manager.go:334] "Starting Device Plugin manager" Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.463465 4922 manager.go:513] "Failed to read data from checkpoint" checkpoint="kubelet_internal_checkpoint" err="checkpoint is not found" Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.463520 4922 server.go:79] "Starting device plugin registration server" Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.464093 4922 eviction_manager.go:189] "Eviction manager: starting control loop" Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.464123 4922 container_log_manager.go:189] "Initializing container log rotate workers" workers=1 monitorPeriod="10s" Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.464265 4922 plugin_watcher.go:51] "Plugin Watcher Start" path="/var/lib/kubelet/plugins_registry" Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.464339 4922 plugin_manager.go:116] "The desired_state_of_world populator (plugin watcher) starts" Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.464352 4922 plugin_manager.go:118] "Starting Kubelet Plugin Manager" Nov 28 06:52:35 crc kubenswrapper[4922]: E1128 06:52:35.476127 4922 eviction_manager.go:285] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"crc\" not found" Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.497562 4922 kubelet.go:2421] "SyncLoop ADD" source="file" pods=["openshift-etcd/etcd-crc","openshift-kube-apiserver/kube-apiserver-crc","openshift-kube-controller-manager/kube-controller-manager-crc","openshift-kube-scheduler/openshift-kube-scheduler-crc","openshift-machine-config-operator/kube-rbac-proxy-crio-crc"] Nov 28 06:52:35 crc kubenswrapper[4922]: 
I1128 06:52:35.497725 4922 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.499418 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.499466 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.499514 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.499711 4922 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.500057 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd/etcd-crc" Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.500166 4922 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.501041 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.501122 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.501141 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.501172 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.501187 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.501148 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.501467 4922 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.501660 4922 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.501723 4922 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.502965 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.503013 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.503032 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.502969 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.503065 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.503077 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.503205 4922 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.503412 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.503472 4922 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.504096 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.504164 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.504187 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.504584 4922 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.504682 4922 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.504724 4922 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.504725 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.504850 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.504865 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.506156 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.506193 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.506207 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.506792 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.506868 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.506903 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.507330 4922 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.507409 4922 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.509093 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.509155 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.509172 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:52:35 crc kubenswrapper[4922]: E1128 06:52:35.526139 4922 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.143:6443: connect: connection refused" interval="400ms" Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.560974 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"data-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-data-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.561029 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-cert-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.561093 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-kube\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-etc-kube\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.561152 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-resource-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.561247 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-log-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.561309 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.561358 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" 
(UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-resource-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.561392 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"static-pod-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-static-pod-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.561430 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.561467 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-cert-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.561497 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-var-lib-kubelet\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.561521 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-cert-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.561576 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"usr-local-bin\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-usr-local-bin\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.561628 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.561668 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-resource-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.564328 4922 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.569570 4922 
kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.569625 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.569646 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.569692 4922 kubelet_node_status.go:76] "Attempting to register node" node="crc" Nov 28 06:52:35 crc kubenswrapper[4922]: E1128 06:52:35.570253 4922 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.102.83.143:6443: connect: connection refused" node="crc" Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.662948 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-resource-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.663018 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-resource-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.663068 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-log-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.663143 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.663199 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-var-lib-kubelet\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.663250 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-resource-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.663297 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"static-pod-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-static-pod-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.663369 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-dir\" (UniqueName: 
\"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-log-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.663378 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.663443 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.663376 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"static-pod-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-static-pod-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.663433 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-var-lib-kubelet\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.663493 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-cert-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.663447 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.663597 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-resource-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.663652 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-cert-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.663666 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-resource-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 28 06:52:35 crc 
kubenswrapper[4922]: I1128 06:52:35.663603 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-resource-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.663727 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-cert-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.663763 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"usr-local-bin\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-usr-local-bin\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.663793 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.663807 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-cert-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.663823 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"data-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-data-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.663854 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"data-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-data-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.663876 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-cert-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.663893 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"usr-local-bin\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-usr-local-bin\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.663913 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-kube\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-etc-kube\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Nov 28 06:52:35 crc 
kubenswrapper[4922]: I1128 06:52:35.663926 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.663963 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-cert-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.664079 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-kube\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-etc-kube\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.771256 4922 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.773370 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.773425 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.773446 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.773480 4922 kubelet_node_status.go:76] "Attempting to register node" node="crc" Nov 28 06:52:35 crc kubenswrapper[4922]: E1128 06:52:35.774071 4922 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.102.83.143:6443: connect: connection refused" node="crc" Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.841094 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd/etcd-crc" Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.850554 4922 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 28 06:52:35 crc kubenswrapper[4922]: W1128 06:52:35.875193 4922 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod2139d3e2895fc6797b9c76a1b4c9886d.slice/crio-b4764fbc0718f2b43739d326fcaca63844403366fb898bc749723ca3b1b9a94f WatchSource:0}: Error finding container b4764fbc0718f2b43739d326fcaca63844403366fb898bc749723ca3b1b9a94f: Status 404 returned error can't find the container with id b4764fbc0718f2b43739d326fcaca63844403366fb898bc749723ca3b1b9a94f Nov 28 06:52:35 crc kubenswrapper[4922]: W1128 06:52:35.879133 4922 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf4b27818a5e8e43d0dc095d08835c792.slice/crio-f7d8f40fc79c25130664344bc60ad4bcca7be0f437d1a7449a93488158df370a WatchSource:0}: Error finding container f7d8f40fc79c25130664344bc60ad4bcca7be0f437d1a7449a93488158df370a: Status 404 returned error can't find the container with id f7d8f40fc79c25130664344bc60ad4bcca7be0f437d1a7449a93488158df370a Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.908424 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 28 06:52:35 crc kubenswrapper[4922]: W1128 06:52:35.927607 4922 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf614b9022728cf315e60c057852e563e.slice/crio-00216d610b9021f7e74927f2b86b3989ef3c0979e5f02a154d4910686500c3f1 WatchSource:0}: Error finding container 00216d610b9021f7e74927f2b86b3989ef3c0979e5f02a154d4910686500c3f1: Status 404 returned error can't find the container with id 00216d610b9021f7e74927f2b86b3989ef3c0979e5f02a154d4910686500c3f1 Nov 28 06:52:35 crc kubenswrapper[4922]: E1128 06:52:35.927621 4922 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.143:6443: connect: connection refused" interval="800ms" Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.949490 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Nov 28 06:52:35 crc kubenswrapper[4922]: I1128 06:52:35.959880 4922 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Nov 28 06:52:35 crc kubenswrapper[4922]: W1128 06:52:35.965274 4922 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod3dcd261975c3d6b9a6ad6367fd4facd3.slice/crio-a68f50d4c99a69a502c8bf33175e61e9f57333c0e9ad5e0a388edf2c0f1bf213 WatchSource:0}: Error finding container a68f50d4c99a69a502c8bf33175e61e9f57333c0e9ad5e0a388edf2c0f1bf213: Status 404 returned error can't find the container with id a68f50d4c99a69a502c8bf33175e61e9f57333c0e9ad5e0a388edf2c0f1bf213 Nov 28 06:52:35 crc kubenswrapper[4922]: W1128 06:52:35.979891 4922 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podd1b160f5dda77d281dd8e69ec8d817f9.slice/crio-231c638dc2afa15abd603cc2faa0b9a076a6a34599456ad9ff4de3def307e7cd WatchSource:0}: Error finding container 231c638dc2afa15abd603cc2faa0b9a076a6a34599456ad9ff4de3def307e7cd: Status 404 returned error can't find the container with id 231c638dc2afa15abd603cc2faa0b9a076a6a34599456ad9ff4de3def307e7cd Nov 28 06:52:36 crc kubenswrapper[4922]: W1128 06:52:36.151447 4922 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.CSIDriver: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0": dial tcp 38.102.83.143:6443: connect: connection refused Nov 28 06:52:36 crc kubenswrapper[4922]: E1128 06:52:36.151527 4922 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.CSIDriver: failed to list *v1.CSIDriver: Get \"https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0\": dial tcp 38.102.83.143:6443: connect: connection refused" logger="UnhandledError" Nov 28 06:52:36 crc kubenswrapper[4922]: I1128 06:52:36.174210 4922 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 28 06:52:36 crc kubenswrapper[4922]: I1128 06:52:36.176043 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:52:36 crc kubenswrapper[4922]: I1128 06:52:36.176091 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:52:36 crc kubenswrapper[4922]: I1128 06:52:36.176101 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:52:36 crc kubenswrapper[4922]: I1128 06:52:36.176130 4922 kubelet_node_status.go:76] "Attempting to register node" node="crc" Nov 28 06:52:36 crc kubenswrapper[4922]: E1128 06:52:36.176644 4922 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.102.83.143:6443: connect: connection refused" node="crc" Nov 28 06:52:36 crc kubenswrapper[4922]: I1128 06:52:36.319561 4922 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.102.83.143:6443: connect: connection refused Nov 28 06:52:36 crc kubenswrapper[4922]: I1128 06:52:36.325835 4922 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-06 02:07:46.541018918 +0000 UTC Nov 28 06:52:36 crc kubenswrapper[4922]: 
I1128 06:52:36.325934 4922 certificate_manager.go:356] kubernetes.io/kubelet-serving: Waiting 187h15m10.215088327s for next certificate rotation Nov 28 06:52:36 crc kubenswrapper[4922]: I1128 06:52:36.405551 4922 generic.go:334] "Generic (PLEG): container finished" podID="2139d3e2895fc6797b9c76a1b4c9886d" containerID="74e297c046ab6458dc1922bc05089d9fbba4972e952fefb845a81e8852217743" exitCode=0 Nov 28 06:52:36 crc kubenswrapper[4922]: I1128 06:52:36.405674 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerDied","Data":"74e297c046ab6458dc1922bc05089d9fbba4972e952fefb845a81e8852217743"} Nov 28 06:52:36 crc kubenswrapper[4922]: I1128 06:52:36.405842 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"b4764fbc0718f2b43739d326fcaca63844403366fb898bc749723ca3b1b9a94f"} Nov 28 06:52:36 crc kubenswrapper[4922]: I1128 06:52:36.406072 4922 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 28 06:52:36 crc kubenswrapper[4922]: I1128 06:52:36.407664 4922 generic.go:334] "Generic (PLEG): container finished" podID="d1b160f5dda77d281dd8e69ec8d817f9" containerID="a5f6fc7cc5c7d7923b2f63059c0cfed9ebda675222e2ca864808be06f90e2907" exitCode=0 Nov 28 06:52:36 crc kubenswrapper[4922]: I1128 06:52:36.407739 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" event={"ID":"d1b160f5dda77d281dd8e69ec8d817f9","Type":"ContainerDied","Data":"a5f6fc7cc5c7d7923b2f63059c0cfed9ebda675222e2ca864808be06f90e2907"} Nov 28 06:52:36 crc kubenswrapper[4922]: I1128 06:52:36.407778 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" event={"ID":"d1b160f5dda77d281dd8e69ec8d817f9","Type":"ContainerStarted","Data":"231c638dc2afa15abd603cc2faa0b9a076a6a34599456ad9ff4de3def307e7cd"} Nov 28 06:52:36 crc kubenswrapper[4922]: I1128 06:52:36.407794 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:52:36 crc kubenswrapper[4922]: I1128 06:52:36.407835 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:52:36 crc kubenswrapper[4922]: I1128 06:52:36.407854 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:52:36 crc kubenswrapper[4922]: I1128 06:52:36.407865 4922 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 28 06:52:36 crc kubenswrapper[4922]: I1128 06:52:36.410626 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:52:36 crc kubenswrapper[4922]: I1128 06:52:36.410651 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:52:36 crc kubenswrapper[4922]: I1128 06:52:36.410660 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:52:36 crc kubenswrapper[4922]: I1128 06:52:36.412929 4922 generic.go:334] "Generic (PLEG): container finished" podID="3dcd261975c3d6b9a6ad6367fd4facd3" containerID="73c8b650a2a0ebc55d6531c14f5c6e5e4a7ffbac511aea8400c6632289aeb275" exitCode=0 Nov 28 06:52:36 crc kubenswrapper[4922]: 
I1128 06:52:36.412984 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerDied","Data":"73c8b650a2a0ebc55d6531c14f5c6e5e4a7ffbac511aea8400c6632289aeb275"} Nov 28 06:52:36 crc kubenswrapper[4922]: I1128 06:52:36.413080 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerStarted","Data":"a68f50d4c99a69a502c8bf33175e61e9f57333c0e9ad5e0a388edf2c0f1bf213"} Nov 28 06:52:36 crc kubenswrapper[4922]: I1128 06:52:36.413279 4922 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 28 06:52:36 crc kubenswrapper[4922]: I1128 06:52:36.415243 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:52:36 crc kubenswrapper[4922]: I1128 06:52:36.415292 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:52:36 crc kubenswrapper[4922]: I1128 06:52:36.415314 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:52:36 crc kubenswrapper[4922]: I1128 06:52:36.416214 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"1c84fe4f54f9b9fc76f23f13aaee875e9d127be3dec3e3952893436fb9d94936"} Nov 28 06:52:36 crc kubenswrapper[4922]: I1128 06:52:36.416292 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"00216d610b9021f7e74927f2b86b3989ef3c0979e5f02a154d4910686500c3f1"} Nov 28 06:52:36 crc kubenswrapper[4922]: I1128 06:52:36.417707 4922 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="9a1a9e0fb2169a4caf2986906a87573648458725e2571ec377704856d3c16b47" exitCode=0 Nov 28 06:52:36 crc kubenswrapper[4922]: I1128 06:52:36.417739 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerDied","Data":"9a1a9e0fb2169a4caf2986906a87573648458725e2571ec377704856d3c16b47"} Nov 28 06:52:36 crc kubenswrapper[4922]: I1128 06:52:36.417764 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"f7d8f40fc79c25130664344bc60ad4bcca7be0f437d1a7449a93488158df370a"} Nov 28 06:52:36 crc kubenswrapper[4922]: I1128 06:52:36.417849 4922 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 28 06:52:36 crc kubenswrapper[4922]: I1128 06:52:36.418602 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:52:36 crc kubenswrapper[4922]: I1128 06:52:36.418631 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:52:36 crc kubenswrapper[4922]: I1128 06:52:36.418645 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:52:36 crc 
kubenswrapper[4922]: I1128 06:52:36.420371 4922 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 28 06:52:36 crc kubenswrapper[4922]: I1128 06:52:36.421078 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:52:36 crc kubenswrapper[4922]: I1128 06:52:36.421105 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:52:36 crc kubenswrapper[4922]: I1128 06:52:36.421114 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:52:36 crc kubenswrapper[4922]: W1128 06:52:36.576319 4922 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Node: Get "https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0": dial tcp 38.102.83.143:6443: connect: connection refused Nov 28 06:52:36 crc kubenswrapper[4922]: E1128 06:52:36.576382 4922 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Node: failed to list *v1.Node: Get \"https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0\": dial tcp 38.102.83.143:6443: connect: connection refused" logger="UnhandledError" Nov 28 06:52:36 crc kubenswrapper[4922]: E1128 06:52:36.728618 4922 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.143:6443: connect: connection refused" interval="1.6s" Nov 28 06:52:36 crc kubenswrapper[4922]: W1128 06:52:36.788265 4922 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Service: Get "https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0": dial tcp 38.102.83.143:6443: connect: connection refused Nov 28 06:52:36 crc kubenswrapper[4922]: E1128 06:52:36.788345 4922 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Service: failed to list *v1.Service: Get \"https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0\": dial tcp 38.102.83.143:6443: connect: connection refused" logger="UnhandledError" Nov 28 06:52:36 crc kubenswrapper[4922]: W1128 06:52:36.806790 4922 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.RuntimeClass: Get "https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0": dial tcp 38.102.83.143:6443: connect: connection refused Nov 28 06:52:36 crc kubenswrapper[4922]: E1128 06:52:36.806892 4922 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.RuntimeClass: failed to list *v1.RuntimeClass: Get \"https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0\": dial tcp 38.102.83.143:6443: connect: connection refused" logger="UnhandledError" Nov 28 06:52:36 crc kubenswrapper[4922]: I1128 06:52:36.976719 4922 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 28 06:52:36 crc kubenswrapper[4922]: I1128 06:52:36.978181 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:52:36 
crc kubenswrapper[4922]: I1128 06:52:36.978230 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:52:36 crc kubenswrapper[4922]: I1128 06:52:36.978240 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:52:36 crc kubenswrapper[4922]: I1128 06:52:36.978265 4922 kubelet_node_status.go:76] "Attempting to register node" node="crc" Nov 28 06:52:37 crc kubenswrapper[4922]: I1128 06:52:37.421936 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" event={"ID":"d1b160f5dda77d281dd8e69ec8d817f9","Type":"ContainerStarted","Data":"d11064f96da0c5b3c10a95671667cd2643279cccaafadf1ae910cffa4a1613ec"} Nov 28 06:52:37 crc kubenswrapper[4922]: I1128 06:52:37.422021 4922 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 28 06:52:37 crc kubenswrapper[4922]: I1128 06:52:37.422826 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:52:37 crc kubenswrapper[4922]: I1128 06:52:37.422865 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:52:37 crc kubenswrapper[4922]: I1128 06:52:37.422874 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:52:37 crc kubenswrapper[4922]: I1128 06:52:37.424935 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerStarted","Data":"900d4f306d3a3e4386262c1ee1031ad03503f6c262e9305c581397b3bf1a6a60"} Nov 28 06:52:37 crc kubenswrapper[4922]: I1128 06:52:37.424961 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerStarted","Data":"a5b5d5b7a735c902e218bbb524cda2f64b167fcced9cc07a47bff7e59cb55115"} Nov 28 06:52:37 crc kubenswrapper[4922]: I1128 06:52:37.424970 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerStarted","Data":"8db4ea0b3e2badcbe285f1758b150d335c8c1f4d92478a2de587cf9d345f3640"} Nov 28 06:52:37 crc kubenswrapper[4922]: I1128 06:52:37.425034 4922 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 28 06:52:37 crc kubenswrapper[4922]: I1128 06:52:37.425654 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:52:37 crc kubenswrapper[4922]: I1128 06:52:37.425678 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:52:37 crc kubenswrapper[4922]: I1128 06:52:37.425685 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:52:37 crc kubenswrapper[4922]: I1128 06:52:37.427597 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"7ef4b355167e11c82fb8067373d0d1b4ec5551157e683043c98f9249082795d2"} Nov 28 06:52:37 crc kubenswrapper[4922]: I1128 06:52:37.427626 4922 kubelet.go:2453] 
"SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"bf8a543b9af64068e2164c1fb7fcf8117e36a3f1a6c6e40df2a63da8fea86612"} Nov 28 06:52:37 crc kubenswrapper[4922]: I1128 06:52:37.427634 4922 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 28 06:52:37 crc kubenswrapper[4922]: I1128 06:52:37.427637 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"2b39d53d3c00be07ad6ed44fb37961669118efd3f0a953a39e05c90c9fc30319"} Nov 28 06:52:37 crc kubenswrapper[4922]: I1128 06:52:37.428543 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:52:37 crc kubenswrapper[4922]: I1128 06:52:37.428585 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:52:37 crc kubenswrapper[4922]: I1128 06:52:37.428598 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:52:37 crc kubenswrapper[4922]: I1128 06:52:37.430413 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"cd17aa15325fbe3a40b81cea5bfec137ef02a8dd2d5f8393be4d206a8360ba89"} Nov 28 06:52:37 crc kubenswrapper[4922]: I1128 06:52:37.430452 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"c63926690deadbe1a0b9bd4228c1be8c9d959ae52dc13540e1d7da0ef6ce2b44"} Nov 28 06:52:37 crc kubenswrapper[4922]: I1128 06:52:37.430466 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"12e55656cf2a2cadbef853f458f347aabe75bd6b644a48a8c320144a0bbb77f8"} Nov 28 06:52:37 crc kubenswrapper[4922]: I1128 06:52:37.430477 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"d8727c0c5c76c4071561c95b14554063c01a2d7d826bfef19624a346f7079096"} Nov 28 06:52:37 crc kubenswrapper[4922]: I1128 06:52:37.432267 4922 generic.go:334] "Generic (PLEG): container finished" podID="2139d3e2895fc6797b9c76a1b4c9886d" containerID="6bdbf05d3df8ac497778f79f94f805b63a9a879353e4bc7e670054ae4224ed8a" exitCode=0 Nov 28 06:52:37 crc kubenswrapper[4922]: I1128 06:52:37.432301 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerDied","Data":"6bdbf05d3df8ac497778f79f94f805b63a9a879353e4bc7e670054ae4224ed8a"} Nov 28 06:52:37 crc kubenswrapper[4922]: I1128 06:52:37.432423 4922 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 28 06:52:37 crc kubenswrapper[4922]: I1128 06:52:37.433304 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:52:37 crc kubenswrapper[4922]: I1128 06:52:37.433330 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 28 06:52:37 crc kubenswrapper[4922]: I1128 06:52:37.433340 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:52:38 crc kubenswrapper[4922]: I1128 06:52:38.009092 4922 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 28 06:52:38 crc kubenswrapper[4922]: I1128 06:52:38.014689 4922 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 28 06:52:38 crc kubenswrapper[4922]: I1128 06:52:38.439374 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"43f9db3464d315f5b596f15327b9e7cfc2935bb8f18492bcf4aa1b2d8e2e8951"} Nov 28 06:52:38 crc kubenswrapper[4922]: I1128 06:52:38.439546 4922 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 28 06:52:38 crc kubenswrapper[4922]: I1128 06:52:38.441119 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:52:38 crc kubenswrapper[4922]: I1128 06:52:38.441168 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:52:38 crc kubenswrapper[4922]: I1128 06:52:38.441185 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:52:38 crc kubenswrapper[4922]: I1128 06:52:38.443495 4922 generic.go:334] "Generic (PLEG): container finished" podID="2139d3e2895fc6797b9c76a1b4c9886d" containerID="9dd6a4216b760f04b166ad14dafd51d365a0e3d70bffd901a8832595314de337" exitCode=0 Nov 28 06:52:38 crc kubenswrapper[4922]: I1128 06:52:38.443587 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerDied","Data":"9dd6a4216b760f04b166ad14dafd51d365a0e3d70bffd901a8832595314de337"} Nov 28 06:52:38 crc kubenswrapper[4922]: I1128 06:52:38.443664 4922 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 28 06:52:38 crc kubenswrapper[4922]: I1128 06:52:38.443773 4922 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 28 06:52:38 crc kubenswrapper[4922]: I1128 06:52:38.445204 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:52:38 crc kubenswrapper[4922]: I1128 06:52:38.445300 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:52:38 crc kubenswrapper[4922]: I1128 06:52:38.445339 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:52:38 crc kubenswrapper[4922]: I1128 06:52:38.446457 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:52:38 crc kubenswrapper[4922]: I1128 06:52:38.446527 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:52:38 crc kubenswrapper[4922]: I1128 06:52:38.446554 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:52:39 crc 
kubenswrapper[4922]: I1128 06:52:39.452556 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"e2f748fca556d4adf2ab60a532744f8ae67edf55eb3804c03200b6d8ff4fa40b"} Nov 28 06:52:39 crc kubenswrapper[4922]: I1128 06:52:39.452626 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"3091c94fa0eee49ec805396d2a8e082a8428e92abd834f80670173611dea16e2"} Nov 28 06:52:39 crc kubenswrapper[4922]: I1128 06:52:39.452649 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"81c4d4bc41526ec84fd23ce57c1aec1457d145ee86ba65f785e699bf5234e498"} Nov 28 06:52:39 crc kubenswrapper[4922]: I1128 06:52:39.452689 4922 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Nov 28 06:52:39 crc kubenswrapper[4922]: I1128 06:52:39.452741 4922 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Nov 28 06:52:39 crc kubenswrapper[4922]: I1128 06:52:39.452772 4922 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 28 06:52:39 crc kubenswrapper[4922]: I1128 06:52:39.452848 4922 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 28 06:52:39 crc kubenswrapper[4922]: I1128 06:52:39.454529 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:52:39 crc kubenswrapper[4922]: I1128 06:52:39.454934 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:52:39 crc kubenswrapper[4922]: I1128 06:52:39.454953 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:52:39 crc kubenswrapper[4922]: I1128 06:52:39.456744 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:52:39 crc kubenswrapper[4922]: I1128 06:52:39.456794 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:52:39 crc kubenswrapper[4922]: I1128 06:52:39.456819 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:52:40 crc kubenswrapper[4922]: I1128 06:52:40.209051 4922 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 28 06:52:40 crc kubenswrapper[4922]: I1128 06:52:40.463314 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"9ed8f1d210b89e31af569d93f2678c02160323a61022fbfc705b18e078d1c900"} Nov 28 06:52:40 crc kubenswrapper[4922]: I1128 06:52:40.463372 4922 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Nov 28 06:52:40 crc kubenswrapper[4922]: I1128 06:52:40.463386 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"c57d800639e5d3da6ddbaaa824296a41cb81406479f20297d32e46702be47e17"} Nov 28 06:52:40 crc kubenswrapper[4922]: I1128 06:52:40.463422 4922 kubelet_node_status.go:401] "Setting node annotation to 
enable volume controller attach/detach" Nov 28 06:52:40 crc kubenswrapper[4922]: I1128 06:52:40.463430 4922 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 28 06:52:40 crc kubenswrapper[4922]: I1128 06:52:40.464986 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:52:40 crc kubenswrapper[4922]: I1128 06:52:40.465006 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:52:40 crc kubenswrapper[4922]: I1128 06:52:40.465032 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:52:40 crc kubenswrapper[4922]: I1128 06:52:40.465046 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:52:40 crc kubenswrapper[4922]: I1128 06:52:40.465074 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:52:40 crc kubenswrapper[4922]: I1128 06:52:40.465052 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:52:40 crc kubenswrapper[4922]: I1128 06:52:40.769814 4922 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 28 06:52:41 crc kubenswrapper[4922]: I1128 06:52:41.465554 4922 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 28 06:52:41 crc kubenswrapper[4922]: I1128 06:52:41.465657 4922 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 28 06:52:41 crc kubenswrapper[4922]: I1128 06:52:41.468338 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:52:41 crc kubenswrapper[4922]: I1128 06:52:41.468437 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:52:41 crc kubenswrapper[4922]: I1128 06:52:41.468472 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:52:41 crc kubenswrapper[4922]: I1128 06:52:41.468518 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:52:41 crc kubenswrapper[4922]: I1128 06:52:41.468575 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:52:41 crc kubenswrapper[4922]: I1128 06:52:41.468601 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:52:42 crc kubenswrapper[4922]: I1128 06:52:42.269299 4922 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 28 06:52:42 crc kubenswrapper[4922]: I1128 06:52:42.468614 4922 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 28 06:52:42 crc kubenswrapper[4922]: I1128 06:52:42.469915 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:52:42 crc kubenswrapper[4922]: I1128 06:52:42.469970 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:52:42 crc kubenswrapper[4922]: I1128 06:52:42.469988 4922 
kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:52:42 crc kubenswrapper[4922]: I1128 06:52:42.665962 4922 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 28 06:52:42 crc kubenswrapper[4922]: I1128 06:52:42.666153 4922 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Nov 28 06:52:42 crc kubenswrapper[4922]: I1128 06:52:42.666210 4922 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 28 06:52:42 crc kubenswrapper[4922]: I1128 06:52:42.667896 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:52:42 crc kubenswrapper[4922]: I1128 06:52:42.668065 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:52:42 crc kubenswrapper[4922]: I1128 06:52:42.668191 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:52:43 crc kubenswrapper[4922]: I1128 06:52:43.107966 4922 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Nov 28 06:52:43 crc kubenswrapper[4922]: I1128 06:52:43.108957 4922 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 28 06:52:43 crc kubenswrapper[4922]: I1128 06:52:43.111166 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:52:43 crc kubenswrapper[4922]: I1128 06:52:43.111408 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:52:43 crc kubenswrapper[4922]: I1128 06:52:43.111542 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:52:43 crc kubenswrapper[4922]: I1128 06:52:43.654900 4922 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 28 06:52:43 crc kubenswrapper[4922]: I1128 06:52:43.655075 4922 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Nov 28 06:52:43 crc kubenswrapper[4922]: I1128 06:52:43.655130 4922 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 28 06:52:43 crc kubenswrapper[4922]: I1128 06:52:43.656864 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:52:43 crc kubenswrapper[4922]: I1128 06:52:43.656897 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:52:43 crc kubenswrapper[4922]: I1128 06:52:43.656914 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:52:43 crc kubenswrapper[4922]: I1128 06:52:43.666622 4922 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-etcd/etcd-crc" Nov 28 06:52:43 crc kubenswrapper[4922]: I1128 06:52:43.666951 4922 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 28 06:52:43 crc kubenswrapper[4922]: I1128 06:52:43.668558 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:52:43 crc 
kubenswrapper[4922]: I1128 06:52:43.668608 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:52:43 crc kubenswrapper[4922]: I1128 06:52:43.668625 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:52:45 crc kubenswrapper[4922]: E1128 06:52:45.476271 4922 eviction_manager.go:285] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"crc\" not found" Nov 28 06:52:45 crc kubenswrapper[4922]: I1128 06:52:45.666466 4922 patch_prober.go:28] interesting pod/kube-controller-manager-crc container/cluster-policy-controller namespace/openshift-kube-controller-manager: Startup probe status=failure output="Get \"https://192.168.126.11:10357/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Nov 28 06:52:45 crc kubenswrapper[4922]: I1128 06:52:45.666568 4922 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podUID="f614b9022728cf315e60c057852e563e" containerName="cluster-policy-controller" probeResult="failure" output="Get \"https://192.168.126.11:10357/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Nov 28 06:52:45 crc kubenswrapper[4922]: I1128 06:52:45.782930 4922 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 28 06:52:45 crc kubenswrapper[4922]: I1128 06:52:45.783160 4922 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 28 06:52:45 crc kubenswrapper[4922]: I1128 06:52:45.784968 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:52:45 crc kubenswrapper[4922]: I1128 06:52:45.785066 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:52:45 crc kubenswrapper[4922]: I1128 06:52:45.785094 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:52:46 crc kubenswrapper[4922]: E1128 06:52:46.979613 4922 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": net/http: TLS handshake timeout" node="crc" Nov 28 06:52:47 crc kubenswrapper[4922]: I1128 06:52:47.319428 4922 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": net/http: TLS handshake timeout Nov 28 06:52:48 crc kubenswrapper[4922]: E1128 06:52:48.329967 4922 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" interval="3.2s" Nov 28 06:52:48 crc kubenswrapper[4922]: I1128 06:52:48.580594 4922 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 28 06:52:48 crc kubenswrapper[4922]: I1128 06:52:48.582029 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:52:48 crc kubenswrapper[4922]: I1128 
06:52:48.582061 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:52:48 crc kubenswrapper[4922]: I1128 06:52:48.582071 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:52:48 crc kubenswrapper[4922]: I1128 06:52:48.582089 4922 kubelet_node_status.go:76] "Attempting to register node" node="crc" Nov 28 06:52:48 crc kubenswrapper[4922]: I1128 06:52:48.776568 4922 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver namespace/openshift-kube-apiserver: Startup probe status=failure output="HTTP probe failed with statuscode: 403" start-of-body={"kind":"Status","apiVersion":"v1","metadata":{},"status":"Failure","message":"forbidden: User \"system:anonymous\" cannot get path \"/livez\"","reason":"Forbidden","details":{},"code":403} Nov 28 06:52:48 crc kubenswrapper[4922]: I1128 06:52:48.776658 4922 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" probeResult="failure" output="HTTP probe failed with statuscode: 403" Nov 28 06:52:48 crc kubenswrapper[4922]: I1128 06:52:48.787582 4922 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver namespace/openshift-kube-apiserver: Startup probe status=failure output="HTTP probe failed with statuscode: 403" start-of-body={"kind":"Status","apiVersion":"v1","metadata":{},"status":"Failure","message":"forbidden: User \"system:anonymous\" cannot get path \"/livez\"","reason":"Forbidden","details":{},"code":403} Nov 28 06:52:48 crc kubenswrapper[4922]: I1128 06:52:48.787652 4922 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" probeResult="failure" output="HTTP probe failed with statuscode: 403" Nov 28 06:52:48 crc kubenswrapper[4922]: I1128 06:52:48.923735 4922 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-etcd/etcd-crc" Nov 28 06:52:48 crc kubenswrapper[4922]: I1128 06:52:48.923957 4922 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 28 06:52:48 crc kubenswrapper[4922]: I1128 06:52:48.925200 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:52:48 crc kubenswrapper[4922]: I1128 06:52:48.925295 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:52:48 crc kubenswrapper[4922]: I1128 06:52:48.925314 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:52:48 crc kubenswrapper[4922]: I1128 06:52:48.985111 4922 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-etcd/etcd-crc" Nov 28 06:52:49 crc kubenswrapper[4922]: I1128 06:52:49.488355 4922 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 28 06:52:49 crc kubenswrapper[4922]: I1128 06:52:49.489628 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:52:49 crc kubenswrapper[4922]: I1128 06:52:49.489700 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:52:49 crc kubenswrapper[4922]: 
I1128 06:52:49.489719 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:52:49 crc kubenswrapper[4922]: I1128 06:52:49.511988 4922 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-etcd/etcd-crc" Nov 28 06:52:50 crc kubenswrapper[4922]: I1128 06:52:50.490473 4922 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 28 06:52:50 crc kubenswrapper[4922]: I1128 06:52:50.492660 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:52:50 crc kubenswrapper[4922]: I1128 06:52:50.492711 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:52:50 crc kubenswrapper[4922]: I1128 06:52:50.492730 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:52:52 crc kubenswrapper[4922]: I1128 06:52:52.278921 4922 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 28 06:52:52 crc kubenswrapper[4922]: I1128 06:52:52.279198 4922 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 28 06:52:52 crc kubenswrapper[4922]: I1128 06:52:52.282538 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:52:52 crc kubenswrapper[4922]: I1128 06:52:52.282592 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:52:52 crc kubenswrapper[4922]: I1128 06:52:52.282607 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:52:52 crc kubenswrapper[4922]: I1128 06:52:52.286170 4922 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 28 06:52:52 crc kubenswrapper[4922]: I1128 06:52:52.496625 4922 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Nov 28 06:52:52 crc kubenswrapper[4922]: I1128 06:52:52.496720 4922 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 28 06:52:52 crc kubenswrapper[4922]: I1128 06:52:52.498268 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:52:52 crc kubenswrapper[4922]: I1128 06:52:52.498405 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:52:52 crc kubenswrapper[4922]: I1128 06:52:52.498477 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:52:53 crc kubenswrapper[4922]: I1128 06:52:53.765069 4922 trace.go:236] Trace[528859280]: "Reflector ListAndWatch" name:k8s.io/client-go/informers/factory.go:160 (28-Nov-2025 06:52:39.453) (total time: 14311ms): Nov 28 06:52:53 crc kubenswrapper[4922]: Trace[528859280]: ---"Objects listed" error: 14311ms (06:52:53.764) Nov 28 06:52:53 crc kubenswrapper[4922]: Trace[528859280]: [14.311112265s] [14.311112265s] END Nov 28 06:52:53 crc kubenswrapper[4922]: I1128 06:52:53.765110 4922 reflector.go:368] Caches populated for *v1.Node from k8s.io/client-go/informers/factory.go:160 Nov 28 06:52:53 crc kubenswrapper[4922]: I1128 06:52:53.765499 4922 trace.go:236] Trace[1940238849]: 
"Reflector ListAndWatch" name:k8s.io/client-go/informers/factory.go:160 (28-Nov-2025 06:52:39.012) (total time: 14753ms): Nov 28 06:52:53 crc kubenswrapper[4922]: Trace[1940238849]: ---"Objects listed" error: 14753ms (06:52:53.765) Nov 28 06:52:53 crc kubenswrapper[4922]: Trace[1940238849]: [14.753386185s] [14.753386185s] END Nov 28 06:52:53 crc kubenswrapper[4922]: I1128 06:52:53.765532 4922 reflector.go:368] Caches populated for *v1.CSIDriver from k8s.io/client-go/informers/factory.go:160 Nov 28 06:52:53 crc kubenswrapper[4922]: I1128 06:52:53.765880 4922 trace.go:236] Trace[1381644967]: "Reflector ListAndWatch" name:k8s.io/client-go/informers/factory.go:160 (28-Nov-2025 06:52:39.213) (total time: 14552ms): Nov 28 06:52:53 crc kubenswrapper[4922]: Trace[1381644967]: ---"Objects listed" error: 14552ms (06:52:53.765) Nov 28 06:52:53 crc kubenswrapper[4922]: Trace[1381644967]: [14.552579938s] [14.552579938s] END Nov 28 06:52:53 crc kubenswrapper[4922]: I1128 06:52:53.765893 4922 reflector.go:368] Caches populated for *v1.RuntimeClass from k8s.io/client-go/informers/factory.go:160 Nov 28 06:52:53 crc kubenswrapper[4922]: I1128 06:52:53.768664 4922 trace.go:236] Trace[348491439]: "Reflector ListAndWatch" name:k8s.io/client-go/informers/factory.go:160 (28-Nov-2025 06:52:39.317) (total time: 14450ms): Nov 28 06:52:53 crc kubenswrapper[4922]: Trace[348491439]: ---"Objects listed" error: 14450ms (06:52:53.768) Nov 28 06:52:53 crc kubenswrapper[4922]: Trace[348491439]: [14.450707947s] [14.450707947s] END Nov 28 06:52:53 crc kubenswrapper[4922]: I1128 06:52:53.768684 4922 reflector.go:368] Caches populated for *v1.Service from k8s.io/client-go/informers/factory.go:160 Nov 28 06:52:53 crc kubenswrapper[4922]: I1128 06:52:53.769133 4922 reconstruct.go:205] "DevicePaths of reconstructed volumes updated" Nov 28 06:52:53 crc kubenswrapper[4922]: E1128 06:52:53.769712 4922 kubelet_node_status.go:99] "Unable to register node with API server" err="nodes \"crc\" is forbidden: autoscaling.openshift.io/ManagedNode infra config cache not synchronized" node="crc" Nov 28 06:52:53 crc kubenswrapper[4922]: I1128 06:52:53.829617 4922 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver-check-endpoints namespace/openshift-kube-apiserver: Readiness probe status=failure output="Get \"https://192.168.126.11:17697/healthz\": EOF" start-of-body= Nov 28 06:52:53 crc kubenswrapper[4922]: I1128 06:52:53.829690 4922 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver-check-endpoints namespace/openshift-kube-apiserver: Liveness probe status=failure output="Get \"https://192.168.126.11:17697/healthz\": EOF" start-of-body= Nov 28 06:52:53 crc kubenswrapper[4922]: I1128 06:52:53.829758 4922 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" probeResult="failure" output="Get \"https://192.168.126.11:17697/healthz\": EOF" Nov 28 06:52:53 crc kubenswrapper[4922]: I1128 06:52:53.829686 4922 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" probeResult="failure" output="Get \"https://192.168.126.11:17697/healthz\": EOF" Nov 28 06:52:53 crc kubenswrapper[4922]: I1128 06:52:53.830551 4922 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver-check-endpoints 
namespace/openshift-kube-apiserver: Readiness probe status=failure output="Get \"https://192.168.126.11:17697/healthz\": read tcp 192.168.126.11:38404->192.168.126.11:17697: read: connection reset by peer" start-of-body= Nov 28 06:52:53 crc kubenswrapper[4922]: I1128 06:52:53.830589 4922 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" probeResult="failure" output="Get \"https://192.168.126.11:17697/healthz\": read tcp 192.168.126.11:38404->192.168.126.11:17697: read: connection reset by peer" Nov 28 06:52:53 crc kubenswrapper[4922]: I1128 06:52:53.857811 4922 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 28 06:52:53 crc kubenswrapper[4922]: I1128 06:52:53.873801 4922 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 28 06:52:53 crc kubenswrapper[4922]: I1128 06:52:53.875062 4922 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.321719 4922 apiserver.go:52] "Watching apiserver" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.325322 4922 reflector.go:368] Caches populated for *v1.Pod from pkg/kubelet/config/apiserver.go:66 Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.325812 4922 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-network-diagnostics/network-check-source-55646444c4-trplf","openshift-network-diagnostics/network-check-target-xd92c","openshift-network-node-identity/network-node-identity-vrzqb","openshift-network-operator/iptables-alerter-4ln5h","openshift-network-operator/network-operator-58b4c7f79c-55gtf","openshift-kube-controller-manager/kube-controller-manager-crc","openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"] Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.326435 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.326466 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-node-identity/network-node-identity-vrzqb" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.326486 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.326530 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Nov 28 06:52:54 crc kubenswrapper[4922]: E1128 06:52:54.326549 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.326576 4922 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-operator/iptables-alerter-4ln5h" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.326716 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 06:52:54 crc kubenswrapper[4922]: E1128 06:52:54.326790 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 06:52:54 crc kubenswrapper[4922]: E1128 06:52:54.327357 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.331404 4922 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"kube-root-ca.crt" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.331413 4922 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"openshift-service-ca.crt" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.332234 4922 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"kube-root-ca.crt" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.332518 4922 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"openshift-service-ca.crt" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.334905 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-node-identity"/"network-node-identity-cert" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.335368 4922 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"iptables-alerter-script" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.335490 4922 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"ovnkube-identity-cm" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.337535 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-operator"/"metrics-tls" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.338790 4922 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"env-overrides" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.402716 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.418935 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.425430 4922 desired_state_of_world_populator.go:154] "Finished populating initial desired state of world" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.433272 4922 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-dns/node-resolver-w9zxj"] Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.433600 4922 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-dns/node-resolver-w9zxj" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.435834 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"node-resolver-dockercfg-kz9s7" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.436342 4922 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"openshift-service-ca.crt" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.436573 4922 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"kube-root-ca.crt" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.437288 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.452341 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.463687 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.472668 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.474916 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-trusted-ca-bundle\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") " Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.474961 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/6402fda4-df10-493c-b4e5-d0569419652d-machine-api-operator-tls\") pod \"6402fda4-df10-493c-b4e5-d0569419652d\" (UID: \"6402fda4-df10-493c-b4e5-d0569419652d\") " Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.474988 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xcphl\" (UniqueName: \"kubernetes.io/projected/7583ce53-e0fe-4a16-9e4d-50516596a136-kube-api-access-xcphl\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") " Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.475015 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pcxfs\" (UniqueName: \"kubernetes.io/projected/9d4552c7-cd75-42dd-8880-30dd377c49a4-kube-api-access-pcxfs\") pod \"9d4552c7-cd75-42dd-8880-30dd377c49a4\" (UID: \"9d4552c7-cd75-42dd-8880-30dd377c49a4\") " Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.475038 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-profile-collector-cert\") pod \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\" (UID: \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\") " Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.475059 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7583ce53-e0fe-4a16-9e4d-50516596a136-serving-cert\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") " Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.475081 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/31d8b7a1-420e-4252-a5b7-eebe8a111292-proxy-tls\") pod \"31d8b7a1-420e-4252-a5b7-eebe8a111292\" (UID: \"31d8b7a1-420e-4252-a5b7-eebe8a111292\") " Nov 28 06:52:54 crc 
kubenswrapper[4922]: I1128 06:52:54.475102 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-audit-policies\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.475124 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-config\") pod \"9d4552c7-cd75-42dd-8880-30dd377c49a4\" (UID: \"9d4552c7-cd75-42dd-8880-30dd377c49a4\") " Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.475148 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-console-config\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.475169 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-client\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.475193 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-operator-metrics\") pod \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\" (UID: \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\") " Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.475238 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-env-overrides\") pod \"925f1c65-6136-48ba-85aa-3a3b50560753\" (UID: \"925f1c65-6136-48ba-85aa-3a3b50560753\") " Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.475264 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zkvpv\" (UniqueName: \"kubernetes.io/projected/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-kube-api-access-zkvpv\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.475289 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-trusted-ca-bundle\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.475312 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-bound-sa-token\") pod \"a31745f5-9847-4afe-82a5-3161cc66ca93\" (UID: \"a31745f5-9847-4afe-82a5-3161cc66ca93\") " Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.475335 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-error\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: 
\"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.475359 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lzf88\" (UniqueName: \"kubernetes.io/projected/0b574797-001e-440a-8f4e-c0be86edad0f-kube-api-access-lzf88\") pod \"0b574797-001e-440a-8f4e-c0be86edad0f\" (UID: \"0b574797-001e-440a-8f4e-c0be86edad0f\") " Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.475383 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7c4vf\" (UniqueName: \"kubernetes.io/projected/22c825df-677d-4ca6-82db-3454ed06e783-kube-api-access-7c4vf\") pod \"22c825df-677d-4ca6-82db-3454ed06e783\" (UID: \"22c825df-677d-4ca6-82db-3454ed06e783\") " Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.475407 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-serving-cert\") pod \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\" (UID: \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\") " Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.475432 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-webhook-cert\") pod \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\" (UID: \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\") " Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.475474 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-srv-cert\") pod \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\" (UID: \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\") " Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.475501 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4d4hj\" (UniqueName: \"kubernetes.io/projected/3ab1a177-2de0-46d9-b765-d0d0649bb42e-kube-api-access-4d4hj\") pod \"3ab1a177-2de0-46d9-b765-d0d0649bb42e\" (UID: \"3ab1a177-2de0-46d9-b765-d0d0649bb42e\") " Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.475523 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-d4lsv\" (UniqueName: \"kubernetes.io/projected/25e176fe-21b4-4974-b1ed-c8b94f112a7f-kube-api-access-d4lsv\") pod \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\" (UID: \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\") " Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.475545 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w7l8j\" (UniqueName: \"kubernetes.io/projected/01ab3dd5-8196-46d0-ad33-122e2ca51def-kube-api-access-w7l8j\") pod \"01ab3dd5-8196-46d0-ad33-122e2ca51def\" (UID: \"01ab3dd5-8196-46d0-ad33-122e2ca51def\") " Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.475565 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/1386a44e-36a2-460c-96d0-0359d2b6f0f5-kube-api-access\") pod \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\" (UID: \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\") " Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.475585 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/01ab3dd5-8196-46d0-ad33-122e2ca51def-serving-cert\") pod 
\"01ab3dd5-8196-46d0-ad33-122e2ca51def\" (UID: \"01ab3dd5-8196-46d0-ad33-122e2ca51def\") " Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.475603 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gf66m\" (UniqueName: \"kubernetes.io/projected/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-kube-api-access-gf66m\") pod \"a0128f3a-b052-44ed-a84e-c4c8aaf17c13\" (UID: \"a0128f3a-b052-44ed-a84e-c4c8aaf17c13\") " Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.475626 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-serving-ca\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.475356 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6402fda4-df10-493c-b4e5-d0569419652d-machine-api-operator-tls" (OuterVolumeSpecName: "machine-api-operator-tls") pod "6402fda4-df10-493c-b4e5-d0569419652d" (UID: "6402fda4-df10-493c-b4e5-d0569419652d"). InnerVolumeSpecName "machine-api-operator-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.475662 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-webhook-cert" (OuterVolumeSpecName: "webhook-cert") pod "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" (UID: "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b"). InnerVolumeSpecName "webhook-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.475452 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-profile-collector-cert" (OuterVolumeSpecName: "profile-collector-cert") pod "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" (UID: "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9"). InnerVolumeSpecName "profile-collector-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.475646 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-provider-selection\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.475733 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-srv-cert\") pod \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\" (UID: \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\") " Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.475755 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qs4fp\" (UniqueName: \"kubernetes.io/projected/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-kube-api-access-qs4fp\") pod \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\" (UID: \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\") " Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.475777 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/c03ee662-fb2f-4fc4-a2c1-af487c19d254-service-ca-bundle\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") " Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.475800 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/0b78653f-4ff9-4508-8672-245ed9b561e3-service-ca\") pod \"0b78653f-4ff9-4508-8672-245ed9b561e3\" (UID: \"0b78653f-4ff9-4508-8672-245ed9b561e3\") " Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.475823 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-bound-sa-token\") pod \"bf126b07-da06-4140-9a57-dfd54fc6b486\" (UID: \"bf126b07-da06-4140-9a57-dfd54fc6b486\") " Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.475843 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vt5rc\" (UniqueName: \"kubernetes.io/projected/44663579-783b-4372-86d6-acf235a62d72-kube-api-access-vt5rc\") pod \"44663579-783b-4372-86d6-acf235a62d72\" (UID: \"44663579-783b-4372-86d6-acf235a62d72\") " Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.475868 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-service-ca\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.475891 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2w9zh\" (UniqueName: \"kubernetes.io/projected/4bb40260-dbaa-4fb0-84df-5e680505d512-kube-api-access-2w9zh\") pod \"4bb40260-dbaa-4fb0-84df-5e680505d512\" (UID: \"4bb40260-dbaa-4fb0-84df-5e680505d512\") " Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.475911 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9xfj7\" (UniqueName: 
\"kubernetes.io/projected/5225d0e4-402f-4861-b410-819f433b1803-kube-api-access-9xfj7\") pod \"5225d0e4-402f-4861-b410-819f433b1803\" (UID: \"5225d0e4-402f-4861-b410-819f433b1803\") " Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.475929 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pj782\" (UniqueName: \"kubernetes.io/projected/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-kube-api-access-pj782\") pod \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\" (UID: \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\") " Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.475949 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-registry-tls\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.475966 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kfwg7\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-kube-api-access-kfwg7\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.475986 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jhbk2\" (UniqueName: \"kubernetes.io/projected/bd23aa5c-e532-4e53-bccf-e79f130c5ae8-kube-api-access-jhbk2\") pod \"bd23aa5c-e532-4e53-bccf-e79f130c5ae8\" (UID: \"bd23aa5c-e532-4e53-bccf-e79f130c5ae8\") " Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.476006 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-auth-proxy-config\") pod \"22c825df-677d-4ca6-82db-3454ed06e783\" (UID: \"22c825df-677d-4ca6-82db-3454ed06e783\") " Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.476110 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.476138 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-oauth-config\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.476161 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-stats-auth\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") " Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.476184 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/bf126b07-da06-4140-9a57-dfd54fc6b486-image-registry-operator-tls\") pod \"bf126b07-da06-4140-9a57-dfd54fc6b486\" (UID: \"bf126b07-da06-4140-9a57-dfd54fc6b486\") " Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.476206 4922 
reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rnphk\" (UniqueName: \"kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-kube-api-access-rnphk\") pod \"bf126b07-da06-4140-9a57-dfd54fc6b486\" (UID: \"bf126b07-da06-4140-9a57-dfd54fc6b486\") " Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.476273 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-serving-cert\") pod \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\" (UID: \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\") " Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.476300 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/6ea678ab-3438-413e-bfe3-290ae7725660-ovn-node-metrics-cert\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") " Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.476324 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-registry-certificates\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.476349 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/8f668bae-612b-4b75-9490-919e737c6a3b-installation-pull-secrets\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.476373 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-catalog-content\") pod \"5225d0e4-402f-4861-b410-819f433b1803\" (UID: \"5225d0e4-402f-4861-b410-819f433b1803\") " Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.476395 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-trusted-ca\") pod \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\" (UID: \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\") " Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.476419 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nzwt7\" (UniqueName: \"kubernetes.io/projected/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-kube-api-access-nzwt7\") pod \"96b93a3a-6083-4aea-8eab-fe1aa8245ad9\" (UID: \"96b93a3a-6083-4aea-8eab-fe1aa8245ad9\") " Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.476444 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-279lb\" (UniqueName: \"kubernetes.io/projected/7bb08738-c794-4ee8-9972-3a62ca171029-kube-api-access-279lb\") pod \"7bb08738-c794-4ee8-9972-3a62ca171029\" (UID: \"7bb08738-c794-4ee8-9972-3a62ca171029\") " Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.476467 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-images\") pod \"31d8b7a1-420e-4252-a5b7-eebe8a111292\" (UID: \"31d8b7a1-420e-4252-a5b7-eebe8a111292\") " 
Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.476492 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/5441d097-087c-4d9a-baa8-b210afa90fc9-serving-cert\") pod \"5441d097-087c-4d9a-baa8-b210afa90fc9\" (UID: \"5441d097-087c-4d9a-baa8-b210afa90fc9\") " Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.476518 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-encryption-config\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.476545 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-metrics-tls\") pod \"96b93a3a-6083-4aea-8eab-fe1aa8245ad9\" (UID: \"96b93a3a-6083-4aea-8eab-fe1aa8245ad9\") " Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.476566 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-serving-cert\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.476591 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-ovnkube-config\") pod \"925f1c65-6136-48ba-85aa-3a3b50560753\" (UID: \"925f1c65-6136-48ba-85aa-3a3b50560753\") " Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.476618 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-htfz6\" (UniqueName: \"kubernetes.io/projected/6ea678ab-3438-413e-bfe3-290ae7725660-kube-api-access-htfz6\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") " Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.476638 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-catalog-content\") pod \"57a731c4-ef35-47a8-b875-bfb08a7f8011\" (UID: \"57a731c4-ef35-47a8-b875-bfb08a7f8011\") " Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.476660 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fcqwp\" (UniqueName: \"kubernetes.io/projected/5fe579f8-e8a6-4643-bce5-a661393c4dde-kube-api-access-fcqwp\") pod \"5fe579f8-e8a6-4643-bce5-a661393c4dde\" (UID: \"5fe579f8-e8a6-4643-bce5-a661393c4dde\") " Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.476685 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wxkg8\" (UniqueName: \"kubernetes.io/projected/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-kube-api-access-wxkg8\") pod \"3cb93b32-e0ae-4377-b9c8-fdb9842c6d59\" (UID: \"3cb93b32-e0ae-4377-b9c8-fdb9842c6d59\") " Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.476707 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w4xd4\" (UniqueName: \"kubernetes.io/projected/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-kube-api-access-w4xd4\") pod \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\" (UID: 
\"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\") " Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.476730 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/e7e6199b-1264-4501-8953-767f51328d08-kube-api-access\") pod \"e7e6199b-1264-4501-8953-767f51328d08\" (UID: \"e7e6199b-1264-4501-8953-767f51328d08\") " Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.476754 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-d6qdx\" (UniqueName: \"kubernetes.io/projected/87cf06ed-a83f-41a7-828d-70653580a8cb-kube-api-access-d6qdx\") pod \"87cf06ed-a83f-41a7-828d-70653580a8cb\" (UID: \"87cf06ed-a83f-41a7-828d-70653580a8cb\") " Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.476777 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x2m85\" (UniqueName: \"kubernetes.io/projected/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d-kube-api-access-x2m85\") pod \"cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d\" (UID: \"cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d\") " Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.476802 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-serving-cert\") pod \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\" (UID: \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\") " Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.476825 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-client\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.476853 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-catalog-content\") pod \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\" (UID: \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\") " Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.476879 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/3ab1a177-2de0-46d9-b765-d0d0649bb42e-package-server-manager-serving-cert\") pod \"3ab1a177-2de0-46d9-b765-d0d0649bb42e\" (UID: \"3ab1a177-2de0-46d9-b765-d0d0649bb42e\") " Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.476903 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-utilities\") pod \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\" (UID: \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\") " Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.476927 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-router-certs\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.476952 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"apiservice-cert\" (UniqueName: 
\"kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-apiservice-cert\") pod \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\" (UID: \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\") " Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.476976 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-script-lib\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") " Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.477000 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-serving-ca\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.477025 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/496e6271-fb68-4057-954e-a0d97a4afa3f-config\") pod \"496e6271-fb68-4057-954e-a0d97a4afa3f\" (UID: \"496e6271-fb68-4057-954e-a0d97a4afa3f\") " Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.477053 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-ocp-branding-template\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.477080 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-cni-binary-copy\") pod \"4bb40260-dbaa-4fb0-84df-5e680505d512\" (UID: \"4bb40260-dbaa-4fb0-84df-5e680505d512\") " Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.477101 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9d4552c7-cd75-42dd-8880-30dd377c49a4-serving-cert\") pod \"9d4552c7-cd75-42dd-8880-30dd377c49a4\" (UID: \"9d4552c7-cd75-42dd-8880-30dd377c49a4\") " Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.477125 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/a31745f5-9847-4afe-82a5-3161cc66ca93-trusted-ca\") pod \"a31745f5-9847-4afe-82a5-3161cc66ca93\" (UID: \"a31745f5-9847-4afe-82a5-3161cc66ca93\") " Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.477146 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-multus-daemon-config\") pod \"4bb40260-dbaa-4fb0-84df-5e680505d512\" (UID: \"4bb40260-dbaa-4fb0-84df-5e680505d512\") " Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.477204 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7539238d-5fe0-46ed-884e-1c3b566537ec-serving-cert\") pod \"7539238d-5fe0-46ed-884e-1c3b566537ec\" (UID: \"7539238d-5fe0-46ed-884e-1c3b566537ec\") " Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.477244 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"tmpfs\" 
(UniqueName: \"kubernetes.io/empty-dir/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-tmpfs\") pod \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\" (UID: \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\") " Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.477269 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8tdtz\" (UniqueName: \"kubernetes.io/projected/09efc573-dbb6-4249-bd59-9b87aba8dd28-kube-api-access-8tdtz\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.475449 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7583ce53-e0fe-4a16-9e4d-50516596a136-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.477289 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bf126b07-da06-4140-9a57-dfd54fc6b486-image-registry-operator-tls" (OuterVolumeSpecName: "image-registry-operator-tls") pod "bf126b07-da06-4140-9a57-dfd54fc6b486" (UID: "bf126b07-da06-4140-9a57-dfd54fc6b486"). InnerVolumeSpecName "image-registry-operator-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.475473 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/31d8b7a1-420e-4252-a5b7-eebe8a111292-proxy-tls" (OuterVolumeSpecName: "proxy-tls") pod "31d8b7a1-420e-4252-a5b7-eebe8a111292" (UID: "31d8b7a1-420e-4252-a5b7-eebe8a111292"). InnerVolumeSpecName "proxy-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.475639 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0b574797-001e-440a-8f4e-c0be86edad0f-kube-api-access-lzf88" (OuterVolumeSpecName: "kube-api-access-lzf88") pod "0b574797-001e-440a-8f4e-c0be86edad0f" (UID: "0b574797-001e-440a-8f4e-c0be86edad0f"). InnerVolumeSpecName "kube-api-access-lzf88". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.475650 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" (UID: "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.475824 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-kube-api-access-zkvpv" (OuterVolumeSpecName: "kube-api-access-zkvpv") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "kube-api-access-zkvpv". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.475852 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-bound-sa-token" (OuterVolumeSpecName: "bound-sa-token") pod "a31745f5-9847-4afe-82a5-3161cc66ca93" (UID: "a31745f5-9847-4afe-82a5-3161cc66ca93"). InnerVolumeSpecName "bound-sa-token". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.475857 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/22c825df-677d-4ca6-82db-3454ed06e783-kube-api-access-7c4vf" (OuterVolumeSpecName: "kube-api-access-7c4vf") pod "22c825df-677d-4ca6-82db-3454ed06e783" (UID: "22c825df-677d-4ca6-82db-3454ed06e783"). InnerVolumeSpecName "kube-api-access-7c4vf". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.475886 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.475969 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-error" (OuterVolumeSpecName: "v4-0-config-user-template-error") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-user-template-error". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.476050 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-env-overrides" (OuterVolumeSpecName: "env-overrides") pod "925f1c65-6136-48ba-85aa-3a3b50560753" (UID: "925f1c65-6136-48ba-85aa-3a3b50560753"). InnerVolumeSpecName "env-overrides". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.476076 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-client" (OuterVolumeSpecName: "etcd-client") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "etcd-client". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.476239 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7583ce53-e0fe-4a16-9e4d-50516596a136-kube-api-access-xcphl" (OuterVolumeSpecName: "kube-api-access-xcphl") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "kube-api-access-xcphl". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.476262 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-operator-metrics" (OuterVolumeSpecName: "marketplace-operator-metrics") pod "b6cd30de-2eeb-49a2-ab40-9167f4560ff5" (UID: "b6cd30de-2eeb-49a2-ab40-9167f4560ff5"). InnerVolumeSpecName "marketplace-operator-metrics". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.476474 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-console-config" (OuterVolumeSpecName: "console-config") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "console-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.476481 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.476566 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-audit-policies" (OuterVolumeSpecName: "audit-policies") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "audit-policies". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.476624 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0b78653f-4ff9-4508-8672-245ed9b561e3-service-ca" (OuterVolumeSpecName: "service-ca") pod "0b78653f-4ff9-4508-8672-245ed9b561e3" (UID: "0b78653f-4ff9-4508-8672-245ed9b561e3"). InnerVolumeSpecName "service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.476651 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-oauth-config" (OuterVolumeSpecName: "console-oauth-config") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "console-oauth-config". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.476731 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-kube-api-access-gf66m" (OuterVolumeSpecName: "kube-api-access-gf66m") pod "a0128f3a-b052-44ed-a84e-c4c8aaf17c13" (UID: "a0128f3a-b052-44ed-a84e-c4c8aaf17c13"). InnerVolumeSpecName "kube-api-access-gf66m". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.476794 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-bound-sa-token" (OuterVolumeSpecName: "bound-sa-token") pod "bf126b07-da06-4140-9a57-dfd54fc6b486" (UID: "bf126b07-da06-4140-9a57-dfd54fc6b486"). InnerVolumeSpecName "bound-sa-token". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.476835 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-config" (OuterVolumeSpecName: "config") pod "9d4552c7-cd75-42dd-8880-30dd377c49a4" (UID: "9d4552c7-cd75-42dd-8880-30dd377c49a4"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.476879 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-srv-cert" (OuterVolumeSpecName: "srv-cert") pod "b6312bbd-5731-4ea0-a20f-81d5a57df44a" (UID: "b6312bbd-5731-4ea0-a20f-81d5a57df44a"). InnerVolumeSpecName "srv-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.476929 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/44663579-783b-4372-86d6-acf235a62d72-kube-api-access-vt5rc" (OuterVolumeSpecName: "kube-api-access-vt5rc") pod "44663579-783b-4372-86d6-acf235a62d72" (UID: "44663579-783b-4372-86d6-acf235a62d72"). InnerVolumeSpecName "kube-api-access-vt5rc". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.476985 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-registry-certificates" (OuterVolumeSpecName: "registry-certificates") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "registry-certificates". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.477010 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-serving-ca" (OuterVolumeSpecName: "etcd-serving-ca") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "etcd-serving-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.477044 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-srv-cert" (OuterVolumeSpecName: "srv-cert") pod "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" (UID: "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9"). InnerVolumeSpecName "srv-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.477048 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3ab1a177-2de0-46d9-b765-d0d0649bb42e-kube-api-access-4d4hj" (OuterVolumeSpecName: "kube-api-access-4d4hj") pod "3ab1a177-2de0-46d9-b765-d0d0649bb42e" (UID: "3ab1a177-2de0-46d9-b765-d0d0649bb42e"). InnerVolumeSpecName "kube-api-access-4d4hj". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.477126 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-stats-auth" (OuterVolumeSpecName: "stats-auth") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "stats-auth". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.477157 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-kube-api-access-qs4fp" (OuterVolumeSpecName: "kube-api-access-qs4fp") pod "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" (UID: "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c"). InnerVolumeSpecName "kube-api-access-qs4fp". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.477193 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/25e176fe-21b4-4974-b1ed-c8b94f112a7f-kube-api-access-d4lsv" (OuterVolumeSpecName: "kube-api-access-d4lsv") pod "25e176fe-21b4-4974-b1ed-c8b94f112a7f" (UID: "25e176fe-21b4-4974-b1ed-c8b94f112a7f"). InnerVolumeSpecName "kube-api-access-d4lsv". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.477270 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-provider-selection" (OuterVolumeSpecName: "v4-0-config-user-template-provider-selection") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-user-template-provider-selection". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.477333 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-service-ca" (OuterVolumeSpecName: "etcd-service-ca") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "etcd-service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.477344 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/01ab3dd5-8196-46d0-ad33-122e2ca51def-kube-api-access-w7l8j" (OuterVolumeSpecName: "kube-api-access-w7l8j") pod "01ab3dd5-8196-46d0-ad33-122e2ca51def" (UID: "01ab3dd5-8196-46d0-ad33-122e2ca51def"). InnerVolumeSpecName "kube-api-access-w7l8j". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.477294 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fqsjt\" (UniqueName: \"kubernetes.io/projected/efdd0498-1daa-4136-9a4a-3b948c2293fc-kube-api-access-fqsjt\") pod \"efdd0498-1daa-4136-9a4a-3b948c2293fc\" (UID: \"efdd0498-1daa-4136-9a4a-3b948c2293fc\") " Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.477503 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1386a44e-36a2-460c-96d0-0359d2b6f0f5-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "1386a44e-36a2-460c-96d0-0359d2b6f0f5" (UID: "1386a44e-36a2-460c-96d0-0359d2b6f0f5"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.477514 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/efdd0498-1daa-4136-9a4a-3b948c2293fc-kube-api-access-fqsjt" (OuterVolumeSpecName: "kube-api-access-fqsjt") pod "efdd0498-1daa-4136-9a4a-3b948c2293fc" (UID: "efdd0498-1daa-4136-9a4a-3b948c2293fc"). InnerVolumeSpecName "kube-api-access-fqsjt". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.477517 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6ea678ab-3438-413e-bfe3-290ae7725660-ovn-node-metrics-cert" (OuterVolumeSpecName: "ovn-node-metrics-cert") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "ovn-node-metrics-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.477519 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4bb40260-dbaa-4fb0-84df-5e680505d512-kube-api-access-2w9zh" (OuterVolumeSpecName: "kube-api-access-2w9zh") pod "4bb40260-dbaa-4fb0-84df-5e680505d512" (UID: "4bb40260-dbaa-4fb0-84df-5e680505d512"). InnerVolumeSpecName "kube-api-access-2w9zh". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.477558 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c03ee662-fb2f-4fc4-a2c1-af487c19d254-service-ca-bundle" (OuterVolumeSpecName: "service-ca-bundle") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "service-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.477668 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/01ab3dd5-8196-46d0-ad33-122e2ca51def-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "01ab3dd5-8196-46d0-ad33-122e2ca51def" (UID: "01ab3dd5-8196-46d0-ad33-122e2ca51def"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.477683 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5225d0e4-402f-4861-b410-819f433b1803-kube-api-access-9xfj7" (OuterVolumeSpecName: "kube-api-access-9xfj7") pod "5225d0e4-402f-4861-b410-819f433b1803" (UID: "5225d0e4-402f-4861-b410-819f433b1803"). InnerVolumeSpecName "kube-api-access-9xfj7". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.477706 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-kube-api-access-rnphk" (OuterVolumeSpecName: "kube-api-access-rnphk") pod "bf126b07-da06-4140-9a57-dfd54fc6b486" (UID: "bf126b07-da06-4140-9a57-dfd54fc6b486"). InnerVolumeSpecName "kube-api-access-rnphk". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.477732 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" (UID: "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.477793 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8f668bae-612b-4b75-9490-919e737c6a3b-installation-pull-secrets" (OuterVolumeSpecName: "installation-pull-secrets") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "installation-pull-secrets". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.477817 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" (UID: "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.477841 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-kube-api-access-pj782" (OuterVolumeSpecName: "kube-api-access-pj782") pod "b6cd30de-2eeb-49a2-ab40-9167f4560ff5" (UID: "b6cd30de-2eeb-49a2-ab40-9167f4560ff5"). InnerVolumeSpecName "kube-api-access-pj782". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.477915 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-kube-api-access-wxkg8" (OuterVolumeSpecName: "kube-api-access-wxkg8") pod "3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" (UID: "3cb93b32-e0ae-4377-b9c8-fdb9842c6d59"). InnerVolumeSpecName "kube-api-access-wxkg8". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.477927 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-binary-copy" (OuterVolumeSpecName: "cni-binary-copy") pod "7bb08738-c794-4ee8-9972-3a62ca171029" (UID: "7bb08738-c794-4ee8-9972-3a62ca171029"). InnerVolumeSpecName "cni-binary-copy". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.477942 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-serving-cert" (OuterVolumeSpecName: "console-serving-cert") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "console-serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.477982 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-kube-api-access-w4xd4" (OuterVolumeSpecName: "kube-api-access-w4xd4") pod "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" (UID: "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b"). InnerVolumeSpecName "kube-api-access-w4xd4". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.478011 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-registry-tls" (OuterVolumeSpecName: "registry-tls") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "registry-tls". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.478138 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e7e6199b-1264-4501-8953-767f51328d08-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "e7e6199b-1264-4501-8953-767f51328d08" (UID: "e7e6199b-1264-4501-8953-767f51328d08"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.478164 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-kube-api-access-kfwg7" (OuterVolumeSpecName: "kube-api-access-kfwg7") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "kube-api-access-kfwg7". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.478196 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-client" (OuterVolumeSpecName: "etcd-client") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "etcd-client". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.478243 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-ovnkube-config" (OuterVolumeSpecName: "ovnkube-config") pod "925f1c65-6136-48ba-85aa-3a3b50560753" (UID: "925f1c65-6136-48ba-85aa-3a3b50560753"). InnerVolumeSpecName "ovnkube-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.478312 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/87cf06ed-a83f-41a7-828d-70653580a8cb-kube-api-access-d6qdx" (OuterVolumeSpecName: "kube-api-access-d6qdx") pod "87cf06ed-a83f-41a7-828d-70653580a8cb" (UID: "87cf06ed-a83f-41a7-828d-70653580a8cb"). 
InnerVolumeSpecName "kube-api-access-d6qdx". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.478340 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bd23aa5c-e532-4e53-bccf-e79f130c5ae8-kube-api-access-jhbk2" (OuterVolumeSpecName: "kube-api-access-jhbk2") pod "bd23aa5c-e532-4e53-bccf-e79f130c5ae8" (UID: "bd23aa5c-e532-4e53-bccf-e79f130c5ae8"). InnerVolumeSpecName "kube-api-access-jhbk2". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.478384 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-images" (OuterVolumeSpecName: "images") pod "31d8b7a1-420e-4252-a5b7-eebe8a111292" (UID: "31d8b7a1-420e-4252-a5b7-eebe8a111292"). InnerVolumeSpecName "images". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.478494 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d-kube-api-access-x2m85" (OuterVolumeSpecName: "kube-api-access-x2m85") pod "cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d" (UID: "cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d"). InnerVolumeSpecName "kube-api-access-x2m85". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.478654 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-ocp-branding-template" (OuterVolumeSpecName: "v4-0-config-system-ocp-branding-template") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-ocp-branding-template". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.478739 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-trusted-ca" (OuterVolumeSpecName: "marketplace-trusted-ca") pod "b6cd30de-2eeb-49a2-ab40-9167f4560ff5" (UID: "b6cd30de-2eeb-49a2-ab40-9167f4560ff5"). InnerVolumeSpecName "marketplace-trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.478761 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-auth-proxy-config" (OuterVolumeSpecName: "auth-proxy-config") pod "22c825df-677d-4ca6-82db-3454ed06e783" (UID: "22c825df-677d-4ca6-82db-3454ed06e783"). InnerVolumeSpecName "auth-proxy-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 06:52:54 crc kubenswrapper[4922]: E1128 06:52:54.478839 4922 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 06:52:54.978822012 +0000 UTC m=+19.899217594 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.478876 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3ab1a177-2de0-46d9-b765-d0d0649bb42e-package-server-manager-serving-cert" (OuterVolumeSpecName: "package-server-manager-serving-cert") pod "3ab1a177-2de0-46d9-b765-d0d0649bb42e" (UID: "3ab1a177-2de0-46d9-b765-d0d0649bb42e"). InnerVolumeSpecName "package-server-manager-serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.478950 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-kube-api-access-nzwt7" (OuterVolumeSpecName: "kube-api-access-nzwt7") pod "96b93a3a-6083-4aea-8eab-fe1aa8245ad9" (UID: "96b93a3a-6083-4aea-8eab-fe1aa8245ad9"). InnerVolumeSpecName "kube-api-access-nzwt7". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.479110 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-utilities" (OuterVolumeSpecName: "utilities") pod "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" (UID: "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.479394 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-router-certs" (OuterVolumeSpecName: "v4-0-config-system-router-certs") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-router-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.479591 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-apiservice-cert" (OuterVolumeSpecName: "apiservice-cert") pod "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" (UID: "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b"). InnerVolumeSpecName "apiservice-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.479979 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-script-lib" (OuterVolumeSpecName: "ovnkube-script-lib") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "ovnkube-script-lib". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.482527 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5fe579f8-e8a6-4643-bce5-a661393c4dde-kube-api-access-fcqwp" (OuterVolumeSpecName: "kube-api-access-fcqwp") pod "5fe579f8-e8a6-4643-bce5-a661393c4dde" (UID: "5fe579f8-e8a6-4643-bce5-a661393c4dde"). InnerVolumeSpecName "kube-api-access-fcqwp". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.482556 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7bb08738-c794-4ee8-9972-3a62ca171029-kube-api-access-279lb" (OuterVolumeSpecName: "kube-api-access-279lb") pod "7bb08738-c794-4ee8-9972-3a62ca171029" (UID: "7bb08738-c794-4ee8-9972-3a62ca171029"). InnerVolumeSpecName "kube-api-access-279lb". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.482801 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-serving-ca" (OuterVolumeSpecName: "etcd-serving-ca") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "etcd-serving-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.482877 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/496e6271-fb68-4057-954e-a0d97a4afa3f-config" (OuterVolumeSpecName: "config") pod "496e6271-fb68-4057-954e-a0d97a4afa3f" (UID: "496e6271-fb68-4057-954e-a0d97a4afa3f"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.477534 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-binary-copy\") pod \"7bb08738-c794-4ee8-9972-3a62ca171029\" (UID: \"7bb08738-c794-4ee8-9972-3a62ca171029\") " Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.483453 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5441d097-087c-4d9a-baa8-b210afa90fc9-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "5441d097-087c-4d9a-baa8-b210afa90fc9" (UID: "5441d097-087c-4d9a-baa8-b210afa90fc9"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.483505 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-metrics-tls" (OuterVolumeSpecName: "metrics-tls") pod "96b93a3a-6083-4aea-8eab-fe1aa8245ad9" (UID: "96b93a3a-6083-4aea-8eab-fe1aa8245ad9"). InnerVolumeSpecName "metrics-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.483564 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6ea678ab-3438-413e-bfe3-290ae7725660-kube-api-access-htfz6" (OuterVolumeSpecName: "kube-api-access-htfz6") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "kube-api-access-htfz6". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.483697 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9d4552c7-cd75-42dd-8880-30dd377c49a4-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "9d4552c7-cd75-42dd-8880-30dd377c49a4" (UID: "9d4552c7-cd75-42dd-8880-30dd377c49a4"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.483783 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/09efc573-dbb6-4249-bd59-9b87aba8dd28-kube-api-access-8tdtz" (OuterVolumeSpecName: "kube-api-access-8tdtz") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "kube-api-access-8tdtz". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.483833 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9d4552c7-cd75-42dd-8880-30dd377c49a4-kube-api-access-pcxfs" (OuterVolumeSpecName: "kube-api-access-pcxfs") pod "9d4552c7-cd75-42dd-8880-30dd377c49a4" (UID: "9d4552c7-cd75-42dd-8880-30dd377c49a4"). InnerVolumeSpecName "kube-api-access-pcxfs". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.483858 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7539238d-5fe0-46ed-884e-1c3b566537ec-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "7539238d-5fe0-46ed-884e-1c3b566537ec" (UID: "7539238d-5fe0-46ed-884e-1c3b566537ec"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.483965 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-tmpfs" (OuterVolumeSpecName: "tmpfs") pod "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" (UID: "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b"). InnerVolumeSpecName "tmpfs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.484109 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-cni-binary-copy" (OuterVolumeSpecName: "cni-binary-copy") pod "4bb40260-dbaa-4fb0-84df-5e680505d512" (UID: "4bb40260-dbaa-4fb0-84df-5e680505d512"). InnerVolumeSpecName "cni-binary-copy". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.486685 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-catalog-content\") pod \"1d611f23-29be-4491-8495-bee1670e935f\" (UID: \"1d611f23-29be-4491-8495-bee1670e935f\") " Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.487304 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-encryption-config" (OuterVolumeSpecName: "encryption-config") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "encryption-config". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.488469 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a31745f5-9847-4afe-82a5-3161cc66ca93-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "a31745f5-9847-4afe-82a5-3161cc66ca93" (UID: "a31745f5-9847-4afe-82a5-3161cc66ca93"). InnerVolumeSpecName "trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.490601 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-encryption-config\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.490630 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-config\") pod \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\" (UID: \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\") " Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.490650 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pjr6v\" (UniqueName: \"kubernetes.io/projected/49ef4625-1d3a-4a9f-b595-c2433d32326d-kube-api-access-pjr6v\") pod \"49ef4625-1d3a-4a9f-b595-c2433d32326d\" (UID: \"49ef4625-1d3a-4a9f-b595-c2433d32326d\") " Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.490669 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-utilities\") pod \"1d611f23-29be-4491-8495-bee1670e935f\" (UID: \"1d611f23-29be-4491-8495-bee1670e935f\") " Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.490686 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-auth-proxy-config\") pod \"31d8b7a1-420e-4252-a5b7-eebe8a111292\" (UID: \"31d8b7a1-420e-4252-a5b7-eebe8a111292\") " Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.490705 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1386a44e-36a2-460c-96d0-0359d2b6f0f5-config\") pod \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\" (UID: \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\") " Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.490721 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-config\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") " Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.490738 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-ca\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.490755 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-serving-cert\") pod 
\"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.490772 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-config\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.490788 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-trusted-ca-bundle\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.490803 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/0b78653f-4ff9-4508-8672-245ed9b561e3-kube-api-access\") pod \"0b78653f-4ff9-4508-8672-245ed9b561e3\" (UID: \"0b78653f-4ff9-4508-8672-245ed9b561e3\") " Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.490821 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lz9wn\" (UniqueName: \"kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-kube-api-access-lz9wn\") pod \"a31745f5-9847-4afe-82a5-3161cc66ca93\" (UID: \"a31745f5-9847-4afe-82a5-3161cc66ca93\") " Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.490839 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-config\") pod \"22c825df-677d-4ca6-82db-3454ed06e783\" (UID: \"22c825df-677d-4ca6-82db-3454ed06e783\") " Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.490854 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jkwtn\" (UniqueName: \"kubernetes.io/projected/5b88f790-22fa-440e-b583-365168c0b23d-kube-api-access-jkwtn\") pod \"5b88f790-22fa-440e-b583-365168c0b23d\" (UID: \"5b88f790-22fa-440e-b583-365168c0b23d\") " Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.490869 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-service-ca\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.490889 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tk88c\" (UniqueName: \"kubernetes.io/projected/7539238d-5fe0-46ed-884e-1c3b566537ec-kube-api-access-tk88c\") pod \"7539238d-5fe0-46ed-884e-1c3b566537ec\" (UID: \"7539238d-5fe0-46ed-884e-1c3b566537ec\") " Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.490910 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-trusted-ca-bundle\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.490929 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-audit-policies\") 
pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.490947 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-config\") pod \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\" (UID: \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\") " Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.490964 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-sb6h7\" (UniqueName: \"kubernetes.io/projected/1bf7eb37-55a3-4c65-b768-a94c82151e69-kube-api-access-sb6h7\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.490982 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-utilities\") pod \"57a731c4-ef35-47a8-b875-bfb08a7f8011\" (UID: \"57a731c4-ef35-47a8-b875-bfb08a7f8011\") " Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.491000 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mg5zb\" (UniqueName: \"kubernetes.io/projected/6402fda4-df10-493c-b4e5-d0569419652d-kube-api-access-mg5zb\") pod \"6402fda4-df10-493c-b4e5-d0569419652d\" (UID: \"6402fda4-df10-493c-b4e5-d0569419652d\") " Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.491017 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-session\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.491036 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-service-ca\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.491056 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6ccd8\" (UniqueName: \"kubernetes.io/projected/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-kube-api-access-6ccd8\") pod \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\" (UID: \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\") " Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.491076 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-available-featuregates\") pod \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\" (UID: \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\") " Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.491095 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-cliconfig\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.491112 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume 
\"kube-api-access-zgdk5\" (UniqueName: \"kubernetes.io/projected/31d8b7a1-420e-4252-a5b7-eebe8a111292-kube-api-access-zgdk5\") pod \"31d8b7a1-420e-4252-a5b7-eebe8a111292\" (UID: \"31d8b7a1-420e-4252-a5b7-eebe8a111292\") " Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.491129 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-trusted-ca\") pod \"9d4552c7-cd75-42dd-8880-30dd377c49a4\" (UID: \"9d4552c7-cd75-42dd-8880-30dd377c49a4\") " Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.491148 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-serving-cert\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.491151 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "1d611f23-29be-4491-8495-bee1670e935f" (UID: "1d611f23-29be-4491-8495-bee1670e935f"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.491172 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-proxy-ca-bundles\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") " Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.491194 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qg5z5\" (UniqueName: \"kubernetes.io/projected/43509403-f426-496e-be36-56cef71462f5-kube-api-access-qg5z5\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.491209 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-multus-daemon-config" (OuterVolumeSpecName: "multus-daemon-config") pod "4bb40260-dbaa-4fb0-84df-5e680505d512" (UID: "4bb40260-dbaa-4fb0-84df-5e680505d512"). InnerVolumeSpecName "multus-daemon-config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.491277 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-serviceca\") pod \"3cb93b32-e0ae-4377-b9c8-fdb9842c6d59\" (UID: \"3cb93b32-e0ae-4377-b9c8-fdb9842c6d59\") " Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.491299 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1386a44e-36a2-460c-96d0-0359d2b6f0f5-serving-cert\") pod \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\" (UID: \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\") " Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.491316 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-node-bootstrap-token\") pod \"5fe579f8-e8a6-4643-bce5-a661393c4dde\" (UID: \"5fe579f8-e8a6-4643-bce5-a661393c4dde\") " Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.491336 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-profile-collector-cert\") pod \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\" (UID: \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\") " Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.491353 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-images\") pod \"6402fda4-df10-493c-b4e5-d0569419652d\" (UID: \"6402fda4-df10-493c-b4e5-d0569419652d\") " Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.491369 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/87cf06ed-a83f-41a7-828d-70653580a8cb-config-volume\") pod \"87cf06ed-a83f-41a7-828d-70653580a8cb\" (UID: \"87cf06ed-a83f-41a7-828d-70653580a8cb\") " Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.491387 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-key\") pod \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\" (UID: \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\") " Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.491480 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f54a7fc3-dff2-4919-91a9-7defe17b717e\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2b39d53d3c00be07ad6ed44fb37961669118efd3f0a953a39e05c90c9fc30319\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1c84fe4f54f9b9fc76f23f13aaee875e9d127be3dec3e3952893436fb9d94936\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bf8a543b9af64068e2164c1fb7fcf8117e36a3f1a6c6e40df2a63da8fea86612\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7ef4b355167e11c82fb8067373d0d1b4ec5551157e683043c98f9249082795d2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T06:52:35Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.491635 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-client-ca\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") " Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.491655 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-utilities\") pod \"5225d0e4-402f-4861-b410-819f433b1803\" (UID: \"5225d0e4-402f-4861-b410-819f433b1803\") " Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.494381 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-service-ca" (OuterVolumeSpecName: "service-ca") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.494549 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-cliconfig" (OuterVolumeSpecName: "v4-0-config-system-cliconfig") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-cliconfig". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.494598 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-ca" (OuterVolumeSpecName: "etcd-ca") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "etcd-ca". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.494916 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-encryption-config" (OuterVolumeSpecName: "encryption-config") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "encryption-config". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.495108 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0b78653f-4ff9-4508-8672-245ed9b561e3-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "0b78653f-4ff9-4508-8672-245ed9b561e3" (UID: "0b78653f-4ff9-4508-8672-245ed9b561e3"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.495352 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.495567 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/49ef4625-1d3a-4a9f-b595-c2433d32326d-kube-api-access-pjr6v" (OuterVolumeSpecName: "kube-api-access-pjr6v") pod "49ef4625-1d3a-4a9f-b595-c2433d32326d" (UID: "49ef4625-1d3a-4a9f-b595-c2433d32326d"). InnerVolumeSpecName "kube-api-access-pjr6v". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.495831 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-config" (OuterVolumeSpecName: "config") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.496111 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-config" (OuterVolumeSpecName: "config") pod "22c825df-677d-4ca6-82db-3454ed06e783" (UID: "22c825df-677d-4ca6-82db-3454ed06e783"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.496411 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.496428 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-utilities" (OuterVolumeSpecName: "utilities") pod "57a731c4-ef35-47a8-b875-bfb08a7f8011" (UID: "57a731c4-ef35-47a8-b875-bfb08a7f8011"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.496613 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-kube-api-access-lz9wn" (OuterVolumeSpecName: "kube-api-access-lz9wn") pod "a31745f5-9847-4afe-82a5-3161cc66ca93" (UID: "a31745f5-9847-4afe-82a5-3161cc66ca93"). InnerVolumeSpecName "kube-api-access-lz9wn". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.496999 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-image-import-ca" (OuterVolumeSpecName: "image-import-ca") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "image-import-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.497016 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-node-bootstrap-token" (OuterVolumeSpecName: "node-bootstrap-token") pod "5fe579f8-e8a6-4643-bce5-a661393c4dde" (UID: "5fe579f8-e8a6-4643-bce5-a661393c4dde"). InnerVolumeSpecName "node-bootstrap-token". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.497476 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/31d8b7a1-420e-4252-a5b7-eebe8a111292-kube-api-access-zgdk5" (OuterVolumeSpecName: "kube-api-access-zgdk5") pod "31d8b7a1-420e-4252-a5b7-eebe8a111292" (UID: "31d8b7a1-420e-4252-a5b7-eebe8a111292"). InnerVolumeSpecName "kube-api-access-zgdk5". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.497720 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-profile-collector-cert" (OuterVolumeSpecName: "profile-collector-cert") pod "b6312bbd-5731-4ea0-a20f-81d5a57df44a" (UID: "b6312bbd-5731-4ea0-a20f-81d5a57df44a"). InnerVolumeSpecName "profile-collector-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.498257 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1bf7eb37-55a3-4c65-b768-a94c82151e69-kube-api-access-sb6h7" (OuterVolumeSpecName: "kube-api-access-sb6h7") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "kube-api-access-sb6h7". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.498346 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-config" (OuterVolumeSpecName: "config") pod "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" (UID: "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.491673 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-image-import-ca\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.499758 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x4zgh\" (UniqueName: \"kubernetes.io/projected/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-kube-api-access-x4zgh\") pod \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\" (UID: \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\") " Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.499791 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-client\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.499814 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-config\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") " Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.500058 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/fda69060-fa79-4696-b1a6-7980f124bf7c-mcd-auth-proxy-config\") pod \"fda69060-fa79-4696-b1a6-7980f124bf7c\" (UID: \"fda69060-fa79-4696-b1a6-7980f124bf7c\") " Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.500113 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-serving-cert\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.500141 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/496e6271-fb68-4057-954e-a0d97a4afa3f-serving-cert\") pod \"496e6271-fb68-4057-954e-a0d97a4afa3f\" (UID: \"496e6271-fb68-4057-954e-a0d97a4afa3f\") " Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.500176 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/01ab3dd5-8196-46d0-ad33-122e2ca51def-config\") pod \"01ab3dd5-8196-46d0-ad33-122e2ca51def\" (UID: \"01ab3dd5-8196-46d0-ad33-122e2ca51def\") " Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.501536 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-serving-cert\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.501602 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/87cf06ed-a83f-41a7-828d-70653580a8cb-metrics-tls\") pod \"87cf06ed-a83f-41a7-828d-70653580a8cb\" (UID: \"87cf06ed-a83f-41a7-828d-70653580a8cb\") " 
Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.501624 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/efdd0498-1daa-4136-9a4a-3b948c2293fc-webhook-certs\") pod \"efdd0498-1daa-4136-9a4a-3b948c2293fc\" (UID: \"efdd0498-1daa-4136-9a4a-3b948c2293fc\") " Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.501646 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/6731426b-95fe-49ff-bb5f-40441049fde2-control-plane-machine-set-operator-tls\") pod \"6731426b-95fe-49ff-bb5f-40441049fde2\" (UID: \"6731426b-95fe-49ff-bb5f-40441049fde2\") " Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.501680 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-login\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.501705 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-config\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.501725 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/5b88f790-22fa-440e-b583-365168c0b23d-metrics-certs\") pod \"5b88f790-22fa-440e-b583-365168c0b23d\" (UID: \"5b88f790-22fa-440e-b583-365168c0b23d\") " Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.501744 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"certs\" (UniqueName: \"kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-certs\") pod \"5fe579f8-e8a6-4643-bce5-a661393c4dde\" (UID: \"5fe579f8-e8a6-4643-bce5-a661393c4dde\") " Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.501766 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e7e6199b-1264-4501-8953-767f51328d08-serving-cert\") pod \"e7e6199b-1264-4501-8953-767f51328d08\" (UID: \"e7e6199b-1264-4501-8953-767f51328d08\") " Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.501791 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/8f668bae-612b-4b75-9490-919e737c6a3b-ca-trust-extracted\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.501813 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/fda69060-fa79-4696-b1a6-7980f124bf7c-proxy-tls\") pod \"fda69060-fa79-4696-b1a6-7980f124bf7c\" (UID: \"fda69060-fa79-4696-b1a6-7980f124bf7c\") " Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.501833 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-default-certificate\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: 
\"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") " Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.501854 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-config\") pod \"6402fda4-df10-493c-b4e5-d0569419652d\" (UID: \"6402fda4-df10-493c-b4e5-d0569419652d\") " Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.501874 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0b78653f-4ff9-4508-8672-245ed9b561e3-serving-cert\") pod \"0b78653f-4ff9-4508-8672-245ed9b561e3\" (UID: \"0b78653f-4ff9-4508-8672-245ed9b561e3\") " Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.501895 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-cabundle\") pod \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\" (UID: \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\") " Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.501914 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-trusted-ca-bundle\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.501937 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/0b574797-001e-440a-8f4e-c0be86edad0f-mcc-auth-proxy-config\") pod \"0b574797-001e-440a-8f4e-c0be86edad0f\" (UID: \"0b574797-001e-440a-8f4e-c0be86edad0f\") " Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.501959 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ngvvp\" (UniqueName: \"kubernetes.io/projected/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-kube-api-access-ngvvp\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.501978 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-v47cf\" (UniqueName: \"kubernetes.io/projected/c03ee662-fb2f-4fc4-a2c1-af487c19d254-kube-api-access-v47cf\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") " Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.501999 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-client-ca\") pod \"5441d097-087c-4d9a-baa8-b210afa90fc9\" (UID: \"5441d097-087c-4d9a-baa8-b210afa90fc9\") " Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.502020 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mnrrd\" (UniqueName: \"kubernetes.io/projected/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-kube-api-access-mnrrd\") pod \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\" (UID: \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\") " Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.502042 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w9rds\" (UniqueName: 
\"kubernetes.io/projected/20b0d48f-5fd6-431c-a545-e3c800c7b866-kube-api-access-w9rds\") pod \"20b0d48f-5fd6-431c-a545-e3c800c7b866\" (UID: \"20b0d48f-5fd6-431c-a545-e3c800c7b866\") " Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.502060 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-s4n52\" (UniqueName: \"kubernetes.io/projected/925f1c65-6136-48ba-85aa-3a3b50560753-kube-api-access-s4n52\") pod \"925f1c65-6136-48ba-85aa-3a3b50560753\" (UID: \"925f1c65-6136-48ba-85aa-3a3b50560753\") " Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.502085 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/bf126b07-da06-4140-9a57-dfd54fc6b486-trusted-ca\") pod \"bf126b07-da06-4140-9a57-dfd54fc6b486\" (UID: \"bf126b07-da06-4140-9a57-dfd54fc6b486\") " Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.502105 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cfbct\" (UniqueName: \"kubernetes.io/projected/57a731c4-ef35-47a8-b875-bfb08a7f8011-kube-api-access-cfbct\") pod \"57a731c4-ef35-47a8-b875-bfb08a7f8011\" (UID: \"57a731c4-ef35-47a8-b875-bfb08a7f8011\") " Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.502123 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-249nr\" (UniqueName: \"kubernetes.io/projected/b6312bbd-5731-4ea0-a20f-81d5a57df44a-kube-api-access-249nr\") pod \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\" (UID: \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\") " Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.502144 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7539238d-5fe0-46ed-884e-1c3b566537ec-config\") pod \"7539238d-5fe0-46ed-884e-1c3b566537ec\" (UID: \"7539238d-5fe0-46ed-884e-1c3b566537ec\") " Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.502167 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/496e6271-fb68-4057-954e-a0d97a4afa3f-kube-api-access\") pod \"496e6271-fb68-4057-954e-a0d97a4afa3f\" (UID: \"496e6271-fb68-4057-954e-a0d97a4afa3f\") " Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.502191 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/0b574797-001e-440a-8f4e-c0be86edad0f-proxy-tls\") pod \"0b574797-001e-440a-8f4e-c0be86edad0f\" (UID: \"0b574797-001e-440a-8f4e-c0be86edad0f\") " Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.502208 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-config\") pod \"5441d097-087c-4d9a-baa8-b210afa90fc9\" (UID: \"5441d097-087c-4d9a-baa8-b210afa90fc9\") " Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.502250 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-service-ca-bundle\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") " Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.502270 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x7zkh\" (UniqueName: 
\"kubernetes.io/projected/6731426b-95fe-49ff-bb5f-40441049fde2-kube-api-access-x7zkh\") pod \"6731426b-95fe-49ff-bb5f-40441049fde2\" (UID: \"6731426b-95fe-49ff-bb5f-40441049fde2\") " Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.502291 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/925f1c65-6136-48ba-85aa-3a3b50560753-ovn-control-plane-metrics-cert\") pod \"925f1c65-6136-48ba-85aa-3a3b50560753\" (UID: \"925f1c65-6136-48ba-85aa-3a3b50560753\") " Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.502312 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-trusted-ca\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.502335 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e7e6199b-1264-4501-8953-767f51328d08-config\") pod \"e7e6199b-1264-4501-8953-767f51328d08\" (UID: \"e7e6199b-1264-4501-8953-767f51328d08\") " Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.502354 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-bound-sa-token\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.502375 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/20b0d48f-5fd6-431c-a545-e3c800c7b866-cert\") pod \"20b0d48f-5fd6-431c-a545-e3c800c7b866\" (UID: \"20b0d48f-5fd6-431c-a545-e3c800c7b866\") " Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.502395 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6509e943-70c6-444c-bc41-48a544e36fbd-serving-cert\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") " Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.502415 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-samples-operator-tls\") pod \"a0128f3a-b052-44ed-a84e-c4c8aaf17c13\" (UID: \"a0128f3a-b052-44ed-a84e-c4c8aaf17c13\") " Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.502433 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bf2bz\" (UniqueName: \"kubernetes.io/projected/1d611f23-29be-4491-8495-bee1670e935f-kube-api-access-bf2bz\") pod \"1d611f23-29be-4491-8495-bee1670e935f\" (UID: \"1d611f23-29be-4491-8495-bee1670e935f\") " Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.502453 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-audit\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.502476 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6g6sz\" (UniqueName: 
\"kubernetes.io/projected/6509e943-70c6-444c-bc41-48a544e36fbd-kube-api-access-6g6sz\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") " Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.502497 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-idp-0-file-data\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.502519 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/22c825df-677d-4ca6-82db-3454ed06e783-machine-approver-tls\") pod \"22c825df-677d-4ca6-82db-3454ed06e783\" (UID: \"22c825df-677d-4ca6-82db-3454ed06e783\") " Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.502538 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-env-overrides\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") " Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.503179 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2d4wz\" (UniqueName: \"kubernetes.io/projected/5441d097-087c-4d9a-baa8-b210afa90fc9-kube-api-access-2d4wz\") pod \"5441d097-087c-4d9a-baa8-b210afa90fc9\" (UID: \"5441d097-087c-4d9a-baa8-b210afa90fc9\") " Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.503209 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-oauth-serving-cert\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.503250 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xcgwh\" (UniqueName: \"kubernetes.io/projected/fda69060-fa79-4696-b1a6-7980f124bf7c-kube-api-access-xcgwh\") pod \"fda69060-fa79-4696-b1a6-7980f124bf7c\" (UID: \"fda69060-fa79-4696-b1a6-7980f124bf7c\") " Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.503270 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-sysctl-allowlist\") pod \"7bb08738-c794-4ee8-9972-3a62ca171029\" (UID: \"7bb08738-c794-4ee8-9972-3a62ca171029\") " Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.503288 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-metrics-certs\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") " Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.503309 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dbsvg\" (UniqueName: \"kubernetes.io/projected/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-kube-api-access-dbsvg\") pod \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\" (UID: \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\") " Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.503332 4922 
reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/a31745f5-9847-4afe-82a5-3161cc66ca93-metrics-tls\") pod \"a31745f5-9847-4afe-82a5-3161cc66ca93\" (UID: \"a31745f5-9847-4afe-82a5-3161cc66ca93\") " Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.503351 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-config\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") " Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.503423 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rdwmf\" (UniqueName: \"kubernetes.io/projected/37a5e44f-9a88-4405-be8a-b645485e7312-kube-api-access-rdwmf\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.503449 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.503509 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-host-slash\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.503532 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-etc-kube\" (UniqueName: \"kubernetes.io/host-path/37a5e44f-9a88-4405-be8a-b645485e7312-host-etc-kube\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.503554 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/37a5e44f-9a88-4405-be8a-b645485e7312-metrics-tls\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.503577 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.503598 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"iptables-alerter-script\" (UniqueName: \"kubernetes.io/configmap/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-iptables-alerter-script\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " 
pod="openshift-network-operator/iptables-alerter-4ln5h" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.503621 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-env-overrides\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.503643 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rczfb\" (UniqueName: \"kubernetes.io/projected/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-kube-api-access-rczfb\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.503666 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.503690 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-identity-cm\" (UniqueName: \"kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-ovnkube-identity-cm\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.503820 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2kz5\" (UniqueName: \"kubernetes.io/projected/ef543e1b-8068-4ea3-b32a-61027b32e95d-kube-api-access-s2kz5\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.503845 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/ef543e1b-8068-4ea3-b32a-61027b32e95d-webhook-cert\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.503869 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.503981 4922 reconciler_common.go:293] "Volume detached for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-audit-policies\") on node \"crc\" DevicePath \"\"" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.503995 4922 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-config\") on node \"crc\" DevicePath \"\"" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.504007 4922 
reconciler_common.go:293] "Volume detached for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-console-config\") on node \"crc\" DevicePath \"\"" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.504022 4922 reconciler_common.go:293] "Volume detached for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-client\") on node \"crc\" DevicePath \"\"" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.504036 4922 reconciler_common.go:293] "Volume detached for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-operator-metrics\") on node \"crc\" DevicePath \"\"" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.504052 4922 reconciler_common.go:293] "Volume detached for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-env-overrides\") on node \"crc\" DevicePath \"\"" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.504065 4922 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zkvpv\" (UniqueName: \"kubernetes.io/projected/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-kube-api-access-zkvpv\") on node \"crc\" DevicePath \"\"" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.504085 4922 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.504097 4922 reconciler_common.go:293] "Volume detached for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-bound-sa-token\") on node \"crc\" DevicePath \"\"" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.504107 4922 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-error\") on node \"crc\" DevicePath \"\"" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.504131 4922 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lzf88\" (UniqueName: \"kubernetes.io/projected/0b574797-001e-440a-8f4e-c0be86edad0f-kube-api-access-lzf88\") on node \"crc\" DevicePath \"\"" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.504146 4922 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7c4vf\" (UniqueName: \"kubernetes.io/projected/22c825df-677d-4ca6-82db-3454ed06e783-kube-api-access-7c4vf\") on node \"crc\" DevicePath \"\"" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.504156 4922 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.504167 4922 reconciler_common.go:293] "Volume detached for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-webhook-cert\") on node \"crc\" DevicePath \"\"" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.504179 4922 reconciler_common.go:293] "Volume detached for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-srv-cert\") on node \"crc\" DevicePath \"\"" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.504189 4922 
reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4d4hj\" (UniqueName: \"kubernetes.io/projected/3ab1a177-2de0-46d9-b765-d0d0649bb42e-kube-api-access-4d4hj\") on node \"crc\" DevicePath \"\"" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.504200 4922 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-d4lsv\" (UniqueName: \"kubernetes.io/projected/25e176fe-21b4-4974-b1ed-c8b94f112a7f-kube-api-access-d4lsv\") on node \"crc\" DevicePath \"\"" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.504210 4922 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w7l8j\" (UniqueName: \"kubernetes.io/projected/01ab3dd5-8196-46d0-ad33-122e2ca51def-kube-api-access-w7l8j\") on node \"crc\" DevicePath \"\"" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.504238 4922 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/1386a44e-36a2-460c-96d0-0359d2b6f0f5-kube-api-access\") on node \"crc\" DevicePath \"\"" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.504249 4922 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/01ab3dd5-8196-46d0-ad33-122e2ca51def-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.504259 4922 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gf66m\" (UniqueName: \"kubernetes.io/projected/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-kube-api-access-gf66m\") on node \"crc\" DevicePath \"\"" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.504268 4922 reconciler_common.go:293] "Volume detached for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-serving-ca\") on node \"crc\" DevicePath \"\"" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.504281 4922 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-provider-selection\") on node \"crc\" DevicePath \"\"" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.504293 4922 reconciler_common.go:293] "Volume detached for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-srv-cert\") on node \"crc\" DevicePath \"\"" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.504304 4922 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qs4fp\" (UniqueName: \"kubernetes.io/projected/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-kube-api-access-qs4fp\") on node \"crc\" DevicePath \"\"" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.504317 4922 reconciler_common.go:293] "Volume detached for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/c03ee662-fb2f-4fc4-a2c1-af487c19d254-service-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.504326 4922 reconciler_common.go:293] "Volume detached for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/0b78653f-4ff9-4508-8672-245ed9b561e3-service-ca\") on node \"crc\" DevicePath \"\"" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.504338 4922 reconciler_common.go:293] "Volume detached for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-bound-sa-token\") on node \"crc\" DevicePath \"\"" Nov 28 06:52:54 crc 
kubenswrapper[4922]: I1128 06:52:54.504350 4922 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vt5rc\" (UniqueName: \"kubernetes.io/projected/44663579-783b-4372-86d6-acf235a62d72-kube-api-access-vt5rc\") on node \"crc\" DevicePath \"\"" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.504364 4922 reconciler_common.go:293] "Volume detached for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-service-ca\") on node \"crc\" DevicePath \"\"" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.504377 4922 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2w9zh\" (UniqueName: \"kubernetes.io/projected/4bb40260-dbaa-4fb0-84df-5e680505d512-kube-api-access-2w9zh\") on node \"crc\" DevicePath \"\"" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.504389 4922 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9xfj7\" (UniqueName: \"kubernetes.io/projected/5225d0e4-402f-4861-b410-819f433b1803-kube-api-access-9xfj7\") on node \"crc\" DevicePath \"\"" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.504399 4922 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pj782\" (UniqueName: \"kubernetes.io/projected/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-kube-api-access-pj782\") on node \"crc\" DevicePath \"\"" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.504411 4922 reconciler_common.go:293] "Volume detached for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-registry-tls\") on node \"crc\" DevicePath \"\"" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.504422 4922 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kfwg7\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-kube-api-access-kfwg7\") on node \"crc\" DevicePath \"\"" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.504432 4922 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jhbk2\" (UniqueName: \"kubernetes.io/projected/bd23aa5c-e532-4e53-bccf-e79f130c5ae8-kube-api-access-jhbk2\") on node \"crc\" DevicePath \"\"" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.504442 4922 reconciler_common.go:293] "Volume detached for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-auth-proxy-config\") on node \"crc\" DevicePath \"\"" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.504454 4922 reconciler_common.go:293] "Volume detached for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-oauth-config\") on node \"crc\" DevicePath \"\"" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.504465 4922 reconciler_common.go:293] "Volume detached for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-stats-auth\") on node \"crc\" DevicePath \"\"" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.504475 4922 reconciler_common.go:293] "Volume detached for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/bf126b07-da06-4140-9a57-dfd54fc6b486-image-registry-operator-tls\") on node \"crc\" DevicePath \"\"" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.504487 4922 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rnphk\" (UniqueName: \"kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-kube-api-access-rnphk\") 
on node \"crc\" DevicePath \"\"" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.504499 4922 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.504509 4922 reconciler_common.go:293] "Volume detached for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/6ea678ab-3438-413e-bfe3-290ae7725660-ovn-node-metrics-cert\") on node \"crc\" DevicePath \"\"" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.504520 4922 reconciler_common.go:293] "Volume detached for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-registry-certificates\") on node \"crc\" DevicePath \"\"" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.504532 4922 reconciler_common.go:293] "Volume detached for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/8f668bae-612b-4b75-9490-919e737c6a3b-installation-pull-secrets\") on node \"crc\" DevicePath \"\"" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.504541 4922 reconciler_common.go:293] "Volume detached for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-trusted-ca\") on node \"crc\" DevicePath \"\"" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.504553 4922 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nzwt7\" (UniqueName: \"kubernetes.io/projected/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-kube-api-access-nzwt7\") on node \"crc\" DevicePath \"\"" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.504563 4922 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-279lb\" (UniqueName: \"kubernetes.io/projected/7bb08738-c794-4ee8-9972-3a62ca171029-kube-api-access-279lb\") on node \"crc\" DevicePath \"\"" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.504577 4922 reconciler_common.go:293] "Volume detached for volume \"images\" (UniqueName: \"kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-images\") on node \"crc\" DevicePath \"\"" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.504586 4922 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/5441d097-087c-4d9a-baa8-b210afa90fc9-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.504596 4922 reconciler_common.go:293] "Volume detached for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-encryption-config\") on node \"crc\" DevicePath \"\"" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.504607 4922 reconciler_common.go:293] "Volume detached for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-metrics-tls\") on node \"crc\" DevicePath \"\"" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.504619 4922 reconciler_common.go:293] "Volume detached for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.504667 4922 reconciler_common.go:293] "Volume detached for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-ovnkube-config\") on node 
\"crc\" DevicePath \"\"" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.504683 4922 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-htfz6\" (UniqueName: \"kubernetes.io/projected/6ea678ab-3438-413e-bfe3-290ae7725660-kube-api-access-htfz6\") on node \"crc\" DevicePath \"\"" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.504702 4922 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fcqwp\" (UniqueName: \"kubernetes.io/projected/5fe579f8-e8a6-4643-bce5-a661393c4dde-kube-api-access-fcqwp\") on node \"crc\" DevicePath \"\"" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.504738 4922 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wxkg8\" (UniqueName: \"kubernetes.io/projected/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-kube-api-access-wxkg8\") on node \"crc\" DevicePath \"\"" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.504752 4922 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w4xd4\" (UniqueName: \"kubernetes.io/projected/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-kube-api-access-w4xd4\") on node \"crc\" DevicePath \"\"" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.504763 4922 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/e7e6199b-1264-4501-8953-767f51328d08-kube-api-access\") on node \"crc\" DevicePath \"\"" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.504799 4922 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-d6qdx\" (UniqueName: \"kubernetes.io/projected/87cf06ed-a83f-41a7-828d-70653580a8cb-kube-api-access-d6qdx\") on node \"crc\" DevicePath \"\"" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.504813 4922 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x2m85\" (UniqueName: \"kubernetes.io/projected/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d-kube-api-access-x2m85\") on node \"crc\" DevicePath \"\"" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.504826 4922 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.504837 4922 reconciler_common.go:293] "Volume detached for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-client\") on node \"crc\" DevicePath \"\"" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.504851 4922 reconciler_common.go:293] "Volume detached for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/3ab1a177-2de0-46d9-b765-d0d0649bb42e-package-server-manager-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.504862 4922 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-utilities\") on node \"crc\" DevicePath \"\"" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.504875 4922 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-router-certs\") on node \"crc\" DevicePath \"\"" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.504890 4922 reconciler_common.go:293] "Volume detached for volume \"apiservice-cert\" (UniqueName: 
\"kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-apiservice-cert\") on node \"crc\" DevicePath \"\"" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.504902 4922 reconciler_common.go:293] "Volume detached for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-script-lib\") on node \"crc\" DevicePath \"\"" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.504915 4922 reconciler_common.go:293] "Volume detached for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-serving-ca\") on node \"crc\" DevicePath \"\"" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.504925 4922 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/496e6271-fb68-4057-954e-a0d97a4afa3f-config\") on node \"crc\" DevicePath \"\"" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.504937 4922 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-ocp-branding-template\") on node \"crc\" DevicePath \"\"" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.504947 4922 reconciler_common.go:293] "Volume detached for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-cni-binary-copy\") on node \"crc\" DevicePath \"\"" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.504958 4922 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9d4552c7-cd75-42dd-8880-30dd377c49a4-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.504967 4922 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/a31745f5-9847-4afe-82a5-3161cc66ca93-trusted-ca\") on node \"crc\" DevicePath \"\"" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.504978 4922 reconciler_common.go:293] "Volume detached for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-multus-daemon-config\") on node \"crc\" DevicePath \"\"" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.504988 4922 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7539238d-5fe0-46ed-884e-1c3b566537ec-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.504998 4922 reconciler_common.go:293] "Volume detached for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-tmpfs\") on node \"crc\" DevicePath \"\"" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.505011 4922 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8tdtz\" (UniqueName: \"kubernetes.io/projected/09efc573-dbb6-4249-bd59-9b87aba8dd28-kube-api-access-8tdtz\") on node \"crc\" DevicePath \"\"" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.505028 4922 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fqsjt\" (UniqueName: \"kubernetes.io/projected/efdd0498-1daa-4136-9a4a-3b948c2293fc-kube-api-access-fqsjt\") on node \"crc\" DevicePath \"\"" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.505041 4922 reconciler_common.go:293] "Volume detached for volume \"cni-binary-copy\" (UniqueName: 
\"kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-binary-copy\") on node \"crc\" DevicePath \"\"" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.505054 4922 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.505070 4922 reconciler_common.go:293] "Volume detached for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-encryption-config\") on node \"crc\" DevicePath \"\"" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.505083 4922 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pjr6v\" (UniqueName: \"kubernetes.io/projected/49ef4625-1d3a-4a9f-b595-c2433d32326d-kube-api-access-pjr6v\") on node \"crc\" DevicePath \"\"" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.505095 4922 reconciler_common.go:293] "Volume detached for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-ca\") on node \"crc\" DevicePath \"\"" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.505107 4922 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.505123 4922 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-config\") on node \"crc\" DevicePath \"\"" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.505136 4922 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.505147 4922 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/0b78653f-4ff9-4508-8672-245ed9b561e3-kube-api-access\") on node \"crc\" DevicePath \"\"" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.505159 4922 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lz9wn\" (UniqueName: \"kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-kube-api-access-lz9wn\") on node \"crc\" DevicePath \"\"" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.505174 4922 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-config\") on node \"crc\" DevicePath \"\"" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.505185 4922 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-config\") on node \"crc\" DevicePath \"\"" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.505196 4922 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-sb6h7\" (UniqueName: \"kubernetes.io/projected/1bf7eb37-55a3-4c65-b768-a94c82151e69-kube-api-access-sb6h7\") on node \"crc\" DevicePath \"\"" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.505211 4922 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-utilities\") on node 
\"crc\" DevicePath \"\"" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.505241 4922 reconciler_common.go:293] "Volume detached for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-service-ca\") on node \"crc\" DevicePath \"\"" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.505274 4922 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-cliconfig\") on node \"crc\" DevicePath \"\"" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.505293 4922 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zgdk5\" (UniqueName: \"kubernetes.io/projected/31d8b7a1-420e-4252-a5b7-eebe8a111292-kube-api-access-zgdk5\") on node \"crc\" DevicePath \"\"" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.505311 4922 reconciler_common.go:293] "Volume detached for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-node-bootstrap-token\") on node \"crc\" DevicePath \"\"" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.505645 4922 reconciler_common.go:293] "Volume detached for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-profile-collector-cert\") on node \"crc\" DevicePath \"\"" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.505665 4922 reconciler_common.go:293] "Volume detached for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-image-import-ca\") on node \"crc\" DevicePath \"\"" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.505679 4922 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.505697 4922 reconciler_common.go:293] "Volume detached for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/6402fda4-df10-493c-b4e5-d0569419652d-machine-api-operator-tls\") on node \"crc\" DevicePath \"\"" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.505711 4922 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xcphl\" (UniqueName: \"kubernetes.io/projected/7583ce53-e0fe-4a16-9e4d-50516596a136-kube-api-access-xcphl\") on node \"crc\" DevicePath \"\"" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.505724 4922 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pcxfs\" (UniqueName: \"kubernetes.io/projected/9d4552c7-cd75-42dd-8880-30dd377c49a4-kube-api-access-pcxfs\") on node \"crc\" DevicePath \"\"" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.505737 4922 reconciler_common.go:293] "Volume detached for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-profile-collector-cert\") on node \"crc\" DevicePath \"\"" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.505755 4922 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7583ce53-e0fe-4a16-9e4d-50516596a136-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.506399 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" 
err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.505767 4922 reconciler_common.go:293] "Volume detached for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/31d8b7a1-420e-4252-a5b7-eebe8a111292-proxy-tls\") on node \"crc\" DevicePath \"\"" Nov 28 06:52:54 crc kubenswrapper[4922]: E1128 06:52:54.508233 4922 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Nov 28 06:52:54 crc kubenswrapper[4922]: E1128 06:52:54.508779 4922 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-28 06:52:55.008292076 +0000 UTC m=+19.928687658 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.509378 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-audit-policies" (OuterVolumeSpecName: "audit-policies") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "audit-policies". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.509772 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "9d4552c7-cd75-42dd-8880-30dd377c49a4" (UID: "9d4552c7-cd75-42dd-8880-30dd377c49a4"). InnerVolumeSpecName "trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.509891 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6731426b-95fe-49ff-bb5f-40441049fde2-control-plane-machine-set-operator-tls" (OuterVolumeSpecName: "control-plane-machine-set-operator-tls") pod "6731426b-95fe-49ff-bb5f-40441049fde2" (UID: "6731426b-95fe-49ff-bb5f-40441049fde2"). InnerVolumeSpecName "control-plane-machine-set-operator-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.510059 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5b88f790-22fa-440e-b583-365168c0b23d-kube-api-access-jkwtn" (OuterVolumeSpecName: "kube-api-access-jkwtn") pod "5b88f790-22fa-440e-b583-365168c0b23d" (UID: "5b88f790-22fa-440e-b583-365168c0b23d"). InnerVolumeSpecName "kube-api-access-jkwtn". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.510106 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6402fda4-df10-493c-b4e5-d0569419652d-kube-api-access-mg5zb" (OuterVolumeSpecName: "kube-api-access-mg5zb") pod "6402fda4-df10-493c-b4e5-d0569419652d" (UID: "6402fda4-df10-493c-b4e5-d0569419652d"). InnerVolumeSpecName "kube-api-access-mg5zb". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.510357 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/496e6271-fb68-4057-954e-a0d97a4afa3f-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "496e6271-fb68-4057-954e-a0d97a4afa3f" (UID: "496e6271-fb68-4057-954e-a0d97a4afa3f"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.510762 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5b88f790-22fa-440e-b583-365168c0b23d-metrics-certs" (OuterVolumeSpecName: "metrics-certs") pod "5b88f790-22fa-440e-b583-365168c0b23d" (UID: "5b88f790-22fa-440e-b583-365168c0b23d"). InnerVolumeSpecName "metrics-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.511457 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/43509403-f426-496e-be36-56cef71462f5-kube-api-access-qg5z5" (OuterVolumeSpecName: "kube-api-access-qg5z5") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "kube-api-access-qg5z5". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.512005 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-config" (OuterVolumeSpecName: "config") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.512486 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-key" (OuterVolumeSpecName: "signing-key") pod "25e176fe-21b4-4974-b1ed-c8b94f112a7f" (UID: "25e176fe-21b4-4974-b1ed-c8b94f112a7f"). InnerVolumeSpecName "signing-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.512516 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/20b0d48f-5fd6-431c-a545-e3c800c7b866-kube-api-access-w9rds" (OuterVolumeSpecName: "kube-api-access-w9rds") pod "20b0d48f-5fd6-431c-a545-e3c800c7b866" (UID: "20b0d48f-5fd6-431c-a545-e3c800c7b866"). InnerVolumeSpecName "kube-api-access-w9rds". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.512696 4922 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-check-endpoints/0.log" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.512715 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-kube-api-access-6ccd8" (OuterVolumeSpecName: "kube-api-access-6ccd8") pod "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" (UID: "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b"). InnerVolumeSpecName "kube-api-access-6ccd8". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.512788 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.512935 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-kube-api-access-mnrrd" (OuterVolumeSpecName: "kube-api-access-mnrrd") pod "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" (UID: "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d"). InnerVolumeSpecName "kube-api-access-mnrrd". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.512949 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/87cf06ed-a83f-41a7-828d-70653580a8cb-config-volume" (OuterVolumeSpecName: "config-volume") pod "87cf06ed-a83f-41a7-828d-70653580a8cb" (UID: "87cf06ed-a83f-41a7-828d-70653580a8cb"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.513001 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.513164 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-config" (OuterVolumeSpecName: "config") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.513315 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1386a44e-36a2-460c-96d0-0359d2b6f0f5-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "1386a44e-36a2-460c-96d0-0359d2b6f0f5" (UID: "1386a44e-36a2-460c-96d0-0359d2b6f0f5"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.513381 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-proxy-ca-bundles" (OuterVolumeSpecName: "proxy-ca-bundles") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "proxy-ca-bundles". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 06:52:54 crc kubenswrapper[4922]: E1128 06:52:54.513602 4922 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.513652 4922 swap_util.go:74] "error creating dir to test if tmpfs noswap is enabled. Assuming not supported" mount path="" error="stat /var/lib/kubelet/plugins/kubernetes.io/empty-dir: no such file or directory" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.516270 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-env-overrides\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.513460 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-client-ca" (OuterVolumeSpecName: "client-ca") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "client-ca". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 06:52:54 crc kubenswrapper[4922]: E1128 06:52:54.514533 4922 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-28 06:52:55.014513704 +0000 UTC m=+19.934909286 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.514615 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-config" (OuterVolumeSpecName: "ovnkube-config") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "ovnkube-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.511301 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a31745f5-9847-4afe-82a5-3161cc66ca93-metrics-tls" (OuterVolumeSpecName: "metrics-tls") pod "a31745f5-9847-4afe-82a5-3161cc66ca93" (UID: "a31745f5-9847-4afe-82a5-3161cc66ca93"). InnerVolumeSpecName "metrics-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.517561 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1d611f23-29be-4491-8495-bee1670e935f-kube-api-access-bf2bz" (OuterVolumeSpecName: "kube-api-access-bf2bz") pod "1d611f23-29be-4491-8495-bee1670e935f" (UID: "1d611f23-29be-4491-8495-bee1670e935f"). InnerVolumeSpecName "kube-api-access-bf2bz". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.518077 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-audit" (OuterVolumeSpecName: "audit") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "audit". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.514767 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/20b0d48f-5fd6-431c-a545-e3c800c7b866-cert" (OuterVolumeSpecName: "cert") pod "20b0d48f-5fd6-431c-a545-e3c800c7b866" (UID: "20b0d48f-5fd6-431c-a545-e3c800c7b866"). InnerVolumeSpecName "cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.515062 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0b78653f-4ff9-4508-8672-245ed9b561e3-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "0b78653f-4ff9-4508-8672-245ed9b561e3" (UID: "0b78653f-4ff9-4508-8672-245ed9b561e3"). InnerVolumeSpecName "serving-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.518286 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6509e943-70c6-444c-bc41-48a544e36fbd-kube-api-access-6g6sz" (OuterVolumeSpecName: "kube-api-access-6g6sz") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "kube-api-access-6g6sz". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.515145 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6509e943-70c6-444c-bc41-48a544e36fbd-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.515199 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/bf126b07-da06-4140-9a57-dfd54fc6b486-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "bf126b07-da06-4140-9a57-dfd54fc6b486" (UID: "bf126b07-da06-4140-9a57-dfd54fc6b486"). InnerVolumeSpecName "trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.515252 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-certs" (OuterVolumeSpecName: "certs") pod "5fe579f8-e8a6-4643-bce5-a661393c4dde" (UID: "5fe579f8-e8a6-4643-bce5-a661393c4dde"). InnerVolumeSpecName "certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.515405 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-samples-operator-tls" (OuterVolumeSpecName: "samples-operator-tls") pod "a0128f3a-b052-44ed-a84e-c4c8aaf17c13" (UID: "a0128f3a-b052-44ed-a84e-c4c8aaf17c13"). InnerVolumeSpecName "samples-operator-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.518349 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/925f1c65-6136-48ba-85aa-3a3b50560753-ovn-control-plane-metrics-cert" (OuterVolumeSpecName: "ovn-control-plane-metrics-cert") pod "925f1c65-6136-48ba-85aa-3a3b50560753" (UID: "925f1c65-6136-48ba-85aa-3a3b50560753"). InnerVolumeSpecName "ovn-control-plane-metrics-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.515639 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/57a731c4-ef35-47a8-b875-bfb08a7f8011-kube-api-access-cfbct" (OuterVolumeSpecName: "kube-api-access-cfbct") pod "57a731c4-ef35-47a8-b875-bfb08a7f8011" (UID: "57a731c4-ef35-47a8-b875-bfb08a7f8011"). InnerVolumeSpecName "kube-api-access-cfbct". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.516299 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b6312bbd-5731-4ea0-a20f-81d5a57df44a-kube-api-access-249nr" (OuterVolumeSpecName: "kube-api-access-249nr") pod "b6312bbd-5731-4ea0-a20f-81d5a57df44a" (UID: "b6312bbd-5731-4ea0-a20f-81d5a57df44a"). 
InnerVolumeSpecName "kube-api-access-249nr". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.516415 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-serviceca" (OuterVolumeSpecName: "serviceca") pod "3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" (UID: "3cb93b32-e0ae-4377-b9c8-fdb9842c6d59"). InnerVolumeSpecName "serviceca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.516939 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7539238d-5fe0-46ed-884e-1c3b566537ec-config" (OuterVolumeSpecName: "config") pod "7539238d-5fe0-46ed-884e-1c3b566537ec" (UID: "7539238d-5fe0-46ed-884e-1c3b566537ec"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.518656 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"iptables-alerter-script\" (UniqueName: \"kubernetes.io/configmap/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-iptables-alerter-script\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.518777 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-kube-api-access-dbsvg" (OuterVolumeSpecName: "kube-api-access-dbsvg") pod "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" (UID: "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9"). InnerVolumeSpecName "kube-api-access-dbsvg". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.518922 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-sysctl-allowlist" (OuterVolumeSpecName: "cni-sysctl-allowlist") pod "7bb08738-c794-4ee8-9972-3a62ca171029" (UID: "7bb08738-c794-4ee8-9972-3a62ca171029"). InnerVolumeSpecName "cni-sysctl-allowlist". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.519070 4922 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="43f9db3464d315f5b596f15327b9e7cfc2935bb8f18492bcf4aa1b2d8e2e8951" exitCode=255 Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.519339 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6731426b-95fe-49ff-bb5f-40441049fde2-kube-api-access-x7zkh" (OuterVolumeSpecName: "kube-api-access-x7zkh") pod "6731426b-95fe-49ff-bb5f-40441049fde2" (UID: "6731426b-95fe-49ff-bb5f-40441049fde2"). InnerVolumeSpecName "kube-api-access-x7zkh". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.519366 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-service-ca-bundle" (OuterVolumeSpecName: "service-ca-bundle") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "service-ca-bundle". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.519413 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerDied","Data":"43f9db3464d315f5b596f15327b9e7cfc2935bb8f18492bcf4aa1b2d8e2e8951"} Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.519573 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/496e6271-fb68-4057-954e-a0d97a4afa3f-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "496e6271-fb68-4057-954e-a0d97a4afa3f" (UID: "496e6271-fb68-4057-954e-a0d97a4afa3f"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.519850 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-env-overrides" (OuterVolumeSpecName: "env-overrides") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "env-overrides". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.519872 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-oauth-serving-cert" (OuterVolumeSpecName: "oauth-serving-cert") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "oauth-serving-cert". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.520805 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/22c825df-677d-4ca6-82db-3454ed06e783-machine-approver-tls" (OuterVolumeSpecName: "machine-approver-tls") pod "22c825df-677d-4ca6-82db-3454ed06e783" (UID: "22c825df-677d-4ca6-82db-3454ed06e783"). InnerVolumeSpecName "machine-approver-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.521148 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-cabundle" (OuterVolumeSpecName: "signing-cabundle") pod "25e176fe-21b4-4974-b1ed-c8b94f112a7f" (UID: "25e176fe-21b4-4974-b1ed-c8b94f112a7f"). InnerVolumeSpecName "signing-cabundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.521718 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-kube-api-access-x4zgh" (OuterVolumeSpecName: "kube-api-access-x4zgh") pod "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" (UID: "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d"). InnerVolumeSpecName "kube-api-access-x4zgh". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.522060 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "57a731c4-ef35-47a8-b875-bfb08a7f8011" (UID: "57a731c4-ef35-47a8-b875-bfb08a7f8011"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.522839 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/925f1c65-6136-48ba-85aa-3a3b50560753-kube-api-access-s4n52" (OuterVolumeSpecName: "kube-api-access-s4n52") pod "925f1c65-6136-48ba-85aa-3a3b50560753" (UID: "925f1c65-6136-48ba-85aa-3a3b50560753"). InnerVolumeSpecName "kube-api-access-s4n52". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.523962 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0b574797-001e-440a-8f4e-c0be86edad0f-mcc-auth-proxy-config" (OuterVolumeSpecName: "mcc-auth-proxy-config") pod "0b574797-001e-440a-8f4e-c0be86edad0f" (UID: "0b574797-001e-440a-8f4e-c0be86edad0f"). InnerVolumeSpecName "mcc-auth-proxy-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.529179 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-metrics-certs" (OuterVolumeSpecName: "metrics-certs") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "metrics-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 06:52:54 crc kubenswrapper[4922]: E1128 06:52:54.525171 4922 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.526093 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-config" (OuterVolumeSpecName: "config") pod "6402fda4-df10-493c-b4e5-d0569419652d" (UID: "6402fda4-df10-493c-b4e5-d0569419652d"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.526480 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c03ee662-fb2f-4fc4-a2c1-af487c19d254-kube-api-access-v47cf" (OuterVolumeSpecName: "kube-api-access-v47cf") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "kube-api-access-v47cf". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.527556 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1386a44e-36a2-460c-96d0-0359d2b6f0f5-config" (OuterVolumeSpecName: "config") pod "1386a44e-36a2-460c-96d0-0359d2b6f0f5" (UID: "1386a44e-36a2-460c-96d0-0359d2b6f0f5"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.527710 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-default-certificate" (OuterVolumeSpecName: "default-certificate") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "default-certificate". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.528665 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/87cf06ed-a83f-41a7-828d-70653580a8cb-metrics-tls" (OuterVolumeSpecName: "metrics-tls") pod "87cf06ed-a83f-41a7-828d-70653580a8cb" (UID: "87cf06ed-a83f-41a7-828d-70653580a8cb"). InnerVolumeSpecName "metrics-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 06:52:54 crc kubenswrapper[4922]: E1128 06:52:54.528788 4922 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.524461 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/37a5e44f-9a88-4405-be8a-b645485e7312-metrics-tls\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.524174 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 06:52:54 crc kubenswrapper[4922]: E1128 06:52:54.529349 4922 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 28 06:52:54 crc kubenswrapper[4922]: E1128 06:52:54.529396 4922 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 28 06:52:54 crc kubenswrapper[4922]: E1128 06:52:54.529459 4922 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-11-28 06:52:55.029441333 +0000 UTC m=+19.949836925 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.527153 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-identity-cm\" (UniqueName: \"kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-ovnkube-identity-cm\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Nov 28 06:52:54 crc kubenswrapper[4922]: E1128 06:52:54.530543 4922 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 28 06:52:54 crc kubenswrapper[4922]: E1128 06:52:54.530569 4922 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 28 06:52:54 crc kubenswrapper[4922]: E1128 06:52:54.530621 4922 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-11-28 06:52:55.030608186 +0000 UTC m=+19.951003768 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.531795 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0b574797-001e-440a-8f4e-c0be86edad0f-proxy-tls" (OuterVolumeSpecName: "proxy-tls") pod "0b574797-001e-440a-8f4e-c0be86edad0f" (UID: "0b574797-001e-440a-8f4e-c0be86edad0f"). InnerVolumeSpecName "proxy-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.532608 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-available-featuregates" (OuterVolumeSpecName: "available-featuregates") pod "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" (UID: "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d"). InnerVolumeSpecName "available-featuregates". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.533839 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-auth-proxy-config" (OuterVolumeSpecName: "auth-proxy-config") pod "31d8b7a1-420e-4252-a5b7-eebe8a111292" (UID: "31d8b7a1-420e-4252-a5b7-eebe8a111292"). InnerVolumeSpecName "auth-proxy-config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.535031 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e7e6199b-1264-4501-8953-767f51328d08-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "e7e6199b-1264-4501-8953-767f51328d08" (UID: "e7e6199b-1264-4501-8953-767f51328d08"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.537559 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e7e6199b-1264-4501-8953-767f51328d08-config" (OuterVolumeSpecName: "config") pod "e7e6199b-1264-4501-8953-767f51328d08" (UID: "e7e6199b-1264-4501-8953-767f51328d08"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 06:52:54 crc kubenswrapper[4922]: E1128 06:52:54.540724 4922 kubelet.go:1929] "Failed creating a mirror pod for" err="pods \"kube-controller-manager-crc\" already exists" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.544822 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rczfb\" (UniqueName: \"kubernetes.io/projected/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-kube-api-access-rczfb\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.545387 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s2kz5\" (UniqueName: \"kubernetes.io/projected/ef543e1b-8068-4ea3-b32a-61027b32e95d-kube-api-access-s2kz5\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.546018 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/ef543e1b-8068-4ea3-b32a-61027b32e95d-webhook-cert\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.546363 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.549540 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" (UID: "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.550586 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-images" (OuterVolumeSpecName: "images") pod "6402fda4-df10-493c-b4e5-d0569419652d" (UID: "6402fda4-df10-493c-b4e5-d0569419652d"). InnerVolumeSpecName "images". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.550598 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-client" (OuterVolumeSpecName: "etcd-client") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "etcd-client". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.551358 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-bound-sa-token" (OuterVolumeSpecName: "bound-sa-token") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "bound-sa-token". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.557328 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/efdd0498-1daa-4136-9a4a-3b948c2293fc-webhook-certs" (OuterVolumeSpecName: "webhook-certs") pod "efdd0498-1daa-4136-9a4a-3b948c2293fc" (UID: "efdd0498-1daa-4136-9a4a-3b948c2293fc"). 
InnerVolumeSpecName "webhook-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.557810 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-service-ca" (OuterVolumeSpecName: "v4-0-config-system-service-ca") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.557826 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-serving-cert" (OuterVolumeSpecName: "v4-0-config-system-serving-cert") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.558068 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rdwmf\" (UniqueName: \"kubernetes.io/projected/37a5e44f-9a88-4405-be8a-b645485e7312-kube-api-access-rdwmf\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.558380 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-kube-api-access-ngvvp" (OuterVolumeSpecName: "kube-api-access-ngvvp") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "kube-api-access-ngvvp". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.558778 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7539238d-5fe0-46ed-884e-1c3b566537ec-kube-api-access-tk88c" (OuterVolumeSpecName: "kube-api-access-tk88c") pod "7539238d-5fe0-46ed-884e-1c3b566537ec" (UID: "7539238d-5fe0-46ed-884e-1c3b566537ec"). InnerVolumeSpecName "kube-api-access-tk88c". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.558878 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-session" (OuterVolumeSpecName: "v4-0-config-system-session") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-session". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.559101 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "trusted-ca". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.559627 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/fda69060-fa79-4696-b1a6-7980f124bf7c-mcd-auth-proxy-config" (OuterVolumeSpecName: "mcd-auth-proxy-config") pod "fda69060-fa79-4696-b1a6-7980f124bf7c" (UID: "fda69060-fa79-4696-b1a6-7980f124bf7c"). InnerVolumeSpecName "mcd-auth-proxy-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.559867 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fda69060-fa79-4696-b1a6-7980f124bf7c-proxy-tls" (OuterVolumeSpecName: "proxy-tls") pod "fda69060-fa79-4696-b1a6-7980f124bf7c" (UID: "fda69060-fa79-4696-b1a6-7980f124bf7c"). InnerVolumeSpecName "proxy-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.559896 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-login" (OuterVolumeSpecName: "v4-0-config-user-template-login") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-user-template-login". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.560002 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-utilities" (OuterVolumeSpecName: "utilities") pod "1d611f23-29be-4491-8495-bee1670e935f" (UID: "1d611f23-29be-4491-8495-bee1670e935f"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.560248 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-trusted-ca-bundle" (OuterVolumeSpecName: "v4-0-config-system-trusted-ca-bundle") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.560361 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.560783 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/fda69060-fa79-4696-b1a6-7980f124bf7c-kube-api-access-xcgwh" (OuterVolumeSpecName: "kube-api-access-xcgwh") pod "fda69060-fa79-4696-b1a6-7980f124bf7c" (UID: "fda69060-fa79-4696-b1a6-7980f124bf7c"). InnerVolumeSpecName "kube-api-access-xcgwh". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.561163 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-utilities" (OuterVolumeSpecName: "utilities") pod "5225d0e4-402f-4861-b410-819f433b1803" (UID: "5225d0e4-402f-4861-b410-819f433b1803"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.561448 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-idp-0-file-data" (OuterVolumeSpecName: "v4-0-config-user-idp-0-file-data") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-user-idp-0-file-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.562762 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-config" (OuterVolumeSpecName: "config") pod "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" (UID: "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.563085 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.563926 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/01ab3dd5-8196-46d0-ad33-122e2ca51def-config" (OuterVolumeSpecName: "config") pod "01ab3dd5-8196-46d0-ad33-122e2ca51def" (UID: "01ab3dd5-8196-46d0-ad33-122e2ca51def"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.566912 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "5225d0e4-402f-4861-b410-819f433b1803" (UID: "5225d0e4-402f-4861-b410-819f433b1803"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.568649 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5441d097-087c-4d9a-baa8-b210afa90fc9-kube-api-access-2d4wz" (OuterVolumeSpecName: "kube-api-access-2d4wz") pod "5441d097-087c-4d9a-baa8-b210afa90fc9" (UID: "5441d097-087c-4d9a-baa8-b210afa90fc9"). InnerVolumeSpecName "kube-api-access-2d4wz". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.569028 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-config" (OuterVolumeSpecName: "config") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.572208 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-config" (OuterVolumeSpecName: "config") pod "5441d097-087c-4d9a-baa8-b210afa90fc9" (UID: "5441d097-087c-4d9a-baa8-b210afa90fc9"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.572830 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.574417 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-client-ca" (OuterVolumeSpecName: "client-ca") pod "5441d097-087c-4d9a-baa8-b210afa90fc9" (UID: "5441d097-087c-4d9a-baa8-b210afa90fc9"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.591995 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.595175 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8f668bae-612b-4b75-9490-919e737c6a3b-ca-trust-extracted" (OuterVolumeSpecName: "ca-trust-extracted") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "ca-trust-extracted". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.603010 4922 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/kube-apiserver-crc"] Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.603203 4922 scope.go:117] "RemoveContainer" containerID="43f9db3464d315f5b596f15327b9e7cfc2935bb8f18492bcf4aa1b2d8e2e8951" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.603854 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f54a7fc3-dff2-4919-91a9-7defe17b717e\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2b39d53d3c00be07ad6ed44fb37961669118efd3f0a953a39e05c90c9fc30319\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1c84fe4f54f9b9fc76f23f13aaee875e9d127be3dec3e3952893436fb9d94936\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bf8a543b9af64068e2164c1fb7fcf8117e36a3f1a6c6e40df2a63da8fea86612\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7ef4b355167e11c82fb8067373d0d1b4ec5551157e683043c98f9249082795d2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T06:52:35Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.609184 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ghdft\" (UniqueName: \"kubernetes.io/projected/e1f29751-83a6-4469-b733-50e654026f8c-kube-api-access-ghdft\") pod \"node-resolver-w9zxj\" (UID: \"e1f29751-83a6-4469-b733-50e654026f8c\") " pod="openshift-dns/node-resolver-w9zxj" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.609245 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-host-slash\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.609265 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"hosts-file\" (UniqueName: \"kubernetes.io/host-path/e1f29751-83a6-4469-b733-50e654026f8c-hosts-file\") pod \"node-resolver-w9zxj\" (UID: \"e1f29751-83a6-4469-b733-50e654026f8c\") " pod="openshift-dns/node-resolver-w9zxj" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.609286 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-etc-kube\" (UniqueName: \"kubernetes.io/host-path/37a5e44f-9a88-4405-be8a-b645485e7312-host-etc-kube\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.609327 4922 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.609338 4922 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-config\") on node \"crc\" DevicePath \"\"" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.609338 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-slash\" (UniqueName: 
\"kubernetes.io/host-path/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-host-slash\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.609348 4922 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-utilities\") on node \"crc\" DevicePath \"\"" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.609389 4922 reconciler_common.go:293] "Volume detached for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-auth-proxy-config\") on node \"crc\" DevicePath \"\"" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.609401 4922 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1386a44e-36a2-460c-96d0-0359d2b6f0f5-config\") on node \"crc\" DevicePath \"\"" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.609415 4922 reconciler_common.go:293] "Volume detached for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-config\") on node \"crc\" DevicePath \"\"" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.609429 4922 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jkwtn\" (UniqueName: \"kubernetes.io/projected/5b88f790-22fa-440e-b583-365168c0b23d-kube-api-access-jkwtn\") on node \"crc\" DevicePath \"\"" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.609442 4922 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tk88c\" (UniqueName: \"kubernetes.io/projected/7539238d-5fe0-46ed-884e-1c3b566537ec-kube-api-access-tk88c\") on node \"crc\" DevicePath \"\"" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.609452 4922 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.609458 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-etc-kube\" (UniqueName: \"kubernetes.io/host-path/37a5e44f-9a88-4405-be8a-b645485e7312-host-etc-kube\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.609461 4922 reconciler_common.go:293] "Volume detached for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-audit-policies\") on node \"crc\" DevicePath \"\"" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.609493 4922 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mg5zb\" (UniqueName: \"kubernetes.io/projected/6402fda4-df10-493c-b4e5-d0569419652d-kube-api-access-mg5zb\") on node \"crc\" DevicePath \"\"" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.609504 4922 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-session\") on node \"crc\" DevicePath \"\"" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.609514 4922 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-service-ca\" (UniqueName: 
\"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-service-ca\") on node \"crc\" DevicePath \"\"" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.609524 4922 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6ccd8\" (UniqueName: \"kubernetes.io/projected/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-kube-api-access-6ccd8\") on node \"crc\" DevicePath \"\"" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.609535 4922 reconciler_common.go:293] "Volume detached for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-available-featuregates\") on node \"crc\" DevicePath \"\"" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.609545 4922 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-trusted-ca\") on node \"crc\" DevicePath \"\"" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.609554 4922 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.609564 4922 reconciler_common.go:293] "Volume detached for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-proxy-ca-bundles\") on node \"crc\" DevicePath \"\"" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.609573 4922 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qg5z5\" (UniqueName: \"kubernetes.io/projected/43509403-f426-496e-be36-56cef71462f5-kube-api-access-qg5z5\") on node \"crc\" DevicePath \"\"" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.609582 4922 reconciler_common.go:293] "Volume detached for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-serviceca\") on node \"crc\" DevicePath \"\"" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.609592 4922 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1386a44e-36a2-460c-96d0-0359d2b6f0f5-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.609601 4922 reconciler_common.go:293] "Volume detached for volume \"images\" (UniqueName: \"kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-images\") on node \"crc\" DevicePath \"\"" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.609611 4922 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/87cf06ed-a83f-41a7-828d-70653580a8cb-config-volume\") on node \"crc\" DevicePath \"\"" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.609621 4922 reconciler_common.go:293] "Volume detached for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-key\") on node \"crc\" DevicePath \"\"" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.609631 4922 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-client-ca\") on node \"crc\" DevicePath \"\"" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.609640 4922 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: 
\"kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-utilities\") on node \"crc\" DevicePath \"\"" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.609650 4922 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x4zgh\" (UniqueName: \"kubernetes.io/projected/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-kube-api-access-x4zgh\") on node \"crc\" DevicePath \"\"" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.609660 4922 reconciler_common.go:293] "Volume detached for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-client\") on node \"crc\" DevicePath \"\"" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.609668 4922 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-config\") on node \"crc\" DevicePath \"\"" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.609677 4922 reconciler_common.go:293] "Volume detached for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/fda69060-fa79-4696-b1a6-7980f124bf7c-mcd-auth-proxy-config\") on node \"crc\" DevicePath \"\"" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.609685 4922 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.609694 4922 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/496e6271-fb68-4057-954e-a0d97a4afa3f-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.609703 4922 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/01ab3dd5-8196-46d0-ad33-122e2ca51def-config\") on node \"crc\" DevicePath \"\"" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.609712 4922 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.609720 4922 reconciler_common.go:293] "Volume detached for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/87cf06ed-a83f-41a7-828d-70653580a8cb-metrics-tls\") on node \"crc\" DevicePath \"\"" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.609729 4922 reconciler_common.go:293] "Volume detached for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/efdd0498-1daa-4136-9a4a-3b948c2293fc-webhook-certs\") on node \"crc\" DevicePath \"\"" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.609738 4922 reconciler_common.go:293] "Volume detached for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/6731426b-95fe-49ff-bb5f-40441049fde2-control-plane-machine-set-operator-tls\") on node \"crc\" DevicePath \"\"" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.609747 4922 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-login\") on node \"crc\" DevicePath \"\"" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.609756 4922 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: 
\"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-config\") on node \"crc\" DevicePath \"\"" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.609766 4922 reconciler_common.go:293] "Volume detached for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/5b88f790-22fa-440e-b583-365168c0b23d-metrics-certs\") on node \"crc\" DevicePath \"\"" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.609775 4922 reconciler_common.go:293] "Volume detached for volume \"certs\" (UniqueName: \"kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-certs\") on node \"crc\" DevicePath \"\"" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.609783 4922 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e7e6199b-1264-4501-8953-767f51328d08-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.609792 4922 reconciler_common.go:293] "Volume detached for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/8f668bae-612b-4b75-9490-919e737c6a3b-ca-trust-extracted\") on node \"crc\" DevicePath \"\"" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.609801 4922 reconciler_common.go:293] "Volume detached for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/fda69060-fa79-4696-b1a6-7980f124bf7c-proxy-tls\") on node \"crc\" DevicePath \"\"" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.609810 4922 reconciler_common.go:293] "Volume detached for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-default-certificate\") on node \"crc\" DevicePath \"\"" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.609818 4922 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-config\") on node \"crc\" DevicePath \"\"" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.609827 4922 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0b78653f-4ff9-4508-8672-245ed9b561e3-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.609836 4922 reconciler_common.go:293] "Volume detached for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-cabundle\") on node \"crc\" DevicePath \"\"" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.609845 4922 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.609853 4922 reconciler_common.go:293] "Volume detached for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/0b574797-001e-440a-8f4e-c0be86edad0f-mcc-auth-proxy-config\") on node \"crc\" DevicePath \"\"" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.609861 4922 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ngvvp\" (UniqueName: \"kubernetes.io/projected/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-kube-api-access-ngvvp\") on node \"crc\" DevicePath \"\"" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.609870 4922 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-v47cf\" (UniqueName: 
\"kubernetes.io/projected/c03ee662-fb2f-4fc4-a2c1-af487c19d254-kube-api-access-v47cf\") on node \"crc\" DevicePath \"\"" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.609877 4922 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-client-ca\") on node \"crc\" DevicePath \"\"" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.609885 4922 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mnrrd\" (UniqueName: \"kubernetes.io/projected/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-kube-api-access-mnrrd\") on node \"crc\" DevicePath \"\"" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.609893 4922 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w9rds\" (UniqueName: \"kubernetes.io/projected/20b0d48f-5fd6-431c-a545-e3c800c7b866-kube-api-access-w9rds\") on node \"crc\" DevicePath \"\"" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.609902 4922 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-s4n52\" (UniqueName: \"kubernetes.io/projected/925f1c65-6136-48ba-85aa-3a3b50560753-kube-api-access-s4n52\") on node \"crc\" DevicePath \"\"" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.609910 4922 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/bf126b07-da06-4140-9a57-dfd54fc6b486-trusted-ca\") on node \"crc\" DevicePath \"\"" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.609918 4922 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cfbct\" (UniqueName: \"kubernetes.io/projected/57a731c4-ef35-47a8-b875-bfb08a7f8011-kube-api-access-cfbct\") on node \"crc\" DevicePath \"\"" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.609927 4922 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-249nr\" (UniqueName: \"kubernetes.io/projected/b6312bbd-5731-4ea0-a20f-81d5a57df44a-kube-api-access-249nr\") on node \"crc\" DevicePath \"\"" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.609936 4922 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7539238d-5fe0-46ed-884e-1c3b566537ec-config\") on node \"crc\" DevicePath \"\"" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.609943 4922 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/496e6271-fb68-4057-954e-a0d97a4afa3f-kube-api-access\") on node \"crc\" DevicePath \"\"" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.609951 4922 reconciler_common.go:293] "Volume detached for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/0b574797-001e-440a-8f4e-c0be86edad0f-proxy-tls\") on node \"crc\" DevicePath \"\"" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.609959 4922 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-config\") on node \"crc\" DevicePath \"\"" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.609967 4922 reconciler_common.go:293] "Volume detached for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-service-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.609975 4922 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x7zkh\" (UniqueName: 
\"kubernetes.io/projected/6731426b-95fe-49ff-bb5f-40441049fde2-kube-api-access-x7zkh\") on node \"crc\" DevicePath \"\"" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.609983 4922 reconciler_common.go:293] "Volume detached for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/925f1c65-6136-48ba-85aa-3a3b50560753-ovn-control-plane-metrics-cert\") on node \"crc\" DevicePath \"\"" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.609992 4922 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-trusted-ca\") on node \"crc\" DevicePath \"\"" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.610000 4922 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e7e6199b-1264-4501-8953-767f51328d08-config\") on node \"crc\" DevicePath \"\"" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.610007 4922 reconciler_common.go:293] "Volume detached for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-bound-sa-token\") on node \"crc\" DevicePath \"\"" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.610014 4922 reconciler_common.go:293] "Volume detached for volume \"cert\" (UniqueName: \"kubernetes.io/secret/20b0d48f-5fd6-431c-a545-e3c800c7b866-cert\") on node \"crc\" DevicePath \"\"" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.610021 4922 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6509e943-70c6-444c-bc41-48a544e36fbd-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.610030 4922 reconciler_common.go:293] "Volume detached for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-samples-operator-tls\") on node \"crc\" DevicePath \"\"" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.610039 4922 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bf2bz\" (UniqueName: \"kubernetes.io/projected/1d611f23-29be-4491-8495-bee1670e935f-kube-api-access-bf2bz\") on node \"crc\" DevicePath \"\"" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.610047 4922 reconciler_common.go:293] "Volume detached for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-audit\") on node \"crc\" DevicePath \"\"" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.610057 4922 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6g6sz\" (UniqueName: \"kubernetes.io/projected/6509e943-70c6-444c-bc41-48a544e36fbd-kube-api-access-6g6sz\") on node \"crc\" DevicePath \"\"" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.610065 4922 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-idp-0-file-data\") on node \"crc\" DevicePath \"\"" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.610073 4922 reconciler_common.go:293] "Volume detached for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/22c825df-677d-4ca6-82db-3454ed06e783-machine-approver-tls\") on node \"crc\" DevicePath \"\"" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.610081 4922 reconciler_common.go:293] "Volume detached for volume \"env-overrides\" (UniqueName: 
\"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-env-overrides\") on node \"crc\" DevicePath \"\"" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.610090 4922 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2d4wz\" (UniqueName: \"kubernetes.io/projected/5441d097-087c-4d9a-baa8-b210afa90fc9-kube-api-access-2d4wz\") on node \"crc\" DevicePath \"\"" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.610099 4922 reconciler_common.go:293] "Volume detached for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-oauth-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.610148 4922 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xcgwh\" (UniqueName: \"kubernetes.io/projected/fda69060-fa79-4696-b1a6-7980f124bf7c-kube-api-access-xcgwh\") on node \"crc\" DevicePath \"\"" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.610180 4922 reconciler_common.go:293] "Volume detached for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-sysctl-allowlist\") on node \"crc\" DevicePath \"\"" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.610190 4922 reconciler_common.go:293] "Volume detached for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-metrics-certs\") on node \"crc\" DevicePath \"\"" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.610226 4922 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dbsvg\" (UniqueName: \"kubernetes.io/projected/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-kube-api-access-dbsvg\") on node \"crc\" DevicePath \"\"" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.610234 4922 reconciler_common.go:293] "Volume detached for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/a31745f5-9847-4afe-82a5-3161cc66ca93-metrics-tls\") on node \"crc\" DevicePath \"\"" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.610242 4922 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-config\") on node \"crc\" DevicePath \"\"" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.610250 4922 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.610259 4922 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.614267 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers 
with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.626017 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.636240 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-w9zxj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e1f29751-83a6-4469-b733-50e654026f8c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ghdft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T06:52:54Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-w9zxj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.639421 4922 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-node-identity/network-node-identity-vrzqb" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.645866 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-operator/iptables-alerter-4ln5h" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.653762 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.710760 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ghdft\" (UniqueName: \"kubernetes.io/projected/e1f29751-83a6-4469-b733-50e654026f8c-kube-api-access-ghdft\") pod \"node-resolver-w9zxj\" (UID: \"e1f29751-83a6-4469-b733-50e654026f8c\") " pod="openshift-dns/node-resolver-w9zxj" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.711112 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"hosts-file\" (UniqueName: \"kubernetes.io/host-path/e1f29751-83a6-4469-b733-50e654026f8c-hosts-file\") pod \"node-resolver-w9zxj\" (UID: \"e1f29751-83a6-4469-b733-50e654026f8c\") " pod="openshift-dns/node-resolver-w9zxj" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.711316 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"hosts-file\" (UniqueName: \"kubernetes.io/host-path/e1f29751-83a6-4469-b733-50e654026f8c-hosts-file\") pod \"node-resolver-w9zxj\" (UID: \"e1f29751-83a6-4469-b733-50e654026f8c\") " pod="openshift-dns/node-resolver-w9zxj" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.734398 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ghdft\" (UniqueName: \"kubernetes.io/projected/e1f29751-83a6-4469-b733-50e654026f8c-kube-api-access-ghdft\") pod \"node-resolver-w9zxj\" (UID: \"e1f29751-83a6-4469-b733-50e654026f8c\") " pod="openshift-dns/node-resolver-w9zxj" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.744320 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/node-resolver-w9zxj" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.820041 4922 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-multus/multus-jgzjd"] Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.820366 4922 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-7gdxt"] Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.822552 4922 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/multus-jgzjd" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.825856 4922 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"openshift-service-ca.crt" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.826433 4922 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"kube-root-ca.crt" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.826814 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"default-dockercfg-2q5b6" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.828359 4922 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/machine-config-daemon-h8wk6"] Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.828920 4922 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-multus/multus-additional-cni-plugins-xm948"] Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.829066 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-7gdxt" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.830149 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-daemon-h8wk6" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.836583 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-additional-cni-plugins-xm948" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.842989 4922 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"cni-copy-resources" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.843303 4922 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"multus-daemon-config" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.853910 4922 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"openshift-service-ca.crt" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.854343 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-node-metrics-cert" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.854749 4922 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"openshift-service-ca.crt" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.854911 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-daemon-dockercfg-r5tcq" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.855059 4922 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"kube-root-ca.crt" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.855196 4922 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"kube-root-ca.crt" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.855353 4922 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"ovnkube-script-lib" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.855514 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-kubernetes-node-dockercfg-pwtwl" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.855663 4922 reflector.go:368] Caches populated for *v1.ConfigMap from 
object-"openshift-ovn-kubernetes"/"ovnkube-config" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.855774 4922 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"env-overrides" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.855982 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-ancillary-tools-dockercfg-vnmsz" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.856123 4922 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"kube-rbac-proxy" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.856355 4922 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"default-cni-sysctl-allowlist" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.856520 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"proxy-tls" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.876423 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.903537 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.914511 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/ac5c6b67-2037-400e-8e03-845b47d8ca67-host-slash\") pod \"ovnkube-node-7gdxt\" (UID: \"ac5c6b67-2037-400e-8e03-845b47d8ca67\") " pod="openshift-ovn-kubernetes/ovnkube-node-7gdxt" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.914550 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-cni-multus\" (UniqueName: \"kubernetes.io/host-path/b05f16bb-1729-4fd8-883a-4fb960bf4cff-host-var-lib-cni-multus\") pod \"multus-jgzjd\" (UID: \"b05f16bb-1729-4fd8-883a-4fb960bf4cff\") " pod="openshift-multus/multus-jgzjd" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.914570 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/ac5c6b67-2037-400e-8e03-845b47d8ca67-var-lib-openvswitch\") pod \"ovnkube-node-7gdxt\" (UID: \"ac5c6b67-2037-400e-8e03-845b47d8ca67\") " pod="openshift-ovn-kubernetes/ovnkube-node-7gdxt" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.914588 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/ac5c6b67-2037-400e-8e03-845b47d8ca67-systemd-units\") pod \"ovnkube-node-7gdxt\" (UID: \"ac5c6b67-2037-400e-8e03-845b47d8ca67\") " pod="openshift-ovn-kubernetes/ovnkube-node-7gdxt" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.914601 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/b05f16bb-1729-4fd8-883a-4fb960bf4cff-host-run-netns\") pod \"multus-jgzjd\" (UID: \"b05f16bb-1729-4fd8-883a-4fb960bf4cff\") " pod="openshift-multus/multus-jgzjd" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.914616 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/ac5c6b67-2037-400e-8e03-845b47d8ca67-host-kubelet\") pod \"ovnkube-node-7gdxt\" (UID: \"ac5c6b67-2037-400e-8e03-845b47d8ca67\") " pod="openshift-ovn-kubernetes/ovnkube-node-7gdxt" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.914631 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for 
volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/ac5c6b67-2037-400e-8e03-845b47d8ca67-node-log\") pod \"ovnkube-node-7gdxt\" (UID: \"ac5c6b67-2037-400e-8e03-845b47d8ca67\") " pod="openshift-ovn-kubernetes/ovnkube-node-7gdxt" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.914656 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/b05f16bb-1729-4fd8-883a-4fb960bf4cff-cnibin\") pod \"multus-jgzjd\" (UID: \"b05f16bb-1729-4fd8-883a-4fb960bf4cff\") " pod="openshift-multus/multus-jgzjd" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.914670 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/b05f16bb-1729-4fd8-883a-4fb960bf4cff-system-cni-dir\") pod \"multus-jgzjd\" (UID: \"b05f16bb-1729-4fd8-883a-4fb960bf4cff\") " pod="openshift-multus/multus-jgzjd" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.914685 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-cni-bin\" (UniqueName: \"kubernetes.io/host-path/b05f16bb-1729-4fd8-883a-4fb960bf4cff-host-var-lib-cni-bin\") pod \"multus-jgzjd\" (UID: \"b05f16bb-1729-4fd8-883a-4fb960bf4cff\") " pod="openshift-multus/multus-jgzjd" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.914698 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-kubernetes\" (UniqueName: \"kubernetes.io/host-path/b05f16bb-1729-4fd8-883a-4fb960bf4cff-etc-kubernetes\") pod \"multus-jgzjd\" (UID: \"b05f16bb-1729-4fd8-883a-4fb960bf4cff\") " pod="openshift-multus/multus-jgzjd" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.914721 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/ac5c6b67-2037-400e-8e03-845b47d8ca67-etc-openvswitch\") pod \"ovnkube-node-7gdxt\" (UID: \"ac5c6b67-2037-400e-8e03-845b47d8ca67\") " pod="openshift-ovn-kubernetes/ovnkube-node-7gdxt" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.914736 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/ac5c6b67-2037-400e-8e03-845b47d8ca67-host-run-ovn-kubernetes\") pod \"ovnkube-node-7gdxt\" (UID: \"ac5c6b67-2037-400e-8e03-845b47d8ca67\") " pod="openshift-ovn-kubernetes/ovnkube-node-7gdxt" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.914749 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"multus-cni-dir\" (UniqueName: \"kubernetes.io/host-path/b05f16bb-1729-4fd8-883a-4fb960bf4cff-multus-cni-dir\") pod \"multus-jgzjd\" (UID: \"b05f16bb-1729-4fd8-883a-4fb960bf4cff\") " pod="openshift-multus/multus-jgzjd" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.914766 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/ac5c6b67-2037-400e-8e03-845b47d8ca67-run-systemd\") pod \"ovnkube-node-7gdxt\" (UID: \"ac5c6b67-2037-400e-8e03-845b47d8ca67\") " pod="openshift-ovn-kubernetes/ovnkube-node-7gdxt" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.914783 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"host-var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/b05f16bb-1729-4fd8-883a-4fb960bf4cff-host-var-lib-kubelet\") pod \"multus-jgzjd\" (UID: \"b05f16bb-1729-4fd8-883a-4fb960bf4cff\") " pod="openshift-multus/multus-jgzjd" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.914798 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-multus-certs\" (UniqueName: \"kubernetes.io/host-path/b05f16bb-1729-4fd8-883a-4fb960bf4cff-host-run-multus-certs\") pod \"multus-jgzjd\" (UID: \"b05f16bb-1729-4fd8-883a-4fb960bf4cff\") " pod="openshift-multus/multus-jgzjd" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.914812 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fwd5b\" (UniqueName: \"kubernetes.io/projected/b05f16bb-1729-4fd8-883a-4fb960bf4cff-kube-api-access-fwd5b\") pod \"multus-jgzjd\" (UID: \"b05f16bb-1729-4fd8-883a-4fb960bf4cff\") " pod="openshift-multus/multus-jgzjd" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.914837 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/ac5c6b67-2037-400e-8e03-845b47d8ca67-host-cni-bin\") pod \"ovnkube-node-7gdxt\" (UID: \"ac5c6b67-2037-400e-8e03-845b47d8ca67\") " pod="openshift-ovn-kubernetes/ovnkube-node-7gdxt" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.914851 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/ac5c6b67-2037-400e-8e03-845b47d8ca67-run-ovn\") pod \"ovnkube-node-7gdxt\" (UID: \"ac5c6b67-2037-400e-8e03-845b47d8ca67\") " pod="openshift-ovn-kubernetes/ovnkube-node-7gdxt" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.914865 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/ac5c6b67-2037-400e-8e03-845b47d8ca67-ovnkube-script-lib\") pod \"ovnkube-node-7gdxt\" (UID: \"ac5c6b67-2037-400e-8e03-845b47d8ca67\") " pod="openshift-ovn-kubernetes/ovnkube-node-7gdxt" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.914889 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/ac5c6b67-2037-400e-8e03-845b47d8ca67-host-run-netns\") pod \"ovnkube-node-7gdxt\" (UID: \"ac5c6b67-2037-400e-8e03-845b47d8ca67\") " pod="openshift-ovn-kubernetes/ovnkube-node-7gdxt" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.914909 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/ac5c6b67-2037-400e-8e03-845b47d8ca67-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-7gdxt\" (UID: \"ac5c6b67-2037-400e-8e03-845b47d8ca67\") " pod="openshift-ovn-kubernetes/ovnkube-node-7gdxt" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.914925 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/ac5c6b67-2037-400e-8e03-845b47d8ca67-env-overrides\") pod \"ovnkube-node-7gdxt\" (UID: \"ac5c6b67-2037-400e-8e03-845b47d8ca67\") " pod="openshift-ovn-kubernetes/ovnkube-node-7gdxt" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.914941 4922 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/ac5c6b67-2037-400e-8e03-845b47d8ca67-ovnkube-config\") pod \"ovnkube-node-7gdxt\" (UID: \"ac5c6b67-2037-400e-8e03-845b47d8ca67\") " pod="openshift-ovn-kubernetes/ovnkube-node-7gdxt" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.914956 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/b05f16bb-1729-4fd8-883a-4fb960bf4cff-cni-binary-copy\") pod \"multus-jgzjd\" (UID: \"b05f16bb-1729-4fd8-883a-4fb960bf4cff\") " pod="openshift-multus/multus-jgzjd" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.914969 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-k8s-cni-cncf-io\" (UniqueName: \"kubernetes.io/host-path/b05f16bb-1729-4fd8-883a-4fb960bf4cff-host-run-k8s-cni-cncf-io\") pod \"multus-jgzjd\" (UID: \"b05f16bb-1729-4fd8-883a-4fb960bf4cff\") " pod="openshift-multus/multus-jgzjd" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.914984 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"hostroot\" (UniqueName: \"kubernetes.io/host-path/b05f16bb-1729-4fd8-883a-4fb960bf4cff-hostroot\") pod \"multus-jgzjd\" (UID: \"b05f16bb-1729-4fd8-883a-4fb960bf4cff\") " pod="openshift-multus/multus-jgzjd" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.914998 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"multus-conf-dir\" (UniqueName: \"kubernetes.io/host-path/b05f16bb-1729-4fd8-883a-4fb960bf4cff-multus-conf-dir\") pod \"multus-jgzjd\" (UID: \"b05f16bb-1729-4fd8-883a-4fb960bf4cff\") " pod="openshift-multus/multus-jgzjd" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.915011 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/b05f16bb-1729-4fd8-883a-4fb960bf4cff-multus-daemon-config\") pod \"multus-jgzjd\" (UID: \"b05f16bb-1729-4fd8-883a-4fb960bf4cff\") " pod="openshift-multus/multus-jgzjd" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.915024 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/ac5c6b67-2037-400e-8e03-845b47d8ca67-log-socket\") pod \"ovnkube-node-7gdxt\" (UID: \"ac5c6b67-2037-400e-8e03-845b47d8ca67\") " pod="openshift-ovn-kubernetes/ovnkube-node-7gdxt" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.915037 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/ac5c6b67-2037-400e-8e03-845b47d8ca67-host-cni-netd\") pod \"ovnkube-node-7gdxt\" (UID: \"ac5c6b67-2037-400e-8e03-845b47d8ca67\") " pod="openshift-ovn-kubernetes/ovnkube-node-7gdxt" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.915056 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/ac5c6b67-2037-400e-8e03-845b47d8ca67-ovn-node-metrics-cert\") pod \"ovnkube-node-7gdxt\" (UID: \"ac5c6b67-2037-400e-8e03-845b47d8ca67\") " pod="openshift-ovn-kubernetes/ovnkube-node-7gdxt" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.915070 4922 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dtxdn\" (UniqueName: \"kubernetes.io/projected/ac5c6b67-2037-400e-8e03-845b47d8ca67-kube-api-access-dtxdn\") pod \"ovnkube-node-7gdxt\" (UID: \"ac5c6b67-2037-400e-8e03-845b47d8ca67\") " pod="openshift-ovn-kubernetes/ovnkube-node-7gdxt" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.915084 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/b05f16bb-1729-4fd8-883a-4fb960bf4cff-os-release\") pod \"multus-jgzjd\" (UID: \"b05f16bb-1729-4fd8-883a-4fb960bf4cff\") " pod="openshift-multus/multus-jgzjd" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.915107 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"multus-socket-dir-parent\" (UniqueName: \"kubernetes.io/host-path/b05f16bb-1729-4fd8-883a-4fb960bf4cff-multus-socket-dir-parent\") pod \"multus-jgzjd\" (UID: \"b05f16bb-1729-4fd8-883a-4fb960bf4cff\") " pod="openshift-multus/multus-jgzjd" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.915122 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/ac5c6b67-2037-400e-8e03-845b47d8ca67-run-openvswitch\") pod \"ovnkube-node-7gdxt\" (UID: \"ac5c6b67-2037-400e-8e03-845b47d8ca67\") " pod="openshift-ovn-kubernetes/ovnkube-node-7gdxt" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.916002 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.937614 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.956356 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"78d1b075-6126-476f-8318-483aeaa7b542\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:35Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:35Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d8727c0c5c76c4071561c95b14554063c01a2d7d826bfef19624a346f7079096\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c63926690deadbe1a0b9bd4228c1be8c9d959ae52dc13540e1d7da0ef6ce2b44\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://12e55656cf2a2cadbef853f458f347aabe75bd6b644a48a8c320144a0bbb77f8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://43f9db3464d315f5b596f15327b9e7cfc2935bb8f18492bcf4aa1b2d8e2e8951\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://43f9db3464d315f5b596f15327b9e7cfc2935bb8f18492bcf4aa1b2d8e2e8951\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-28
T06:52:53Z\\\",\\\"message\\\":\\\" to sync for client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file\\\\nI1128 06:52:53.805479 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\nI1128 06:52:53.805349 1 requestheader_controller.go:172] Starting RequestHeaderAuthRequestController\\\\nI1128 06:52:53.807455 1 shared_informer.go:313] Waiting for caches to sync for RequestHeaderAuthRequestController\\\\nI1128 06:52:53.805665 1 envvar.go:172] \\\\\\\"Feature gate default state\\\\\\\" feature=\\\\\\\"WatchListClient\\\\\\\" enabled=false\\\\nI1128 06:52:53.807562 1 envvar.go:172] \\\\\\\"Feature gate default state\\\\\\\" feature=\\\\\\\"InformerResourceVersion\\\\\\\" enabled=false\\\\nI1128 06:52:53.805922 1 tlsconfig.go:203] \\\\\\\"Loaded serving cert\\\\\\\" certName=\\\\\\\"serving-cert::/tmp/serving-cert-870734072/tls.crt::/tmp/serving-cert-870734072/tls.key\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"localhost\\\\\\\\\\\\\\\" [serving] validServingFor=[localhost] issuer=\\\\\\\\\\\\\\\"check-endpoints-signer@1764312757\\\\\\\\\\\\\\\" (2025-11-28 06:52:36 +0000 UTC to 2025-12-28 06:52:37 +0000 UTC (now=2025-11-28 06:52:53.805871396 +0000 UTC))\\\\\\\"\\\\nI1128 06:52:53.808356 1 named_certificates.go:53] \\\\\\\"Loaded SNI cert\\\\\\\" index=0 certName=\\\\\\\"self-signed loopback\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"apiserver-loopback-client@1764312768\\\\\\\\\\\\\\\" [serving] validServingFor=[apiserver-loopback-client] issuer=\\\\\\\\\\\\\\\"apiserver-loopback-client-ca@1764312768\\\\\\\\\\\\\\\" (2025-11-28 05:52:47 +0000 UTC to 2026-11-28 05:52:47 +0000 UTC (now=2025-11-28 06:52:53.808326937 +0000 UTC))\\\\\\\"\\\\nI1128 06:52:53.808434 1 secure_serving.go:213] Serving securely on [::]:17697\\\\nI1128 06:52:53.808491 1 genericapiserver.go:683] [graceful-termination] waiting for shutdown to be initiated\\\\nI1128 06:52:53.805962 1 dynamic_serving_content.go:135] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-870734072/tls.crt::/tmp/serving-cert-870734072/tls.key\\\\\\\"\\\\nI1128 06:52:53.808872 1 tlsconfig.go:243] \\\\\\\"Starting DynamicServingCertificateController\\\\\\\"\\\\nF1128 06:52:53.810397 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T06:52:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cd17aa15325fbe3a40b81cea5bfec137ef02a8dd2d5f8393be4d206a8360ba89\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:37Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9a1a9e0fb2169a4caf2986906a87573648458725e2571ec377704856d3c16b47\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9a1a9e0fb2169a4caf2986906a87573648458725e2571ec377704856d3c16b47\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T06:52:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T06:52:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T06:52:35Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.972212 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f54a7fc3-dff2-4919-91a9-7defe17b717e\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2b39d53d3c00be07ad6ed44fb37961669118efd3f0a953a39e05c90c9fc30319\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1c84fe4f54f9b9fc76f23f13aaee875e9d127be3dec3e3952893436fb9d94936\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bf8a543b9af64068e2164c1fb7fcf8117e36a3f1a6c6e40df2a63da8fea86612\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7ef4b355167e11c82fb8067373d0d1b4ec5551157e683043c98f9249082795d2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T06:52:35Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 06:52:54 crc kubenswrapper[4922]: I1128 06:52:54.982493 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 06:52:55 crc kubenswrapper[4922]: I1128 06:52:55.003407 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 06:52:55 crc kubenswrapper[4922]: I1128 06:52:55.011006 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-w9zxj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e1f29751-83a6-4469-b733-50e654026f8c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ghdft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T06:52:54Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-w9zxj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 06:52:55 crc kubenswrapper[4922]: I1128 06:52:55.015483 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume 
\"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 06:52:55 crc kubenswrapper[4922]: I1128 06:52:55.015583 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/0498340a-5e95-42bf-a0a6-8ac89a6b8858-mcd-auth-proxy-config\") pod \"machine-config-daemon-h8wk6\" (UID: \"0498340a-5e95-42bf-a0a6-8ac89a6b8858\") " pod="openshift-machine-config-operator/machine-config-daemon-h8wk6" Nov 28 06:52:55 crc kubenswrapper[4922]: I1128 06:52:55.015609 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/ac5c6b67-2037-400e-8e03-845b47d8ca67-etc-openvswitch\") pod \"ovnkube-node-7gdxt\" (UID: \"ac5c6b67-2037-400e-8e03-845b47d8ca67\") " pod="openshift-ovn-kubernetes/ovnkube-node-7gdxt" Nov 28 06:52:55 crc kubenswrapper[4922]: I1128 06:52:55.015627 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/ac5c6b67-2037-400e-8e03-845b47d8ca67-host-run-ovn-kubernetes\") pod \"ovnkube-node-7gdxt\" (UID: \"ac5c6b67-2037-400e-8e03-845b47d8ca67\") " pod="openshift-ovn-kubernetes/ovnkube-node-7gdxt" Nov 28 06:52:55 crc kubenswrapper[4922]: I1128 06:52:55.015642 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"multus-cni-dir\" (UniqueName: \"kubernetes.io/host-path/b05f16bb-1729-4fd8-883a-4fb960bf4cff-multus-cni-dir\") pod \"multus-jgzjd\" (UID: \"b05f16bb-1729-4fd8-883a-4fb960bf4cff\") " pod="openshift-multus/multus-jgzjd" Nov 28 06:52:55 crc kubenswrapper[4922]: I1128 06:52:55.015656 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fwd5b\" (UniqueName: \"kubernetes.io/projected/b05f16bb-1729-4fd8-883a-4fb960bf4cff-kube-api-access-fwd5b\") pod \"multus-jgzjd\" (UID: \"b05f16bb-1729-4fd8-883a-4fb960bf4cff\") " pod="openshift-multus/multus-jgzjd" Nov 28 06:52:55 crc kubenswrapper[4922]: I1128 06:52:55.015689 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 06:52:55 crc kubenswrapper[4922]: I1128 06:52:55.015709 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/ac5c6b67-2037-400e-8e03-845b47d8ca67-run-systemd\") pod \"ovnkube-node-7gdxt\" (UID: \"ac5c6b67-2037-400e-8e03-845b47d8ca67\") " pod="openshift-ovn-kubernetes/ovnkube-node-7gdxt" Nov 28 06:52:55 crc kubenswrapper[4922]: I1128 06:52:55.015732 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/b05f16bb-1729-4fd8-883a-4fb960bf4cff-host-var-lib-kubelet\") pod \"multus-jgzjd\" (UID: \"b05f16bb-1729-4fd8-883a-4fb960bf4cff\") " pod="openshift-multus/multus-jgzjd" Nov 28 06:52:55 crc kubenswrapper[4922]: I1128 06:52:55.015748 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"host-run-multus-certs\" (UniqueName: \"kubernetes.io/host-path/b05f16bb-1729-4fd8-883a-4fb960bf4cff-host-run-multus-certs\") pod \"multus-jgzjd\" (UID: \"b05f16bb-1729-4fd8-883a-4fb960bf4cff\") " pod="openshift-multus/multus-jgzjd" Nov 28 06:52:55 crc kubenswrapper[4922]: I1128 06:52:55.015777 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/ac5c6b67-2037-400e-8e03-845b47d8ca67-host-cni-bin\") pod \"ovnkube-node-7gdxt\" (UID: \"ac5c6b67-2037-400e-8e03-845b47d8ca67\") " pod="openshift-ovn-kubernetes/ovnkube-node-7gdxt" Nov 28 06:52:55 crc kubenswrapper[4922]: I1128 06:52:55.015792 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/0498340a-5e95-42bf-a0a6-8ac89a6b8858-proxy-tls\") pod \"machine-config-daemon-h8wk6\" (UID: \"0498340a-5e95-42bf-a0a6-8ac89a6b8858\") " pod="openshift-machine-config-operator/machine-config-daemon-h8wk6" Nov 28 06:52:55 crc kubenswrapper[4922]: I1128 06:52:55.015817 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/ac5c6b67-2037-400e-8e03-845b47d8ca67-run-ovn\") pod \"ovnkube-node-7gdxt\" (UID: \"ac5c6b67-2037-400e-8e03-845b47d8ca67\") " pod="openshift-ovn-kubernetes/ovnkube-node-7gdxt" Nov 28 06:52:55 crc kubenswrapper[4922]: I1128 06:52:55.015842 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/ac5c6b67-2037-400e-8e03-845b47d8ca67-ovnkube-script-lib\") pod \"ovnkube-node-7gdxt\" (UID: \"ac5c6b67-2037-400e-8e03-845b47d8ca67\") " pod="openshift-ovn-kubernetes/ovnkube-node-7gdxt" Nov 28 06:52:55 crc kubenswrapper[4922]: I1128 06:52:55.015861 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/ac5c6b67-2037-400e-8e03-845b47d8ca67-env-overrides\") pod \"ovnkube-node-7gdxt\" (UID: \"ac5c6b67-2037-400e-8e03-845b47d8ca67\") " pod="openshift-ovn-kubernetes/ovnkube-node-7gdxt" Nov 28 06:52:55 crc kubenswrapper[4922]: I1128 06:52:55.015877 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tuning-conf-dir\" (UniqueName: \"kubernetes.io/host-path/28b50482-ffec-406c-9ff9-9604bce5d5d5-tuning-conf-dir\") pod \"multus-additional-cni-plugins-xm948\" (UID: \"28b50482-ffec-406c-9ff9-9604bce5d5d5\") " pod="openshift-multus/multus-additional-cni-plugins-xm948" Nov 28 06:52:55 crc kubenswrapper[4922]: I1128 06:52:55.015897 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 06:52:55 crc kubenswrapper[4922]: I1128 06:52:55.015911 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/ac5c6b67-2037-400e-8e03-845b47d8ca67-host-run-netns\") pod \"ovnkube-node-7gdxt\" (UID: \"ac5c6b67-2037-400e-8e03-845b47d8ca67\") " pod="openshift-ovn-kubernetes/ovnkube-node-7gdxt" Nov 28 06:52:55 crc kubenswrapper[4922]: I1128 06:52:55.015928 4922 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/ac5c6b67-2037-400e-8e03-845b47d8ca67-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-7gdxt\" (UID: \"ac5c6b67-2037-400e-8e03-845b47d8ca67\") " pod="openshift-ovn-kubernetes/ovnkube-node-7gdxt" Nov 28 06:52:55 crc kubenswrapper[4922]: I1128 06:52:55.015944 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/28b50482-ffec-406c-9ff9-9604bce5d5d5-cnibin\") pod \"multus-additional-cni-plugins-xm948\" (UID: \"28b50482-ffec-406c-9ff9-9604bce5d5d5\") " pod="openshift-multus/multus-additional-cni-plugins-xm948" Nov 28 06:52:55 crc kubenswrapper[4922]: I1128 06:52:55.015973 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/ac5c6b67-2037-400e-8e03-845b47d8ca67-ovnkube-config\") pod \"ovnkube-node-7gdxt\" (UID: \"ac5c6b67-2037-400e-8e03-845b47d8ca67\") " pod="openshift-ovn-kubernetes/ovnkube-node-7gdxt" Nov 28 06:52:55 crc kubenswrapper[4922]: I1128 06:52:55.015988 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/b05f16bb-1729-4fd8-883a-4fb960bf4cff-cni-binary-copy\") pod \"multus-jgzjd\" (UID: \"b05f16bb-1729-4fd8-883a-4fb960bf4cff\") " pod="openshift-multus/multus-jgzjd" Nov 28 06:52:55 crc kubenswrapper[4922]: I1128 06:52:55.016005 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-k8s-cni-cncf-io\" (UniqueName: \"kubernetes.io/host-path/b05f16bb-1729-4fd8-883a-4fb960bf4cff-host-run-k8s-cni-cncf-io\") pod \"multus-jgzjd\" (UID: \"b05f16bb-1729-4fd8-883a-4fb960bf4cff\") " pod="openshift-multus/multus-jgzjd" Nov 28 06:52:55 crc kubenswrapper[4922]: I1128 06:52:55.016021 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"hostroot\" (UniqueName: \"kubernetes.io/host-path/b05f16bb-1729-4fd8-883a-4fb960bf4cff-hostroot\") pod \"multus-jgzjd\" (UID: \"b05f16bb-1729-4fd8-883a-4fb960bf4cff\") " pod="openshift-multus/multus-jgzjd" Nov 28 06:52:55 crc kubenswrapper[4922]: I1128 06:52:55.016036 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"multus-conf-dir\" (UniqueName: \"kubernetes.io/host-path/b05f16bb-1729-4fd8-883a-4fb960bf4cff-multus-conf-dir\") pod \"multus-jgzjd\" (UID: \"b05f16bb-1729-4fd8-883a-4fb960bf4cff\") " pod="openshift-multus/multus-jgzjd" Nov 28 06:52:55 crc kubenswrapper[4922]: I1128 06:52:55.016052 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/b05f16bb-1729-4fd8-883a-4fb960bf4cff-multus-daemon-config\") pod \"multus-jgzjd\" (UID: \"b05f16bb-1729-4fd8-883a-4fb960bf4cff\") " pod="openshift-multus/multus-jgzjd" Nov 28 06:52:55 crc kubenswrapper[4922]: I1128 06:52:55.016067 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dtxdn\" (UniqueName: \"kubernetes.io/projected/ac5c6b67-2037-400e-8e03-845b47d8ca67-kube-api-access-dtxdn\") pod \"ovnkube-node-7gdxt\" (UID: \"ac5c6b67-2037-400e-8e03-845b47d8ca67\") " pod="openshift-ovn-kubernetes/ovnkube-node-7gdxt" Nov 28 06:52:55 crc kubenswrapper[4922]: I1128 06:52:55.016081 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"os-release\" (UniqueName: 
\"kubernetes.io/host-path/b05f16bb-1729-4fd8-883a-4fb960bf4cff-os-release\") pod \"multus-jgzjd\" (UID: \"b05f16bb-1729-4fd8-883a-4fb960bf4cff\") " pod="openshift-multus/multus-jgzjd" Nov 28 06:52:55 crc kubenswrapper[4922]: I1128 06:52:55.016095 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"multus-socket-dir-parent\" (UniqueName: \"kubernetes.io/host-path/b05f16bb-1729-4fd8-883a-4fb960bf4cff-multus-socket-dir-parent\") pod \"multus-jgzjd\" (UID: \"b05f16bb-1729-4fd8-883a-4fb960bf4cff\") " pod="openshift-multus/multus-jgzjd" Nov 28 06:52:55 crc kubenswrapper[4922]: I1128 06:52:55.016111 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/ac5c6b67-2037-400e-8e03-845b47d8ca67-run-openvswitch\") pod \"ovnkube-node-7gdxt\" (UID: \"ac5c6b67-2037-400e-8e03-845b47d8ca67\") " pod="openshift-ovn-kubernetes/ovnkube-node-7gdxt" Nov 28 06:52:55 crc kubenswrapper[4922]: I1128 06:52:55.016127 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/ac5c6b67-2037-400e-8e03-845b47d8ca67-log-socket\") pod \"ovnkube-node-7gdxt\" (UID: \"ac5c6b67-2037-400e-8e03-845b47d8ca67\") " pod="openshift-ovn-kubernetes/ovnkube-node-7gdxt" Nov 28 06:52:55 crc kubenswrapper[4922]: I1128 06:52:55.016141 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/ac5c6b67-2037-400e-8e03-845b47d8ca67-host-cni-netd\") pod \"ovnkube-node-7gdxt\" (UID: \"ac5c6b67-2037-400e-8e03-845b47d8ca67\") " pod="openshift-ovn-kubernetes/ovnkube-node-7gdxt" Nov 28 06:52:55 crc kubenswrapper[4922]: I1128 06:52:55.016155 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/ac5c6b67-2037-400e-8e03-845b47d8ca67-ovn-node-metrics-cert\") pod \"ovnkube-node-7gdxt\" (UID: \"ac5c6b67-2037-400e-8e03-845b47d8ca67\") " pod="openshift-ovn-kubernetes/ovnkube-node-7gdxt" Nov 28 06:52:55 crc kubenswrapper[4922]: I1128 06:52:55.016171 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/28b50482-ffec-406c-9ff9-9604bce5d5d5-cni-binary-copy\") pod \"multus-additional-cni-plugins-xm948\" (UID: \"28b50482-ffec-406c-9ff9-9604bce5d5d5\") " pod="openshift-multus/multus-additional-cni-plugins-xm948" Nov 28 06:52:55 crc kubenswrapper[4922]: I1128 06:52:55.016186 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-76zhj\" (UniqueName: \"kubernetes.io/projected/28b50482-ffec-406c-9ff9-9604bce5d5d5-kube-api-access-76zhj\") pod \"multus-additional-cni-plugins-xm948\" (UID: \"28b50482-ffec-406c-9ff9-9604bce5d5d5\") " pod="openshift-multus/multus-additional-cni-plugins-xm948" Nov 28 06:52:55 crc kubenswrapper[4922]: I1128 06:52:55.016201 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/28b50482-ffec-406c-9ff9-9604bce5d5d5-os-release\") pod \"multus-additional-cni-plugins-xm948\" (UID: \"28b50482-ffec-406c-9ff9-9604bce5d5d5\") " pod="openshift-multus/multus-additional-cni-plugins-xm948" Nov 28 06:52:55 crc kubenswrapper[4922]: I1128 06:52:55.016233 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"host-slash\" (UniqueName: \"kubernetes.io/host-path/ac5c6b67-2037-400e-8e03-845b47d8ca67-host-slash\") pod \"ovnkube-node-7gdxt\" (UID: \"ac5c6b67-2037-400e-8e03-845b47d8ca67\") " pod="openshift-ovn-kubernetes/ovnkube-node-7gdxt" Nov 28 06:52:55 crc kubenswrapper[4922]: I1128 06:52:55.016249 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/28b50482-ffec-406c-9ff9-9604bce5d5d5-system-cni-dir\") pod \"multus-additional-cni-plugins-xm948\" (UID: \"28b50482-ffec-406c-9ff9-9604bce5d5d5\") " pod="openshift-multus/multus-additional-cni-plugins-xm948" Nov 28 06:52:55 crc kubenswrapper[4922]: I1128 06:52:55.016267 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/ac5c6b67-2037-400e-8e03-845b47d8ca67-var-lib-openvswitch\") pod \"ovnkube-node-7gdxt\" (UID: \"ac5c6b67-2037-400e-8e03-845b47d8ca67\") " pod="openshift-ovn-kubernetes/ovnkube-node-7gdxt" Nov 28 06:52:55 crc kubenswrapper[4922]: I1128 06:52:55.016283 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-cni-multus\" (UniqueName: \"kubernetes.io/host-path/b05f16bb-1729-4fd8-883a-4fb960bf4cff-host-var-lib-cni-multus\") pod \"multus-jgzjd\" (UID: \"b05f16bb-1729-4fd8-883a-4fb960bf4cff\") " pod="openshift-multus/multus-jgzjd" Nov 28 06:52:55 crc kubenswrapper[4922]: I1128 06:52:55.016299 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/ac5c6b67-2037-400e-8e03-845b47d8ca67-host-kubelet\") pod \"ovnkube-node-7gdxt\" (UID: \"ac5c6b67-2037-400e-8e03-845b47d8ca67\") " pod="openshift-ovn-kubernetes/ovnkube-node-7gdxt" Nov 28 06:52:55 crc kubenswrapper[4922]: I1128 06:52:55.016313 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/ac5c6b67-2037-400e-8e03-845b47d8ca67-systemd-units\") pod \"ovnkube-node-7gdxt\" (UID: \"ac5c6b67-2037-400e-8e03-845b47d8ca67\") " pod="openshift-ovn-kubernetes/ovnkube-node-7gdxt" Nov 28 06:52:55 crc kubenswrapper[4922]: I1128 06:52:55.016328 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/b05f16bb-1729-4fd8-883a-4fb960bf4cff-host-run-netns\") pod \"multus-jgzjd\" (UID: \"b05f16bb-1729-4fd8-883a-4fb960bf4cff\") " pod="openshift-multus/multus-jgzjd" Nov 28 06:52:55 crc kubenswrapper[4922]: I1128 06:52:55.016353 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/28b50482-ffec-406c-9ff9-9604bce5d5d5-cni-sysctl-allowlist\") pod \"multus-additional-cni-plugins-xm948\" (UID: \"28b50482-ffec-406c-9ff9-9604bce5d5d5\") " pod="openshift-multus/multus-additional-cni-plugins-xm948" Nov 28 06:52:55 crc kubenswrapper[4922]: I1128 06:52:55.016370 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/ac5c6b67-2037-400e-8e03-845b47d8ca67-node-log\") pod \"ovnkube-node-7gdxt\" (UID: \"ac5c6b67-2037-400e-8e03-845b47d8ca67\") " pod="openshift-ovn-kubernetes/ovnkube-node-7gdxt" Nov 28 06:52:55 crc kubenswrapper[4922]: I1128 06:52:55.016385 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cnibin\" (UniqueName: 
\"kubernetes.io/host-path/b05f16bb-1729-4fd8-883a-4fb960bf4cff-cnibin\") pod \"multus-jgzjd\" (UID: \"b05f16bb-1729-4fd8-883a-4fb960bf4cff\") " pod="openshift-multus/multus-jgzjd" Nov 28 06:52:55 crc kubenswrapper[4922]: I1128 06:52:55.016400 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rootfs\" (UniqueName: \"kubernetes.io/host-path/0498340a-5e95-42bf-a0a6-8ac89a6b8858-rootfs\") pod \"machine-config-daemon-h8wk6\" (UID: \"0498340a-5e95-42bf-a0a6-8ac89a6b8858\") " pod="openshift-machine-config-operator/machine-config-daemon-h8wk6" Nov 28 06:52:55 crc kubenswrapper[4922]: I1128 06:52:55.016416 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-kubernetes\" (UniqueName: \"kubernetes.io/host-path/b05f16bb-1729-4fd8-883a-4fb960bf4cff-etc-kubernetes\") pod \"multus-jgzjd\" (UID: \"b05f16bb-1729-4fd8-883a-4fb960bf4cff\") " pod="openshift-multus/multus-jgzjd" Nov 28 06:52:55 crc kubenswrapper[4922]: I1128 06:52:55.016432 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zf7vq\" (UniqueName: \"kubernetes.io/projected/0498340a-5e95-42bf-a0a6-8ac89a6b8858-kube-api-access-zf7vq\") pod \"machine-config-daemon-h8wk6\" (UID: \"0498340a-5e95-42bf-a0a6-8ac89a6b8858\") " pod="openshift-machine-config-operator/machine-config-daemon-h8wk6" Nov 28 06:52:55 crc kubenswrapper[4922]: I1128 06:52:55.016457 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/b05f16bb-1729-4fd8-883a-4fb960bf4cff-system-cni-dir\") pod \"multus-jgzjd\" (UID: \"b05f16bb-1729-4fd8-883a-4fb960bf4cff\") " pod="openshift-multus/multus-jgzjd" Nov 28 06:52:55 crc kubenswrapper[4922]: I1128 06:52:55.016474 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-cni-bin\" (UniqueName: \"kubernetes.io/host-path/b05f16bb-1729-4fd8-883a-4fb960bf4cff-host-var-lib-cni-bin\") pod \"multus-jgzjd\" (UID: \"b05f16bb-1729-4fd8-883a-4fb960bf4cff\") " pod="openshift-multus/multus-jgzjd" Nov 28 06:52:55 crc kubenswrapper[4922]: I1128 06:52:55.016531 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-cni-bin\" (UniqueName: \"kubernetes.io/host-path/b05f16bb-1729-4fd8-883a-4fb960bf4cff-host-var-lib-cni-bin\") pod \"multus-jgzjd\" (UID: \"b05f16bb-1729-4fd8-883a-4fb960bf4cff\") " pod="openshift-multus/multus-jgzjd" Nov 28 06:52:55 crc kubenswrapper[4922]: E1128 06:52:55.016595 4922 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 06:52:56.0165821 +0000 UTC m=+20.936977682 (durationBeforeRetry 1s). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 06:52:55 crc kubenswrapper[4922]: I1128 06:52:55.016624 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/ac5c6b67-2037-400e-8e03-845b47d8ca67-etc-openvswitch\") pod \"ovnkube-node-7gdxt\" (UID: \"ac5c6b67-2037-400e-8e03-845b47d8ca67\") " pod="openshift-ovn-kubernetes/ovnkube-node-7gdxt" Nov 28 06:52:55 crc kubenswrapper[4922]: I1128 06:52:55.016648 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/ac5c6b67-2037-400e-8e03-845b47d8ca67-host-run-ovn-kubernetes\") pod \"ovnkube-node-7gdxt\" (UID: \"ac5c6b67-2037-400e-8e03-845b47d8ca67\") " pod="openshift-ovn-kubernetes/ovnkube-node-7gdxt" Nov 28 06:52:55 crc kubenswrapper[4922]: I1128 06:52:55.016692 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"multus-cni-dir\" (UniqueName: \"kubernetes.io/host-path/b05f16bb-1729-4fd8-883a-4fb960bf4cff-multus-cni-dir\") pod \"multus-jgzjd\" (UID: \"b05f16bb-1729-4fd8-883a-4fb960bf4cff\") " pod="openshift-multus/multus-jgzjd" Nov 28 06:52:55 crc kubenswrapper[4922]: E1128 06:52:55.016883 4922 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Nov 28 06:52:55 crc kubenswrapper[4922]: E1128 06:52:55.016914 4922 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-28 06:52:56.01690617 +0000 UTC m=+20.937301752 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Nov 28 06:52:55 crc kubenswrapper[4922]: I1128 06:52:55.016936 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/ac5c6b67-2037-400e-8e03-845b47d8ca67-run-systemd\") pod \"ovnkube-node-7gdxt\" (UID: \"ac5c6b67-2037-400e-8e03-845b47d8ca67\") " pod="openshift-ovn-kubernetes/ovnkube-node-7gdxt" Nov 28 06:52:55 crc kubenswrapper[4922]: I1128 06:52:55.016959 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/b05f16bb-1729-4fd8-883a-4fb960bf4cff-host-var-lib-kubelet\") pod \"multus-jgzjd\" (UID: \"b05f16bb-1729-4fd8-883a-4fb960bf4cff\") " pod="openshift-multus/multus-jgzjd" Nov 28 06:52:55 crc kubenswrapper[4922]: I1128 06:52:55.016977 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-multus-certs\" (UniqueName: \"kubernetes.io/host-path/b05f16bb-1729-4fd8-883a-4fb960bf4cff-host-run-multus-certs\") pod \"multus-jgzjd\" (UID: \"b05f16bb-1729-4fd8-883a-4fb960bf4cff\") " pod="openshift-multus/multus-jgzjd" Nov 28 06:52:55 crc kubenswrapper[4922]: I1128 06:52:55.016998 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/ac5c6b67-2037-400e-8e03-845b47d8ca67-host-cni-bin\") pod \"ovnkube-node-7gdxt\" (UID: \"ac5c6b67-2037-400e-8e03-845b47d8ca67\") " pod="openshift-ovn-kubernetes/ovnkube-node-7gdxt" Nov 28 06:52:55 crc kubenswrapper[4922]: I1128 06:52:55.017020 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/ac5c6b67-2037-400e-8e03-845b47d8ca67-run-ovn\") pod \"ovnkube-node-7gdxt\" (UID: \"ac5c6b67-2037-400e-8e03-845b47d8ca67\") " pod="openshift-ovn-kubernetes/ovnkube-node-7gdxt" Nov 28 06:52:55 crc kubenswrapper[4922]: I1128 06:52:55.017629 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/ac5c6b67-2037-400e-8e03-845b47d8ca67-ovnkube-script-lib\") pod \"ovnkube-node-7gdxt\" (UID: \"ac5c6b67-2037-400e-8e03-845b47d8ca67\") " pod="openshift-ovn-kubernetes/ovnkube-node-7gdxt" Nov 28 06:52:55 crc kubenswrapper[4922]: I1128 06:52:55.017691 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/ac5c6b67-2037-400e-8e03-845b47d8ca67-host-slash\") pod \"ovnkube-node-7gdxt\" (UID: \"ac5c6b67-2037-400e-8e03-845b47d8ca67\") " pod="openshift-ovn-kubernetes/ovnkube-node-7gdxt" Nov 28 06:52:55 crc kubenswrapper[4922]: I1128 06:52:55.017723 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/ac5c6b67-2037-400e-8e03-845b47d8ca67-var-lib-openvswitch\") pod \"ovnkube-node-7gdxt\" (UID: \"ac5c6b67-2037-400e-8e03-845b47d8ca67\") " pod="openshift-ovn-kubernetes/ovnkube-node-7gdxt" Nov 28 06:52:55 crc kubenswrapper[4922]: I1128 06:52:55.017743 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-cni-multus\" (UniqueName: 
\"kubernetes.io/host-path/b05f16bb-1729-4fd8-883a-4fb960bf4cff-host-var-lib-cni-multus\") pod \"multus-jgzjd\" (UID: \"b05f16bb-1729-4fd8-883a-4fb960bf4cff\") " pod="openshift-multus/multus-jgzjd" Nov 28 06:52:55 crc kubenswrapper[4922]: I1128 06:52:55.017763 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/ac5c6b67-2037-400e-8e03-845b47d8ca67-host-kubelet\") pod \"ovnkube-node-7gdxt\" (UID: \"ac5c6b67-2037-400e-8e03-845b47d8ca67\") " pod="openshift-ovn-kubernetes/ovnkube-node-7gdxt" Nov 28 06:52:55 crc kubenswrapper[4922]: I1128 06:52:55.017813 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/ac5c6b67-2037-400e-8e03-845b47d8ca67-systemd-units\") pod \"ovnkube-node-7gdxt\" (UID: \"ac5c6b67-2037-400e-8e03-845b47d8ca67\") " pod="openshift-ovn-kubernetes/ovnkube-node-7gdxt" Nov 28 06:52:55 crc kubenswrapper[4922]: I1128 06:52:55.017834 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/b05f16bb-1729-4fd8-883a-4fb960bf4cff-host-run-netns\") pod \"multus-jgzjd\" (UID: \"b05f16bb-1729-4fd8-883a-4fb960bf4cff\") " pod="openshift-multus/multus-jgzjd" Nov 28 06:52:55 crc kubenswrapper[4922]: I1128 06:52:55.017859 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/ac5c6b67-2037-400e-8e03-845b47d8ca67-node-log\") pod \"ovnkube-node-7gdxt\" (UID: \"ac5c6b67-2037-400e-8e03-845b47d8ca67\") " pod="openshift-ovn-kubernetes/ovnkube-node-7gdxt" Nov 28 06:52:55 crc kubenswrapper[4922]: I1128 06:52:55.017889 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/b05f16bb-1729-4fd8-883a-4fb960bf4cff-cnibin\") pod \"multus-jgzjd\" (UID: \"b05f16bb-1729-4fd8-883a-4fb960bf4cff\") " pod="openshift-multus/multus-jgzjd" Nov 28 06:52:55 crc kubenswrapper[4922]: I1128 06:52:55.017915 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-kubernetes\" (UniqueName: \"kubernetes.io/host-path/b05f16bb-1729-4fd8-883a-4fb960bf4cff-etc-kubernetes\") pod \"multus-jgzjd\" (UID: \"b05f16bb-1729-4fd8-883a-4fb960bf4cff\") " pod="openshift-multus/multus-jgzjd" Nov 28 06:52:55 crc kubenswrapper[4922]: I1128 06:52:55.017950 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/b05f16bb-1729-4fd8-883a-4fb960bf4cff-system-cni-dir\") pod \"multus-jgzjd\" (UID: \"b05f16bb-1729-4fd8-883a-4fb960bf4cff\") " pod="openshift-multus/multus-jgzjd" Nov 28 06:52:55 crc kubenswrapper[4922]: I1128 06:52:55.018144 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-k8s-cni-cncf-io\" (UniqueName: \"kubernetes.io/host-path/b05f16bb-1729-4fd8-883a-4fb960bf4cff-host-run-k8s-cni-cncf-io\") pod \"multus-jgzjd\" (UID: \"b05f16bb-1729-4fd8-883a-4fb960bf4cff\") " pod="openshift-multus/multus-jgzjd" Nov 28 06:52:55 crc kubenswrapper[4922]: I1128 06:52:55.018489 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/ac5c6b67-2037-400e-8e03-845b47d8ca67-env-overrides\") pod \"ovnkube-node-7gdxt\" (UID: \"ac5c6b67-2037-400e-8e03-845b47d8ca67\") " pod="openshift-ovn-kubernetes/ovnkube-node-7gdxt" Nov 28 06:52:55 crc kubenswrapper[4922]: E1128 06:52:55.018568 4922 secret.go:188] 
Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 28 06:52:55 crc kubenswrapper[4922]: E1128 06:52:55.018595 4922 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-28 06:52:56.018587127 +0000 UTC m=+20.938982699 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 28 06:52:55 crc kubenswrapper[4922]: I1128 06:52:55.018617 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/ac5c6b67-2037-400e-8e03-845b47d8ca67-host-run-netns\") pod \"ovnkube-node-7gdxt\" (UID: \"ac5c6b67-2037-400e-8e03-845b47d8ca67\") " pod="openshift-ovn-kubernetes/ovnkube-node-7gdxt" Nov 28 06:52:55 crc kubenswrapper[4922]: I1128 06:52:55.018638 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/ac5c6b67-2037-400e-8e03-845b47d8ca67-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-7gdxt\" (UID: \"ac5c6b67-2037-400e-8e03-845b47d8ca67\") " pod="openshift-ovn-kubernetes/ovnkube-node-7gdxt" Nov 28 06:52:55 crc kubenswrapper[4922]: I1128 06:52:55.019003 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/ac5c6b67-2037-400e-8e03-845b47d8ca67-ovnkube-config\") pod \"ovnkube-node-7gdxt\" (UID: \"ac5c6b67-2037-400e-8e03-845b47d8ca67\") " pod="openshift-ovn-kubernetes/ovnkube-node-7gdxt" Nov 28 06:52:55 crc kubenswrapper[4922]: I1128 06:52:55.019311 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/b05f16bb-1729-4fd8-883a-4fb960bf4cff-os-release\") pod \"multus-jgzjd\" (UID: \"b05f16bb-1729-4fd8-883a-4fb960bf4cff\") " pod="openshift-multus/multus-jgzjd" Nov 28 06:52:55 crc kubenswrapper[4922]: I1128 06:52:55.019510 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/ac5c6b67-2037-400e-8e03-845b47d8ca67-log-socket\") pod \"ovnkube-node-7gdxt\" (UID: \"ac5c6b67-2037-400e-8e03-845b47d8ca67\") " pod="openshift-ovn-kubernetes/ovnkube-node-7gdxt" Nov 28 06:52:55 crc kubenswrapper[4922]: I1128 06:52:55.019498 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/ac5c6b67-2037-400e-8e03-845b47d8ca67-run-openvswitch\") pod \"ovnkube-node-7gdxt\" (UID: \"ac5c6b67-2037-400e-8e03-845b47d8ca67\") " pod="openshift-ovn-kubernetes/ovnkube-node-7gdxt" Nov 28 06:52:55 crc kubenswrapper[4922]: I1128 06:52:55.019538 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/ac5c6b67-2037-400e-8e03-845b47d8ca67-host-cni-netd\") pod \"ovnkube-node-7gdxt\" (UID: \"ac5c6b67-2037-400e-8e03-845b47d8ca67\") " 
pod="openshift-ovn-kubernetes/ovnkube-node-7gdxt" Nov 28 06:52:55 crc kubenswrapper[4922]: I1128 06:52:55.019553 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"multus-socket-dir-parent\" (UniqueName: \"kubernetes.io/host-path/b05f16bb-1729-4fd8-883a-4fb960bf4cff-multus-socket-dir-parent\") pod \"multus-jgzjd\" (UID: \"b05f16bb-1729-4fd8-883a-4fb960bf4cff\") " pod="openshift-multus/multus-jgzjd" Nov 28 06:52:55 crc kubenswrapper[4922]: I1128 06:52:55.019459 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"hostroot\" (UniqueName: \"kubernetes.io/host-path/b05f16bb-1729-4fd8-883a-4fb960bf4cff-hostroot\") pod \"multus-jgzjd\" (UID: \"b05f16bb-1729-4fd8-883a-4fb960bf4cff\") " pod="openshift-multus/multus-jgzjd" Nov 28 06:52:55 crc kubenswrapper[4922]: I1128 06:52:55.019844 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/b05f16bb-1729-4fd8-883a-4fb960bf4cff-cni-binary-copy\") pod \"multus-jgzjd\" (UID: \"b05f16bb-1729-4fd8-883a-4fb960bf4cff\") " pod="openshift-multus/multus-jgzjd" Nov 28 06:52:55 crc kubenswrapper[4922]: I1128 06:52:55.019939 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/b05f16bb-1729-4fd8-883a-4fb960bf4cff-multus-daemon-config\") pod \"multus-jgzjd\" (UID: \"b05f16bb-1729-4fd8-883a-4fb960bf4cff\") " pod="openshift-multus/multus-jgzjd" Nov 28 06:52:55 crc kubenswrapper[4922]: I1128 06:52:55.020010 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"multus-conf-dir\" (UniqueName: \"kubernetes.io/host-path/b05f16bb-1729-4fd8-883a-4fb960bf4cff-multus-conf-dir\") pod \"multus-jgzjd\" (UID: \"b05f16bb-1729-4fd8-883a-4fb960bf4cff\") " pod="openshift-multus/multus-jgzjd" Nov 28 06:52:55 crc kubenswrapper[4922]: I1128 06:52:55.021751 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/ac5c6b67-2037-400e-8e03-845b47d8ca67-ovn-node-metrics-cert\") pod \"ovnkube-node-7gdxt\" (UID: \"ac5c6b67-2037-400e-8e03-845b47d8ca67\") " pod="openshift-ovn-kubernetes/ovnkube-node-7gdxt" Nov 28 06:52:55 crc kubenswrapper[4922]: I1128 06:52:55.024773 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-jgzjd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b05f16bb-1729-4fd8-883a-4fb960bf4cff\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fwd5b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T06:52:54Z\\\"}}\" for pod \"openshift-multus\"/\"multus-jgzjd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 06:52:55 crc kubenswrapper[4922]: I1128 06:52:55.038768 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fwd5b\" (UniqueName: \"kubernetes.io/projected/b05f16bb-1729-4fd8-883a-4fb960bf4cff-kube-api-access-fwd5b\") pod \"multus-jgzjd\" (UID: \"b05f16bb-1729-4fd8-883a-4fb960bf4cff\") " pod="openshift-multus/multus-jgzjd" Nov 28 06:52:55 crc kubenswrapper[4922]: I1128 06:52:55.038890 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dtxdn\" (UniqueName: \"kubernetes.io/projected/ac5c6b67-2037-400e-8e03-845b47d8ca67-kube-api-access-dtxdn\") pod \"ovnkube-node-7gdxt\" (UID: \"ac5c6b67-2037-400e-8e03-845b47d8ca67\") " pod="openshift-ovn-kubernetes/ovnkube-node-7gdxt" Nov 28 06:52:55 crc kubenswrapper[4922]: I1128 06:52:55.040452 4922 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 06:52:55 crc kubenswrapper[4922]: I1128 06:52:55.062356 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-7gdxt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ac5c6b67-2037-400e-8e03-845b47d8ca67\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"message\\\":\\\"containers with incomplete status: [kubecfg-setup]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dtxdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dtxdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dtxdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":
\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dtxdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dtxdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dtxdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"rea
dOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dtxdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dtxdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dtxdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T06:52:54Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-7gdxt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 06:52:55 crc kubenswrapper[4922]: I1128 06:52:55.077915 4922 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"78d1b075-6126-476f-8318-483aeaa7b542\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:35Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:35Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d8727c0c5c76c4071561c95b14554063c01a2d7d826bfef19624a346f7079096\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c63926690deadbe1a0b9bd4228c1be8c9d959ae52dc13540e1d7da0ef6ce2b44\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://12e55656cf2a2cadbef853f458f347aabe75bd6b644a48a8c320144a0bbb77f8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc
/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://43f9db3464d315f5b596f15327b9e7cfc2935bb8f18492bcf4aa1b2d8e2e8951\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://43f9db3464d315f5b596f15327b9e7cfc2935bb8f18492bcf4aa1b2d8e2e8951\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-28T06:52:53Z\\\",\\\"message\\\":\\\" to sync for client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file\\\\nI1128 06:52:53.805479 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\nI1128 06:52:53.805349 1 requestheader_controller.go:172] Starting RequestHeaderAuthRequestController\\\\nI1128 06:52:53.807455 1 shared_informer.go:313] Waiting for caches to sync for RequestHeaderAuthRequestController\\\\nI1128 06:52:53.805665 1 envvar.go:172] \\\\\\\"Feature gate default state\\\\\\\" feature=\\\\\\\"WatchListClient\\\\\\\" enabled=false\\\\nI1128 06:52:53.807562 1 envvar.go:172] \\\\\\\"Feature gate default state\\\\\\\" feature=\\\\\\\"InformerResourceVersion\\\\\\\" enabled=false\\\\nI1128 06:52:53.805922 1 tlsconfig.go:203] \\\\\\\"Loaded serving cert\\\\\\\" certName=\\\\\\\"serving-cert::/tmp/serving-cert-870734072/tls.crt::/tmp/serving-cert-870734072/tls.key\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"localhost\\\\\\\\\\\\\\\" [serving] validServingFor=[localhost] issuer=\\\\\\\\\\\\\\\"check-endpoints-signer@1764312757\\\\\\\\\\\\\\\" (2025-11-28 06:52:36 +0000 UTC to 2025-12-28 06:52:37 +0000 UTC (now=2025-11-28 06:52:53.805871396 +0000 UTC))\\\\\\\"\\\\nI1128 06:52:53.808356 1 named_certificates.go:53] \\\\\\\"Loaded SNI cert\\\\\\\" index=0 certName=\\\\\\\"self-signed loopback\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"apiserver-loopback-client@1764312768\\\\\\\\\\\\\\\" [serving] validServingFor=[apiserver-loopback-client] issuer=\\\\\\\\\\\\\\\"apiserver-loopback-client-ca@1764312768\\\\\\\\\\\\\\\" (2025-11-28 05:52:47 +0000 UTC to 2026-11-28 05:52:47 +0000 UTC (now=2025-11-28 06:52:53.808326937 +0000 UTC))\\\\\\\"\\\\nI1128 06:52:53.808434 1 secure_serving.go:213] Serving securely on [::]:17697\\\\nI1128 06:52:53.808491 1 genericapiserver.go:683] [graceful-termination] waiting for shutdown to be initiated\\\\nI1128 06:52:53.805962 1 dynamic_serving_content.go:135] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-870734072/tls.crt::/tmp/serving-cert-870734072/tls.key\\\\\\\"\\\\nI1128 06:52:53.808872 1 tlsconfig.go:243] \\\\\\\"Starting DynamicServingCertificateController\\\\\\\"\\\\nF1128 06:52:53.810397 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T06:52:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cd17aa15325fbe3a40b81cea5bfec137ef02a8dd2d5f8393be4d206a8360ba89\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:37Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9a1a9e0fb2169a4caf2986906a87573648458725e2571ec377704856d3c16b47\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9a1a9e0fb2169a4caf2986906a87573648458725e2571ec377704856d3c16b47\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T06:52:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T06:52:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T06:52:35Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 06:52:55 crc kubenswrapper[4922]: I1128 06:52:55.091743 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"message\\\":\\\"containers with unready status: 
[iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 06:52:55 crc kubenswrapper[4922]: I1128 06:52:55.103518 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-jgzjd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b05f16bb-1729-4fd8-883a-4fb960bf4cff\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fwd5b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T06:52:54Z\\\"}}\" for pod \"openshift-multus\"/\"multus-jgzjd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 06:52:55 crc kubenswrapper[4922]: I1128 06:52:55.118535 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-xm948" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"28b50482-ffec-406c-9ff9-9604bce5d5d5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin 
routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-76zhj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-76zhj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-76zhj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"last
State\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-76zhj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-76zhj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-76zhj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-76zhj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T06:52:54Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-xm948\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 
127.0.0.1:9743: connect: connection refused" Nov 28 06:52:55 crc kubenswrapper[4922]: I1128 06:52:55.122718 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/0498340a-5e95-42bf-a0a6-8ac89a6b8858-mcd-auth-proxy-config\") pod \"machine-config-daemon-h8wk6\" (UID: \"0498340a-5e95-42bf-a0a6-8ac89a6b8858\") " pod="openshift-machine-config-operator/machine-config-daemon-h8wk6" Nov 28 06:52:55 crc kubenswrapper[4922]: I1128 06:52:55.122774 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 06:52:55 crc kubenswrapper[4922]: I1128 06:52:55.122793 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/0498340a-5e95-42bf-a0a6-8ac89a6b8858-proxy-tls\") pod \"machine-config-daemon-h8wk6\" (UID: \"0498340a-5e95-42bf-a0a6-8ac89a6b8858\") " pod="openshift-machine-config-operator/machine-config-daemon-h8wk6" Nov 28 06:52:55 crc kubenswrapper[4922]: I1128 06:52:55.122824 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tuning-conf-dir\" (UniqueName: \"kubernetes.io/host-path/28b50482-ffec-406c-9ff9-9604bce5d5d5-tuning-conf-dir\") pod \"multus-additional-cni-plugins-xm948\" (UID: \"28b50482-ffec-406c-9ff9-9604bce5d5d5\") " pod="openshift-multus/multus-additional-cni-plugins-xm948" Nov 28 06:52:55 crc kubenswrapper[4922]: I1128 06:52:55.122851 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/28b50482-ffec-406c-9ff9-9604bce5d5d5-cnibin\") pod \"multus-additional-cni-plugins-xm948\" (UID: \"28b50482-ffec-406c-9ff9-9604bce5d5d5\") " pod="openshift-multus/multus-additional-cni-plugins-xm948" Nov 28 06:52:55 crc kubenswrapper[4922]: I1128 06:52:55.122872 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/28b50482-ffec-406c-9ff9-9604bce5d5d5-cni-binary-copy\") pod \"multus-additional-cni-plugins-xm948\" (UID: \"28b50482-ffec-406c-9ff9-9604bce5d5d5\") " pod="openshift-multus/multus-additional-cni-plugins-xm948" Nov 28 06:52:55 crc kubenswrapper[4922]: I1128 06:52:55.122891 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-76zhj\" (UniqueName: \"kubernetes.io/projected/28b50482-ffec-406c-9ff9-9604bce5d5d5-kube-api-access-76zhj\") pod \"multus-additional-cni-plugins-xm948\" (UID: \"28b50482-ffec-406c-9ff9-9604bce5d5d5\") " pod="openshift-multus/multus-additional-cni-plugins-xm948" Nov 28 06:52:55 crc kubenswrapper[4922]: I1128 06:52:55.122913 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/28b50482-ffec-406c-9ff9-9604bce5d5d5-system-cni-dir\") pod \"multus-additional-cni-plugins-xm948\" (UID: \"28b50482-ffec-406c-9ff9-9604bce5d5d5\") " pod="openshift-multus/multus-additional-cni-plugins-xm948" Nov 28 06:52:55 crc kubenswrapper[4922]: I1128 06:52:55.122929 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"os-release\" (UniqueName: 
\"kubernetes.io/host-path/28b50482-ffec-406c-9ff9-9604bce5d5d5-os-release\") pod \"multus-additional-cni-plugins-xm948\" (UID: \"28b50482-ffec-406c-9ff9-9604bce5d5d5\") " pod="openshift-multus/multus-additional-cni-plugins-xm948" Nov 28 06:52:55 crc kubenswrapper[4922]: I1128 06:52:55.122951 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/28b50482-ffec-406c-9ff9-9604bce5d5d5-cni-sysctl-allowlist\") pod \"multus-additional-cni-plugins-xm948\" (UID: \"28b50482-ffec-406c-9ff9-9604bce5d5d5\") " pod="openshift-multus/multus-additional-cni-plugins-xm948" Nov 28 06:52:55 crc kubenswrapper[4922]: I1128 06:52:55.122971 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rootfs\" (UniqueName: \"kubernetes.io/host-path/0498340a-5e95-42bf-a0a6-8ac89a6b8858-rootfs\") pod \"machine-config-daemon-h8wk6\" (UID: \"0498340a-5e95-42bf-a0a6-8ac89a6b8858\") " pod="openshift-machine-config-operator/machine-config-daemon-h8wk6" Nov 28 06:52:55 crc kubenswrapper[4922]: I1128 06:52:55.123000 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 06:52:55 crc kubenswrapper[4922]: I1128 06:52:55.123020 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zf7vq\" (UniqueName: \"kubernetes.io/projected/0498340a-5e95-42bf-a0a6-8ac89a6b8858-kube-api-access-zf7vq\") pod \"machine-config-daemon-h8wk6\" (UID: \"0498340a-5e95-42bf-a0a6-8ac89a6b8858\") " pod="openshift-machine-config-operator/machine-config-daemon-h8wk6" Nov 28 06:52:55 crc kubenswrapper[4922]: I1128 06:52:55.123334 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/28b50482-ffec-406c-9ff9-9604bce5d5d5-os-release\") pod \"multus-additional-cni-plugins-xm948\" (UID: \"28b50482-ffec-406c-9ff9-9604bce5d5d5\") " pod="openshift-multus/multus-additional-cni-plugins-xm948" Nov 28 06:52:55 crc kubenswrapper[4922]: I1128 06:52:55.123894 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/28b50482-ffec-406c-9ff9-9604bce5d5d5-cni-binary-copy\") pod \"multus-additional-cni-plugins-xm948\" (UID: \"28b50482-ffec-406c-9ff9-9604bce5d5d5\") " pod="openshift-multus/multus-additional-cni-plugins-xm948" Nov 28 06:52:55 crc kubenswrapper[4922]: I1128 06:52:55.123924 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/28b50482-ffec-406c-9ff9-9604bce5d5d5-system-cni-dir\") pod \"multus-additional-cni-plugins-xm948\" (UID: \"28b50482-ffec-406c-9ff9-9604bce5d5d5\") " pod="openshift-multus/multus-additional-cni-plugins-xm948" Nov 28 06:52:55 crc kubenswrapper[4922]: I1128 06:52:55.123936 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/28b50482-ffec-406c-9ff9-9604bce5d5d5-cni-sysctl-allowlist\") pod \"multus-additional-cni-plugins-xm948\" (UID: \"28b50482-ffec-406c-9ff9-9604bce5d5d5\") " pod="openshift-multus/multus-additional-cni-plugins-xm948" Nov 28 06:52:55 crc kubenswrapper[4922]: I1128 
06:52:55.124003 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rootfs\" (UniqueName: \"kubernetes.io/host-path/0498340a-5e95-42bf-a0a6-8ac89a6b8858-rootfs\") pod \"machine-config-daemon-h8wk6\" (UID: \"0498340a-5e95-42bf-a0a6-8ac89a6b8858\") " pod="openshift-machine-config-operator/machine-config-daemon-h8wk6" Nov 28 06:52:55 crc kubenswrapper[4922]: E1128 06:52:55.124108 4922 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 28 06:52:55 crc kubenswrapper[4922]: E1128 06:52:55.124124 4922 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 28 06:52:55 crc kubenswrapper[4922]: E1128 06:52:55.124135 4922 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 28 06:52:55 crc kubenswrapper[4922]: E1128 06:52:55.124189 4922 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-11-28 06:52:56.124177715 +0000 UTC m=+21.044573297 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 28 06:52:55 crc kubenswrapper[4922]: E1128 06:52:55.124539 4922 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 28 06:52:55 crc kubenswrapper[4922]: E1128 06:52:55.124578 4922 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 28 06:52:55 crc kubenswrapper[4922]: E1128 06:52:55.124599 4922 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 28 06:52:55 crc kubenswrapper[4922]: I1128 06:52:55.124533 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/0498340a-5e95-42bf-a0a6-8ac89a6b8858-mcd-auth-proxy-config\") pod \"machine-config-daemon-h8wk6\" (UID: \"0498340a-5e95-42bf-a0a6-8ac89a6b8858\") " pod="openshift-machine-config-operator/machine-config-daemon-h8wk6" Nov 28 06:52:55 crc kubenswrapper[4922]: I1128 06:52:55.124682 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/28b50482-ffec-406c-9ff9-9604bce5d5d5-cnibin\") pod \"multus-additional-cni-plugins-xm948\" (UID: 
\"28b50482-ffec-406c-9ff9-9604bce5d5d5\") " pod="openshift-multus/multus-additional-cni-plugins-xm948" Nov 28 06:52:55 crc kubenswrapper[4922]: E1128 06:52:55.124698 4922 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-11-28 06:52:56.124675289 +0000 UTC m=+21.045070861 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 28 06:52:55 crc kubenswrapper[4922]: I1128 06:52:55.127049 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tuning-conf-dir\" (UniqueName: \"kubernetes.io/host-path/28b50482-ffec-406c-9ff9-9604bce5d5d5-tuning-conf-dir\") pod \"multus-additional-cni-plugins-xm948\" (UID: \"28b50482-ffec-406c-9ff9-9604bce5d5d5\") " pod="openshift-multus/multus-additional-cni-plugins-xm948" Nov 28 06:52:55 crc kubenswrapper[4922]: I1128 06:52:55.127358 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/0498340a-5e95-42bf-a0a6-8ac89a6b8858-proxy-tls\") pod \"machine-config-daemon-h8wk6\" (UID: \"0498340a-5e95-42bf-a0a6-8ac89a6b8858\") " pod="openshift-machine-config-operator/machine-config-daemon-h8wk6" Nov 28 06:52:55 crc kubenswrapper[4922]: I1128 06:52:55.132362 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f54a7fc3-dff2-4919-91a9-7defe17b717e\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2b39d53d3c00be07ad6ed44fb37961669118efd3f0a953a39e05c90c9fc30319\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1c84fe4f54f9b9fc76f23f13aaee875e9d127be3dec3e3952893436fb9d94936\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bf8a543b9af64068e2164c1fb7fcf8117e36a3f1a6c6e40df2a63da8fea86612\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7ef4b355167e11c82fb8067373d0d1b4ec5551157e683043c98f9249082795d2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T06:52:35Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 06:52:55 crc kubenswrapper[4922]: I1128 06:52:55.140729 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zf7vq\" (UniqueName: \"kubernetes.io/projected/0498340a-5e95-42bf-a0a6-8ac89a6b8858-kube-api-access-zf7vq\") pod \"machine-config-daemon-h8wk6\" (UID: \"0498340a-5e95-42bf-a0a6-8ac89a6b8858\") " pod="openshift-machine-config-operator/machine-config-daemon-h8wk6" Nov 28 06:52:55 crc kubenswrapper[4922]: I1128 06:52:55.141995 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 06:52:55 crc kubenswrapper[4922]: I1128 06:52:55.142504 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-76zhj\" (UniqueName: \"kubernetes.io/projected/28b50482-ffec-406c-9ff9-9604bce5d5d5-kube-api-access-76zhj\") pod \"multus-additional-cni-plugins-xm948\" (UID: \"28b50482-ffec-406c-9ff9-9604bce5d5d5\") " pod="openshift-multus/multus-additional-cni-plugins-xm948" Nov 28 06:52:55 crc kubenswrapper[4922]: I1128 06:52:55.148950 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-w9zxj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e1f29751-83a6-4469-b733-50e654026f8c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ghdft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T06:52:54Z\\\"}}\" for pod 
\"openshift-dns\"/\"node-resolver-w9zxj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 06:52:55 crc kubenswrapper[4922]: I1128 06:52:55.159852 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 06:52:55 crc kubenswrapper[4922]: I1128 06:52:55.168855 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 06:52:55 crc kubenswrapper[4922]: I1128 06:52:55.177691 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 06:52:55 crc kubenswrapper[4922]: I1128 06:52:55.179850 4922 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/multus-jgzjd" Nov 28 06:52:55 crc kubenswrapper[4922]: I1128 06:52:55.184600 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-daemon-h8wk6" Nov 28 06:52:55 crc kubenswrapper[4922]: I1128 06:52:55.192743 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-h8wk6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0498340a-5e95-42bf-a0a6-8ac89a6b8858\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zf7vq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zf7vq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T06:52:54Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-h8wk6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 06:52:55 crc kubenswrapper[4922]: W1128 06:52:55.195256 4922 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podb05f16bb_1729_4fd8_883a_4fb960bf4cff.slice/crio-0328e2a2343b100c46cf100bfbdcbd60bacdd64e3f57126abb38a1b4a7487ec3 WatchSource:0}: Error finding container 0328e2a2343b100c46cf100bfbdcbd60bacdd64e3f57126abb38a1b4a7487ec3: Status 404 returned error can't find the container with id 0328e2a2343b100c46cf100bfbdcbd60bacdd64e3f57126abb38a1b4a7487ec3 Nov 28 06:52:55 crc kubenswrapper[4922]: I1128 06:52:55.205474 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-7gdxt" Nov 28 06:52:55 crc kubenswrapper[4922]: I1128 06:52:55.221437 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-additional-cni-plugins-xm948" Nov 28 06:52:55 crc kubenswrapper[4922]: W1128 06:52:55.245718 4922 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podac5c6b67_2037_400e_8e03_845b47d8ca67.slice/crio-6cf5f8236249cc30915ac74dd4562edd109c30472174248c7b941efb278a4f52 WatchSource:0}: Error finding container 6cf5f8236249cc30915ac74dd4562edd109c30472174248c7b941efb278a4f52: Status 404 returned error can't find the container with id 6cf5f8236249cc30915ac74dd4562edd109c30472174248c7b941efb278a4f52 Nov 28 06:52:55 crc kubenswrapper[4922]: I1128 06:52:55.401323 4922 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="01ab3dd5-8196-46d0-ad33-122e2ca51def" path="/var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes" Nov 28 06:52:55 crc kubenswrapper[4922]: I1128 06:52:55.401824 4922 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" path="/var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes" Nov 28 06:52:55 crc kubenswrapper[4922]: I1128 06:52:55.403021 4922 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="09efc573-dbb6-4249-bd59-9b87aba8dd28" path="/var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes" Nov 28 06:52:55 crc kubenswrapper[4922]: I1128 06:52:55.403619 4922 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0b574797-001e-440a-8f4e-c0be86edad0f" path="/var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes" Nov 28 06:52:55 crc kubenswrapper[4922]: I1128 06:52:55.404619 4922 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0b78653f-4ff9-4508-8672-245ed9b561e3" path="/var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes" Nov 28 06:52:55 crc kubenswrapper[4922]: I1128 06:52:55.405206 4922 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1386a44e-36a2-460c-96d0-0359d2b6f0f5" path="/var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes" Nov 28 06:52:55 crc kubenswrapper[4922]: I1128 06:52:55.405897 4922 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1bf7eb37-55a3-4c65-b768-a94c82151e69" path="/var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes" Nov 28 06:52:55 crc kubenswrapper[4922]: I1128 06:52:55.406929 4922 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1d611f23-29be-4491-8495-bee1670e935f" 
path="/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes" Nov 28 06:52:55 crc kubenswrapper[4922]: I1128 06:52:55.407662 4922 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="20b0d48f-5fd6-431c-a545-e3c800c7b866" path="/var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/volumes" Nov 28 06:52:55 crc kubenswrapper[4922]: I1128 06:52:55.408569 4922 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" path="/var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes" Nov 28 06:52:55 crc kubenswrapper[4922]: I1128 06:52:55.409053 4922 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="22c825df-677d-4ca6-82db-3454ed06e783" path="/var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes" Nov 28 06:52:55 crc kubenswrapper[4922]: I1128 06:52:55.411754 4922 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="25e176fe-21b4-4974-b1ed-c8b94f112a7f" path="/var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes" Nov 28 06:52:55 crc kubenswrapper[4922]: I1128 06:52:55.412283 4922 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" path="/var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/volumes" Nov 28 06:52:55 crc kubenswrapper[4922]: I1128 06:52:55.412910 4922 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="31d8b7a1-420e-4252-a5b7-eebe8a111292" path="/var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes" Nov 28 06:52:55 crc kubenswrapper[4922]: I1128 06:52:55.413859 4922 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3ab1a177-2de0-46d9-b765-d0d0649bb42e" path="/var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/volumes" Nov 28 06:52:55 crc kubenswrapper[4922]: I1128 06:52:55.414398 4922 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" path="/var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes" Nov 28 06:52:55 crc kubenswrapper[4922]: I1128 06:52:55.415435 4922 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="43509403-f426-496e-be36-56cef71462f5" path="/var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes" Nov 28 06:52:55 crc kubenswrapper[4922]: I1128 06:52:55.415389 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-xm948" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"28b50482-ffec-406c-9ff9-9604bce5d5d5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-76zhj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-76zhj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-76zhj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"
/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-76zhj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-76zhj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-76zhj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-76zhj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T06:52:54Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-xm948\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:52:55Z is after 2025-08-24T17:21:41Z" Nov 28 06:52:55 crc kubenswrapper[4922]: I1128 06:52:55.416270 4922 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="44663579-783b-4372-86d6-acf235a62d72" 
path="/var/lib/kubelet/pods/44663579-783b-4372-86d6-acf235a62d72/volumes" Nov 28 06:52:55 crc kubenswrapper[4922]: I1128 06:52:55.416823 4922 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="496e6271-fb68-4057-954e-a0d97a4afa3f" path="/var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes" Nov 28 06:52:55 crc kubenswrapper[4922]: I1128 06:52:55.417434 4922 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" path="/var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes" Nov 28 06:52:55 crc kubenswrapper[4922]: I1128 06:52:55.418368 4922 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="49ef4625-1d3a-4a9f-b595-c2433d32326d" path="/var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/volumes" Nov 28 06:52:55 crc kubenswrapper[4922]: I1128 06:52:55.418914 4922 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4bb40260-dbaa-4fb0-84df-5e680505d512" path="/var/lib/kubelet/pods/4bb40260-dbaa-4fb0-84df-5e680505d512/volumes" Nov 28 06:52:55 crc kubenswrapper[4922]: I1128 06:52:55.419739 4922 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5225d0e4-402f-4861-b410-819f433b1803" path="/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes" Nov 28 06:52:55 crc kubenswrapper[4922]: I1128 06:52:55.420473 4922 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5441d097-087c-4d9a-baa8-b210afa90fc9" path="/var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes" Nov 28 06:52:55 crc kubenswrapper[4922]: I1128 06:52:55.420860 4922 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="57a731c4-ef35-47a8-b875-bfb08a7f8011" path="/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes" Nov 28 06:52:55 crc kubenswrapper[4922]: I1128 06:52:55.422115 4922 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5b88f790-22fa-440e-b583-365168c0b23d" path="/var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/volumes" Nov 28 06:52:55 crc kubenswrapper[4922]: I1128 06:52:55.425212 4922 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5fe579f8-e8a6-4643-bce5-a661393c4dde" path="/var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/volumes" Nov 28 06:52:55 crc kubenswrapper[4922]: I1128 06:52:55.425691 4922 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6402fda4-df10-493c-b4e5-d0569419652d" path="/var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes" Nov 28 06:52:55 crc kubenswrapper[4922]: I1128 06:52:55.428451 4922 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6509e943-70c6-444c-bc41-48a544e36fbd" path="/var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes" Nov 28 06:52:55 crc kubenswrapper[4922]: I1128 06:52:55.429360 4922 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6731426b-95fe-49ff-bb5f-40441049fde2" path="/var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/volumes" Nov 28 06:52:55 crc kubenswrapper[4922]: I1128 06:52:55.429614 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f54a7fc3-dff2-4919-91a9-7defe17b717e\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2b39d53d3c00be07ad6ed44fb37961669118efd3f0a953a39e05c90c9fc30319\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1c84fe4f54f9b9fc76f23f13aaee875e9d127be3dec3e3952893436fb9d94936\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bf8a543b9af64068e2164c1fb7fcf8117e36a3f1a6c6e40df2a63da8fea86612\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7ef4b355167e11c82fb8067373d0d1b4ec5551157e683043c98f9249082795d2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T06:52:35Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:52:55Z is after 2025-08-24T17:21:41Z" Nov 28 06:52:55 crc kubenswrapper[4922]: I1128 06:52:55.429818 4922 kubelet_volumes.go:152] "Cleaned up orphaned volume subpath from pod" podUID="6ea678ab-3438-413e-bfe3-290ae7725660" path="/var/lib/kubelet/pods/6ea678ab-3438-413e-bfe3-290ae7725660/volume-subpaths/run-systemd/ovnkube-controller/6" Nov 28 06:52:55 crc kubenswrapper[4922]: I1128 06:52:55.429920 4922 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6ea678ab-3438-413e-bfe3-290ae7725660" path="/var/lib/kubelet/pods/6ea678ab-3438-413e-bfe3-290ae7725660/volumes" Nov 28 06:52:55 crc kubenswrapper[4922]: I1128 06:52:55.433211 4922 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7539238d-5fe0-46ed-884e-1c3b566537ec" path="/var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes" Nov 28 06:52:55 crc kubenswrapper[4922]: I1128 06:52:55.434045 4922 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7583ce53-e0fe-4a16-9e4d-50516596a136" path="/var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes" Nov 28 06:52:55 crc kubenswrapper[4922]: I1128 06:52:55.434631 4922 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7bb08738-c794-4ee8-9972-3a62ca171029" path="/var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes" Nov 28 06:52:55 crc kubenswrapper[4922]: I1128 06:52:55.436352 4922 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="87cf06ed-a83f-41a7-828d-70653580a8cb" path="/var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes" Nov 28 06:52:55 crc kubenswrapper[4922]: I1128 06:52:55.439817 4922 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" path="/var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes" Nov 28 06:52:55 crc kubenswrapper[4922]: I1128 06:52:55.441492 4922 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="925f1c65-6136-48ba-85aa-3a3b50560753" path="/var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes" Nov 28 06:52:55 crc kubenswrapper[4922]: I1128 06:52:55.442391 4922 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" 
podUID="96b93a3a-6083-4aea-8eab-fe1aa8245ad9" path="/var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/volumes" Nov 28 06:52:55 crc kubenswrapper[4922]: I1128 06:52:55.443518 4922 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9d4552c7-cd75-42dd-8880-30dd377c49a4" path="/var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes" Nov 28 06:52:55 crc kubenswrapper[4922]: I1128 06:52:55.444081 4922 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a0128f3a-b052-44ed-a84e-c4c8aaf17c13" path="/var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/volumes" Nov 28 06:52:55 crc kubenswrapper[4922]: I1128 06:52:55.445081 4922 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a31745f5-9847-4afe-82a5-3161cc66ca93" path="/var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes" Nov 28 06:52:55 crc kubenswrapper[4922]: I1128 06:52:55.445694 4922 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" path="/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes" Nov 28 06:52:55 crc kubenswrapper[4922]: I1128 06:52:55.446755 4922 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b6312bbd-5731-4ea0-a20f-81d5a57df44a" path="/var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/volumes" Nov 28 06:52:55 crc kubenswrapper[4922]: I1128 06:52:55.447279 4922 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b6cd30de-2eeb-49a2-ab40-9167f4560ff5" path="/var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes" Nov 28 06:52:55 crc kubenswrapper[4922]: I1128 06:52:55.448202 4922 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" path="/var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes" Nov 28 06:52:55 crc kubenswrapper[4922]: I1128 06:52:55.448711 4922 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bd23aa5c-e532-4e53-bccf-e79f130c5ae8" path="/var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/volumes" Nov 28 06:52:55 crc kubenswrapper[4922]: I1128 06:52:55.450120 4922 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bf126b07-da06-4140-9a57-dfd54fc6b486" path="/var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes" Nov 28 06:52:55 crc kubenswrapper[4922]: I1128 06:52:55.450660 4922 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c03ee662-fb2f-4fc4-a2c1-af487c19d254" path="/var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes" Nov 28 06:52:55 crc kubenswrapper[4922]: I1128 06:52:55.450842 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"message\\\":\\\"containers 
with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:52:55Z is after 2025-08-24T17:21:41Z" Nov 28 06:52:55 crc kubenswrapper[4922]: I1128 06:52:55.451491 4922 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d" path="/var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/volumes" Nov 28 06:52:55 crc kubenswrapper[4922]: I1128 06:52:55.452123 4922 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e7e6199b-1264-4501-8953-767f51328d08" path="/var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes" Nov 28 06:52:55 crc kubenswrapper[4922]: I1128 06:52:55.452645 4922 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="efdd0498-1daa-4136-9a4a-3b948c2293fc" path="/var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/volumes" Nov 28 06:52:55 crc kubenswrapper[4922]: I1128 06:52:55.454787 4922 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" path="/var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/volumes" Nov 28 06:52:55 crc kubenswrapper[4922]: I1128 06:52:55.455876 4922 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="fda69060-fa79-4696-b1a6-7980f124bf7c" path="/var/lib/kubelet/pods/fda69060-fa79-4696-b1a6-7980f124bf7c/volumes" Nov 28 06:52:55 crc kubenswrapper[4922]: I1128 06:52:55.466473 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-w9zxj" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"e1f29751-83a6-4469-b733-50e654026f8c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ghdft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T06:52:54Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-w9zxj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:52:55Z is after 2025-08-24T17:21:41Z" Nov 28 06:52:55 crc kubenswrapper[4922]: I1128 06:52:55.520301 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-jgzjd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b05f16bb-1729-4fd8-883a-4fb960bf4cff\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fwd5b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T06:52:54Z\\\"}}\" for pod \"openshift-multus\"/\"multus-jgzjd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:52:55Z is after 2025-08-24T17:21:41Z" Nov 28 06:52:55 crc kubenswrapper[4922]: I1128 06:52:55.523090 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-h8wk6" event={"ID":"0498340a-5e95-42bf-a0a6-8ac89a6b8858","Type":"ContainerStarted","Data":"19e8a18b97500977a51210206bba89633ff9939e989bfe1a6e471f5a23c83520"} Nov 28 06:52:55 crc kubenswrapper[4922]: I1128 06:52:55.523130 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-h8wk6" 
event={"ID":"0498340a-5e95-42bf-a0a6-8ac89a6b8858","Type":"ContainerStarted","Data":"b903fc60349ec7d5004f23ec933dce13d646a2c343819495564005dfb4813314"} Nov 28 06:52:55 crc kubenswrapper[4922]: I1128 06:52:55.523141 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-h8wk6" event={"ID":"0498340a-5e95-42bf-a0a6-8ac89a6b8858","Type":"ContainerStarted","Data":"fea2760ac4afa969e420fd1707066d15480adf0e1d96b29146891781391ac4aa"} Nov 28 06:52:55 crc kubenswrapper[4922]: I1128 06:52:55.531377 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-jgzjd" event={"ID":"b05f16bb-1729-4fd8-883a-4fb960bf4cff","Type":"ContainerStarted","Data":"099001160eba2d5545fdd42a6fac2b9842549a64b5d3fe093c274e073a156e5a"} Nov 28 06:52:55 crc kubenswrapper[4922]: I1128 06:52:55.531413 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-jgzjd" event={"ID":"b05f16bb-1729-4fd8-883a-4fb960bf4cff","Type":"ContainerStarted","Data":"0328e2a2343b100c46cf100bfbdcbd60bacdd64e3f57126abb38a1b4a7487ec3"} Nov 28 06:52:55 crc kubenswrapper[4922]: I1128 06:52:55.537340 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" event={"ID":"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49","Type":"ContainerStarted","Data":"ae388c2258b136ff7b39710165e121b1dd43b85ce106f9412e8a529a1e31836e"} Nov 28 06:52:55 crc kubenswrapper[4922]: I1128 06:52:55.543326 4922 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-check-endpoints/0.log" Nov 28 06:52:55 crc kubenswrapper[4922]: I1128 06:52:55.557417 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"ff005273cedb4a3be19dafd07b2572733446d00c1aa3d89dec6dad79a9b37cb7"} Nov 28 06:52:55 crc kubenswrapper[4922]: I1128 06:52:55.558068 4922 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 28 06:52:55 crc kubenswrapper[4922]: I1128 06:52:55.567903 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-xm948" event={"ID":"28b50482-ffec-406c-9ff9-9604bce5d5d5","Type":"ContainerStarted","Data":"ea8d65e8ee7eb92aa290261e8f51b41b46819bc513aaca5c3bbdf3aa90a4acf1"} Nov 28 06:52:55 crc kubenswrapper[4922]: I1128 06:52:55.567951 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-xm948" event={"ID":"28b50482-ffec-406c-9ff9-9604bce5d5d5","Type":"ContainerStarted","Data":"c0ef4f5d7f5eae6e50c18f4f26c27023e0b0b14c59c25d868688317ccc711a21"} Nov 28 06:52:55 crc kubenswrapper[4922]: I1128 06:52:55.581553 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers 
with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:52:55Z is after 2025-08-24T17:21:41Z" Nov 28 06:52:55 crc kubenswrapper[4922]: I1128 06:52:55.581741 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" event={"ID":"37a5e44f-9a88-4405-be8a-b645485e7312","Type":"ContainerStarted","Data":"8de96806bba4cb76f53ccf39f1188732d96955671049b550589a836ed21e0616"} Nov 28 06:52:55 crc kubenswrapper[4922]: I1128 06:52:55.581809 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" event={"ID":"37a5e44f-9a88-4405-be8a-b645485e7312","Type":"ContainerStarted","Data":"37a183859d88b6675b197393c453cfcfb2aab0d7afcb86e74cc906644caf255e"} Nov 28 06:52:55 crc kubenswrapper[4922]: I1128 
06:52:55.582925 4922 generic.go:334] "Generic (PLEG): container finished" podID="ac5c6b67-2037-400e-8e03-845b47d8ca67" containerID="3aaa3ffb5b99838c7969b3e241ad9d7a0bc4797fac3ec52280914abbb7ca1b24" exitCode=0 Nov 28 06:52:55 crc kubenswrapper[4922]: I1128 06:52:55.582969 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-7gdxt" event={"ID":"ac5c6b67-2037-400e-8e03-845b47d8ca67","Type":"ContainerDied","Data":"3aaa3ffb5b99838c7969b3e241ad9d7a0bc4797fac3ec52280914abbb7ca1b24"} Nov 28 06:52:55 crc kubenswrapper[4922]: I1128 06:52:55.582986 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-7gdxt" event={"ID":"ac5c6b67-2037-400e-8e03-845b47d8ca67","Type":"ContainerStarted","Data":"6cf5f8236249cc30915ac74dd4562edd109c30472174248c7b941efb278a4f52"} Nov 28 06:52:55 crc kubenswrapper[4922]: I1128 06:52:55.584705 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/node-resolver-w9zxj" event={"ID":"e1f29751-83a6-4469-b733-50e654026f8c","Type":"ContainerStarted","Data":"679d64be6eca63ee652a4cb6cc5432c6ce549d1de243bdfbde115af634e770e3"} Nov 28 06:52:55 crc kubenswrapper[4922]: I1128 06:52:55.584753 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/node-resolver-w9zxj" event={"ID":"e1f29751-83a6-4469-b733-50e654026f8c","Type":"ContainerStarted","Data":"d00104a21ee7d0bcb53cae7313d86a721b097ead942a3e7e6daa6dd04336b569"} Nov 28 06:52:55 crc kubenswrapper[4922]: I1128 06:52:55.587246 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" event={"ID":"ef543e1b-8068-4ea3-b32a-61027b32e95d","Type":"ContainerStarted","Data":"60275d4709ecec957c37c52e762628422258288394fb23bb6190e98253977288"} Nov 28 06:52:55 crc kubenswrapper[4922]: I1128 06:52:55.587277 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" event={"ID":"ef543e1b-8068-4ea3-b32a-61027b32e95d","Type":"ContainerStarted","Data":"d7be069e52fb9de73b7d4297a34eaab68e25c764ed60172e87146259d0d0b6ed"} Nov 28 06:52:55 crc kubenswrapper[4922]: I1128 06:52:55.587289 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" event={"ID":"ef543e1b-8068-4ea3-b32a-61027b32e95d","Type":"ContainerStarted","Data":"8b85caebee6a9ecdc0bd2ba80293d624ea1fa0d1a031699ab89d63869ef16d18"} Nov 28 06:52:55 crc kubenswrapper[4922]: I1128 06:52:55.605431 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:52:55Z is after 2025-08-24T17:21:41Z" Nov 28 06:52:55 crc kubenswrapper[4922]: I1128 06:52:55.621194 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:52:55Z is after 2025-08-24T17:21:41Z" Nov 28 06:52:55 crc kubenswrapper[4922]: I1128 06:52:55.639398 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-h8wk6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0498340a-5e95-42bf-a0a6-8ac89a6b8858\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zf7vq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zf7vq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T06:52:54Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-h8wk6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:52:55Z is after 2025-08-24T17:21:41Z" Nov 28 06:52:55 crc kubenswrapper[4922]: I1128 06:52:55.666674 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-7gdxt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ac5c6b67-2037-400e-8e03-845b47d8ca67\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"message\\\":\\\"containers with incomplete status: [kubecfg-setup]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dtxdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dtxdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dtxdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":
\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dtxdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dtxdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dtxdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"rea
dOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dtxdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dtxdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dtxdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T06:52:54Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-7gdxt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:52:55Z is after 2025-08-24T17:21:41Z" Nov 28 06:52:55 crc kubenswrapper[4922]: 
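Every "Failed to update status for pod" entry above fails for the same reason, spelled out at the end of each message: the pod.network-node-identity.openshift.io admission webhook at https://127.0.0.1:9743/pod presents a serving certificate that expired on 2025-08-24T17:21:41Z, while the node clock reads 2025-11-28T06:52:55Z, so the kubelet's TLS verification fails and every status patch is rejected before it reaches the API object. A minimal Go sketch of how one might confirm the expiry from the node follows; the address and port are taken from the log, while the program itself (including retrieving the certificate with InsecureSkipVerify) is an illustrative assumption, not something the log contains:

    package main

    import (
        "crypto/tls"
        "fmt"
        "log"
        "time"
    )

    func main() {
        // Webhook endpoint as reported in the kubelet errors above;
        // hypothetical check, not part of the captured log.
        conn, err := tls.Dial("tcp", "127.0.0.1:9743", &tls.Config{
            InsecureSkipVerify: true, // fetch the cert even though verification would fail
        })
        if err != nil {
            log.Fatalf("dial webhook: %v", err)
        }
        defer conn.Close()
        for _, cert := range conn.ConnectionState().PeerCertificates {
            fmt.Printf("subject=%q notBefore=%s notAfter=%s expired=%v\n",
                cert.Subject.String(), cert.NotBefore, cert.NotAfter,
                time.Now().After(cert.NotAfter))
        }
    }

If the log's diagnosis is right, the serving certificate printed first would show notAfter matching the 2025-08-24T17:21:41Z cutoff cited in every rejected patch.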
Nov 28 06:52:55 crc kubenswrapper[4922]: I1128 06:52:55.683437 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"78d1b075-6126-476f-8318-483aeaa7b542\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:35Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:35Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d8727c0c5c76c4071561c95b14554063c01a2d7d826bfef19624a346f7079096\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c63926690deadbe1a0b9bd4228c1be8c9d959ae52dc13540e1d7da0ef6ce2b44\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://12e55656cf2a2cadbef853f458f347aabe75bd6b644a48a8c320144a0bbb77f8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":
{\\\"startedAt\\\":\\\"2025-11-28T06:52:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://43f9db3464d315f5b596f15327b9e7cfc2935bb8f18492bcf4aa1b2d8e2e8951\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://43f9db3464d315f5b596f15327b9e7cfc2935bb8f18492bcf4aa1b2d8e2e8951\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-28T06:52:53Z\\\",\\\"message\\\":\\\" to sync for client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file\\\\nI1128 06:52:53.805479 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\nI1128 06:52:53.805349 1 requestheader_controller.go:172] Starting RequestHeaderAuthRequestController\\\\nI1128 06:52:53.807455 1 shared_informer.go:313] Waiting for caches to sync for RequestHeaderAuthRequestController\\\\nI1128 06:52:53.805665 1 envvar.go:172] \\\\\\\"Feature gate default state\\\\\\\" feature=\\\\\\\"WatchListClient\\\\\\\" enabled=false\\\\nI1128 06:52:53.807562 1 envvar.go:172] \\\\\\\"Feature gate default state\\\\\\\" feature=\\\\\\\"InformerResourceVersion\\\\\\\" enabled=false\\\\nI1128 06:52:53.805922 1 tlsconfig.go:203] \\\\\\\"Loaded serving cert\\\\\\\" certName=\\\\\\\"serving-cert::/tmp/serving-cert-870734072/tls.crt::/tmp/serving-cert-870734072/tls.key\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"localhost\\\\\\\\\\\\\\\" [serving] validServingFor=[localhost] issuer=\\\\\\\\\\\\\\\"check-endpoints-signer@1764312757\\\\\\\\\\\\\\\" (2025-11-28 06:52:36 +0000 UTC to 2025-12-28 06:52:37 +0000 UTC (now=2025-11-28 06:52:53.805871396 +0000 UTC))\\\\\\\"\\\\nI1128 06:52:53.808356 1 named_certificates.go:53] \\\\\\\"Loaded SNI cert\\\\\\\" index=0 certName=\\\\\\\"self-signed loopback\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"apiserver-loopback-client@1764312768\\\\\\\\\\\\\\\" [serving] validServingFor=[apiserver-loopback-client] issuer=\\\\\\\\\\\\\\\"apiserver-loopback-client-ca@1764312768\\\\\\\\\\\\\\\" (2025-11-28 05:52:47 +0000 UTC to 2026-11-28 05:52:47 +0000 UTC (now=2025-11-28 06:52:53.808326937 +0000 UTC))\\\\\\\"\\\\nI1128 06:52:53.808434 1 secure_serving.go:213] Serving securely on [::]:17697\\\\nI1128 06:52:53.808491 1 genericapiserver.go:683] [graceful-termination] waiting for shutdown to be initiated\\\\nI1128 06:52:53.805962 1 dynamic_serving_content.go:135] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-870734072/tls.crt::/tmp/serving-cert-870734072/tls.key\\\\\\\"\\\\nI1128 06:52:53.808872 1 tlsconfig.go:243] \\\\\\\"Starting DynamicServingCertificateController\\\\\\\"\\\\nF1128 06:52:53.810397 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T06:52:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cd17aa15325fbe3a40b81cea5bfec137ef02a8dd2d5f8393be4d206a8360ba89\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:37Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9a1a9e0fb2169a4caf2986906a87573648458725e2571ec377704856d3c16b47\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9a1a9e0fb2169a4caf2986906a87573648458725e2571ec377704856d3c16b47\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T06:52:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T06:52:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T06:52:35Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:52:55Z is after 2025-08-24T17:21:41Z" Nov 28 06:52:55 crc kubenswrapper[4922]: I1128 06:52:55.700672 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"message\\\":\\\"containers with unready status: 
[iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:52:55Z is after 2025-08-24T17:21:41Z" Nov 28 06:52:55 crc kubenswrapper[4922]: I1128 06:52:55.721546 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:52:55Z is after 2025-08-24T17:21:41Z" Nov 28 06:52:55 crc kubenswrapper[4922]: I1128 06:52:55.738133 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:52:55Z is after 2025-08-24T17:21:41Z" Nov 28 06:52:55 crc kubenswrapper[4922]: I1128 06:52:55.758717 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-7gdxt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ac5c6b67-2037-400e-8e03-845b47d8ca67\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dtxdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dtxdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dtxdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-dtxdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dtxdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dtxdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dtxdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dtxdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3aaa3ffb5b99838c7969b3e241ad9d7a0bc4797fac3ec52280914abbb7ca1b24\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3aaa3ffb5b99838c7969b3e241ad9d7a0bc4797fac3ec52280914abbb7ca1b24\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T06:52:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dtxdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T06:52:54Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-7gdxt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:52:55Z 
is after 2025-08-24T17:21:41Z" Nov 28 06:52:55 crc kubenswrapper[4922]: I1128 06:52:55.777990 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"78d1b075-6126-476f-8318-483aeaa7b542\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:35Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:35Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d8727c0c5c76c4071561c95b14554063c01a2d7d826bfef19624a346f7079096\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c63926690deadbe1a0b9bd4228c1be8c9d959ae52dc13540e1d7da0ef6ce2b44\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://12e55656cf2a2cadbef853f458f347aabe75bd6b644a48a8c320144a0bbb77f8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\
\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ff005273cedb4a3be19dafd07b2572733446d00c1aa3d89dec6dad79a9b37cb7\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://43f9db3464d315f5b596f15327b9e7cfc2935bb8f18492bcf4aa1b2d8e2e8951\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-28T06:52:53Z\\\",\\\"message\\\":\\\" to sync for client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file\\\\nI1128 06:52:53.805479 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\nI1128 06:52:53.805349 1 requestheader_controller.go:172] Starting RequestHeaderAuthRequestController\\\\nI1128 06:52:53.807455 1 shared_informer.go:313] Waiting for caches to sync for RequestHeaderAuthRequestController\\\\nI1128 06:52:53.805665 1 envvar.go:172] \\\\\\\"Feature gate default state\\\\\\\" feature=\\\\\\\"WatchListClient\\\\\\\" enabled=false\\\\nI1128 06:52:53.807562 1 envvar.go:172] \\\\\\\"Feature gate default state\\\\\\\" feature=\\\\\\\"InformerResourceVersion\\\\\\\" enabled=false\\\\nI1128 06:52:53.805922 1 tlsconfig.go:203] \\\\\\\"Loaded serving cert\\\\\\\" certName=\\\\\\\"serving-cert::/tmp/serving-cert-870734072/tls.crt::/tmp/serving-cert-870734072/tls.key\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"localhost\\\\\\\\\\\\\\\" [serving] validServingFor=[localhost] issuer=\\\\\\\\\\\\\\\"check-endpoints-signer@1764312757\\\\\\\\\\\\\\\" (2025-11-28 06:52:36 +0000 UTC to 2025-12-28 06:52:37 +0000 UTC (now=2025-11-28 06:52:53.805871396 +0000 UTC))\\\\\\\"\\\\nI1128 06:52:53.808356 1 named_certificates.go:53] \\\\\\\"Loaded SNI cert\\\\\\\" index=0 certName=\\\\\\\"self-signed loopback\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"apiserver-loopback-client@1764312768\\\\\\\\\\\\\\\" [serving] validServingFor=[apiserver-loopback-client] issuer=\\\\\\\\\\\\\\\"apiserver-loopback-client-ca@1764312768\\\\\\\\\\\\\\\" (2025-11-28 05:52:47 +0000 UTC to 2026-11-28 05:52:47 +0000 UTC (now=2025-11-28 06:52:53.808326937 +0000 UTC))\\\\\\\"\\\\nI1128 06:52:53.808434 1 secure_serving.go:213] Serving securely on [::]:17697\\\\nI1128 06:52:53.808491 1 genericapiserver.go:683] [graceful-termination] waiting for shutdown to be initiated\\\\nI1128 06:52:53.805962 1 dynamic_serving_content.go:135] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-870734072/tls.crt::/tmp/serving-cert-870734072/tls.key\\\\\\\"\\\\nI1128 06:52:53.808872 1 tlsconfig.go:243] \\\\\\\"Starting DynamicServingCertificateController\\\\\\\"\\\\nF1128 06:52:53.810397 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T06:52:37Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cd17aa15325fbe3a40b81cea5bfec137ef02a8dd2d5f8393be4d206a8360ba89\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:37Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9a1a9e0fb2169a4caf2986906a87573648458725e2571ec377704856d3c16b47\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9a1a9e0fb2169a4caf2986906a87573648458725e2571ec377704856d3c16b47\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T06:52:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T06:52:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T06:52:35Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:52:55Z is after 2025-08-24T17:21:41Z" Nov 28 06:52:55 crc kubenswrapper[4922]: I1128 06:52:55.799181 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:52:55Z is after 2025-08-24T17:21:41Z" Nov 28 06:52:55 crc kubenswrapper[4922]: I1128 06:52:55.817553 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-jgzjd" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b05f16bb-1729-4fd8-883a-4fb960bf4cff\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://099001160eba2d5545fdd42a6fac2b9842549a64b5d3fe093c274e073a156e5a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fwd5b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T06:52:54Z\\\"}}\" for pod \"openshift-multus\"/\"multus-jgzjd\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:52:55Z is after 2025-08-24T17:21:41Z" Nov 28 06:52:55 crc kubenswrapper[4922]: I1128 06:52:55.831054 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-xm948" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"28b50482-ffec-406c-9ff9-9604bce5d5d5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-76zhj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ea8d65e8ee7eb92aa290261e8f51b41b46819bc513aaca5c3bbdf3aa90a4acf1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"
Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-76zhj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-76zhj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-76zhj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-76zhj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\
"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-76zhj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-76zhj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T06:52:54Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-xm948\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:52:55Z is after 2025-08-24T17:21:41Z" Nov 28 06:52:55 crc kubenswrapper[4922]: I1128 06:52:55.842296 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f54a7fc3-dff2-4919-91a9-7defe17b717e\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2b39d53d3c00be07ad6ed44fb37961669118efd3f0a953a39e05c90c9fc30319\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1c84fe4f54f9b9fc76f23f13aaee875e9d127be3dec3e3952893436fb9d94936\\\",
\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bf8a543b9af64068e2164c1fb7fcf8117e36a3f1a6c6e40df2a63da8fea86612\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7ef4b355167e11c82fb8067373d0d1b4ec5551157e683043c98f9249082795d2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T06:52:35Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:52:55Z is after 2025-08-24T17:21:41Z" Nov 28 06:52:55 crc kubenswrapper[4922]: I1128 06:52:55.856257 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:52:55Z is after 2025-08-24T17:21:41Z" Nov 28 06:52:55 crc kubenswrapper[4922]: I1128 06:52:55.870622 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-w9zxj" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"e1f29751-83a6-4469-b733-50e654026f8c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://679d64be6eca63ee652a4cb6cc5432c6ce549d1de243bdfbde115af634e770e3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ghdft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T06:52:54Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-w9zxj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:52:55Z is after 2025-08-24T17:21:41Z" Nov 28 06:52:55 crc kubenswrapper[4922]: I1128 06:52:55.881816 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://60275d4709ecec957c37c52e762628422258288394fb23bb6190e98253977288\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d7be069e52fb9de73b7d4297a34eaab68e25c764ed60172e87146259d0d0b6ed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:52:55Z is after 2025-08-24T17:21:41Z" Nov 28 06:52:55 crc kubenswrapper[4922]: I1128 06:52:55.896625 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8de96806bba4cb76f53ccf39f1188732d96955671049b550589a836ed21e0616\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:52:55Z is after 2025-08-24T17:21:41Z" Nov 28 06:52:55 crc kubenswrapper[4922]: I1128 06:52:55.908747 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located 
when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:52:55Z is after 2025-08-24T17:21:41Z" Nov 28 06:52:55 crc kubenswrapper[4922]: I1128 06:52:55.918665 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-h8wk6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0498340a-5e95-42bf-a0a6-8ac89a6b8858\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://19e8a18b97500977a51210206bba89633ff9939e989bfe1a6e471f5a23c83520\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zf7vq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b903fc60349ec7d5004f23ec933dce13d646a2c343819495564005dfb4813314\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-da
emon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zf7vq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T06:52:54Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-h8wk6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:52:55Z is after 2025-08-24T17:21:41Z" Nov 28 06:52:56 crc kubenswrapper[4922]: I1128 06:52:56.031535 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 06:52:56 crc kubenswrapper[4922]: I1128 06:52:56.031665 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 06:52:56 crc kubenswrapper[4922]: E1128 06:52:56.031728 4922 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 06:52:58.031700815 +0000 UTC m=+22.952096397 (durationBeforeRetry 2s). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 06:52:56 crc kubenswrapper[4922]: E1128 06:52:56.031815 4922 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 28 06:52:56 crc kubenswrapper[4922]: E1128 06:52:56.031871 4922 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-28 06:52:58.03185761 +0000 UTC m=+22.952253192 (durationBeforeRetry 2s). 
Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 28 06:52:56 crc kubenswrapper[4922]: I1128 06:52:56.032153 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 06:52:56 crc kubenswrapper[4922]: E1128 06:52:56.032244 4922 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Nov 28 06:52:56 crc kubenswrapper[4922]: E1128 06:52:56.032277 4922 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-28 06:52:58.032269591 +0000 UTC m=+22.952665173 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Nov 28 06:52:56 crc kubenswrapper[4922]: I1128 06:52:56.133115 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 06:52:56 crc kubenswrapper[4922]: I1128 06:52:56.133161 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 06:52:56 crc kubenswrapper[4922]: E1128 06:52:56.133298 4922 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 28 06:52:56 crc kubenswrapper[4922]: E1128 06:52:56.133312 4922 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 28 06:52:56 crc kubenswrapper[4922]: E1128 06:52:56.133324 4922 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 28 06:52:56 crc kubenswrapper[4922]: E1128 06:52:56.133374 4922 nestedpendingoperations.go:348] Operation for 
"{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-11-28 06:52:58.13336008 +0000 UTC m=+23.053755662 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 28 06:52:56 crc kubenswrapper[4922]: E1128 06:52:56.133378 4922 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 28 06:52:56 crc kubenswrapper[4922]: E1128 06:52:56.133443 4922 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 28 06:52:56 crc kubenswrapper[4922]: E1128 06:52:56.133457 4922 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 28 06:52:56 crc kubenswrapper[4922]: E1128 06:52:56.133520 4922 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-11-28 06:52:58.133499674 +0000 UTC m=+23.053895256 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 28 06:52:56 crc kubenswrapper[4922]: I1128 06:52:56.398283 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 06:52:56 crc kubenswrapper[4922]: I1128 06:52:56.398305 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 06:52:56 crc kubenswrapper[4922]: E1128 06:52:56.398703 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 06:52:56 crc kubenswrapper[4922]: I1128 06:52:56.398316 4922 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 06:52:56 crc kubenswrapper[4922]: E1128 06:52:56.398800 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 06:52:56 crc kubenswrapper[4922]: E1128 06:52:56.398908 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 06:52:56 crc kubenswrapper[4922]: I1128 06:52:56.592471 4922 generic.go:334] "Generic (PLEG): container finished" podID="28b50482-ffec-406c-9ff9-9604bce5d5d5" containerID="ea8d65e8ee7eb92aa290261e8f51b41b46819bc513aaca5c3bbdf3aa90a4acf1" exitCode=0 Nov 28 06:52:56 crc kubenswrapper[4922]: I1128 06:52:56.592578 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-xm948" event={"ID":"28b50482-ffec-406c-9ff9-9604bce5d5d5","Type":"ContainerDied","Data":"ea8d65e8ee7eb92aa290261e8f51b41b46819bc513aaca5c3bbdf3aa90a4acf1"} Nov 28 06:52:56 crc kubenswrapper[4922]: I1128 06:52:56.598446 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-7gdxt" event={"ID":"ac5c6b67-2037-400e-8e03-845b47d8ca67","Type":"ContainerStarted","Data":"6e9942f8e8cdf6ffd04d46b29be5a53fcc8c4cb5d79f50d7747e2fe6eb744ffd"} Nov 28 06:52:56 crc kubenswrapper[4922]: I1128 06:52:56.598593 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-7gdxt" event={"ID":"ac5c6b67-2037-400e-8e03-845b47d8ca67","Type":"ContainerStarted","Data":"e91ea6957087e19d43626438d68bf92a19a6f35325de570738354edd61da5bb0"} Nov 28 06:52:56 crc kubenswrapper[4922]: I1128 06:52:56.598613 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-7gdxt" event={"ID":"ac5c6b67-2037-400e-8e03-845b47d8ca67","Type":"ContainerStarted","Data":"2c1a545fef7aec25b21dd112c54eeb7b94306cafaf4732a50de9ff10a409c443"} Nov 28 06:52:56 crc kubenswrapper[4922]: I1128 06:52:56.598625 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-7gdxt" event={"ID":"ac5c6b67-2037-400e-8e03-845b47d8ca67","Type":"ContainerStarted","Data":"d0f038ffe6c8ec5ec836379b827ecb71967119efca3f2cd4ce5d3006fa96a153"} Nov 28 06:52:56 crc kubenswrapper[4922]: I1128 06:52:56.598636 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-7gdxt" event={"ID":"ac5c6b67-2037-400e-8e03-845b47d8ca67","Type":"ContainerStarted","Data":"4f56366e2355663eb896c09a6710166b89f794a7f17cb317f6c4299e8b317782"} Nov 28 06:52:56 crc kubenswrapper[4922]: I1128 06:52:56.598649 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-7gdxt" event={"ID":"ac5c6b67-2037-400e-8e03-845b47d8ca67","Type":"ContainerStarted","Data":"403e86b4b99e8bc85be77996288705f6ea8ff5da8e9b7dfb09749f4341065b55"} Nov 28 
06:52:56 crc kubenswrapper[4922]: I1128 06:52:56.613376 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"78d1b075-6126-476f-8318-483aeaa7b542\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:35Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:35Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d8727c0c5c76c4071561c95b14554063c01a2d7d826bfef19624a346f7079096\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c63926690deadbe1a0b9bd4228c1be8c9d959ae52dc13540e1d7da0ef6ce2b44\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://12e55656cf2a2cadbef853f458f347aabe75bd6b644a48a8c320144a0bbb77f8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\
":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ff005273cedb4a3be19dafd07b2572733446d00c1aa3d89dec6dad79a9b37cb7\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://43f9db3464d315f5b596f15327b9e7cfc2935bb8f18492bcf4aa1b2d8e2e8951\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-28T06:52:53Z\\\",\\\"message\\\":\\\" to sync for client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file\\\\nI1128 06:52:53.805479 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\nI1128 06:52:53.805349 1 requestheader_controller.go:172] Starting RequestHeaderAuthRequestController\\\\nI1128 06:52:53.807455 1 shared_informer.go:313] Waiting for caches to sync for RequestHeaderAuthRequestController\\\\nI1128 06:52:53.805665 1 envvar.go:172] \\\\\\\"Feature gate default state\\\\\\\" feature=\\\\\\\"WatchListClient\\\\\\\" enabled=false\\\\nI1128 06:52:53.807562 1 envvar.go:172] \\\\\\\"Feature gate default state\\\\\\\" feature=\\\\\\\"InformerResourceVersion\\\\\\\" enabled=false\\\\nI1128 06:52:53.805922 1 tlsconfig.go:203] \\\\\\\"Loaded serving cert\\\\\\\" certName=\\\\\\\"serving-cert::/tmp/serving-cert-870734072/tls.crt::/tmp/serving-cert-870734072/tls.key\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"localhost\\\\\\\\\\\\\\\" [serving] validServingFor=[localhost] issuer=\\\\\\\\\\\\\\\"check-endpoints-signer@1764312757\\\\\\\\\\\\\\\" (2025-11-28 06:52:36 +0000 UTC to 2025-12-28 06:52:37 +0000 UTC (now=2025-11-28 06:52:53.805871396 +0000 UTC))\\\\\\\"\\\\nI1128 06:52:53.808356 1 named_certificates.go:53] \\\\\\\"Loaded SNI cert\\\\\\\" index=0 certName=\\\\\\\"self-signed loopback\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"apiserver-loopback-client@1764312768\\\\\\\\\\\\\\\" [serving] validServingFor=[apiserver-loopback-client] issuer=\\\\\\\\\\\\\\\"apiserver-loopback-client-ca@1764312768\\\\\\\\\\\\\\\" (2025-11-28 05:52:47 +0000 UTC to 2026-11-28 05:52:47 +0000 UTC (now=2025-11-28 06:52:53.808326937 +0000 UTC))\\\\\\\"\\\\nI1128 06:52:53.808434 1 secure_serving.go:213] Serving securely on [::]:17697\\\\nI1128 06:52:53.808491 1 genericapiserver.go:683] [graceful-termination] waiting for shutdown to be initiated\\\\nI1128 06:52:53.805962 1 dynamic_serving_content.go:135] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-870734072/tls.crt::/tmp/serving-cert-870734072/tls.key\\\\\\\"\\\\nI1128 06:52:53.808872 1 tlsconfig.go:243] \\\\\\\"Starting DynamicServingCertificateController\\\\\\\"\\\\nF1128 06:52:53.810397 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T06:52:37Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cd17aa15325fbe3a40b81cea5bfec137ef02a8dd2d5f8393be4d206a8360ba89\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:37Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9a1a9e0fb2169a4caf2986906a87573648458725e2571ec377704856d3c16b47\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9a1a9e0fb2169a4caf2986906a87573648458725e2571ec377704856d3c16b47\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T06:52:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T06:52:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T06:52:35Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:52:56Z is after 2025-08-24T17:21:41Z" Nov 28 06:52:56 crc kubenswrapper[4922]: I1128 06:52:56.628080 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:52:56Z is after 2025-08-24T17:21:41Z" Nov 28 06:52:56 crc kubenswrapper[4922]: I1128 06:52:56.646782 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:52:56Z is after 2025-08-24T17:21:41Z" Nov 28 06:52:56 crc kubenswrapper[4922]: I1128 06:52:56.668054 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-7gdxt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ac5c6b67-2037-400e-8e03-845b47d8ca67\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dtxdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dtxdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dtxdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-dtxdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dtxdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dtxdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dtxdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dtxdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3aaa3ffb5b99838c7969b3e241ad9d7a0bc4797fac3ec52280914abbb7ca1b24\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3aaa3ffb5b99838c7969b3e241ad9d7a0bc4797fac3ec52280914abbb7ca1b24\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T06:52:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dtxdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T06:52:54Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-7gdxt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:52:56Z 
is after 2025-08-24T17:21:41Z" Nov 28 06:52:56 crc kubenswrapper[4922]: I1128 06:52:56.689053 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f54a7fc3-dff2-4919-91a9-7defe17b717e\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2b39d53d3c00be07ad6ed44fb37961669118efd3f0a953a39e05c90c9fc30319\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1c84fe4f54f9b9fc76f23f13aaee875e9d127be3dec3e3952893436fb9d94936\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bf8a543b9af64068e2164c1fb7fcf8117e36a3f1a6c6e40df2a63da8fea86612\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\
\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7ef4b355167e11c82fb8067373d0d1b4ec5551157e683043c98f9249082795d2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T06:52:35Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:52:56Z is after 2025-08-24T17:21:41Z" Nov 28 06:52:56 crc kubenswrapper[4922]: I1128 06:52:56.703621 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:52:56Z is after 2025-08-24T17:21:41Z" Nov 28 06:52:56 crc kubenswrapper[4922]: I1128 06:52:56.714879 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-w9zxj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e1f29751-83a6-4469-b733-50e654026f8c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://679d64be6eca63ee652a4cb6cc5432c6ce549d1de243bdfbde115af634e770e3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ghdft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T06:52:54Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-w9zxj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 
2025-11-28T06:52:56Z is after 2025-08-24T17:21:41Z" Nov 28 06:52:56 crc kubenswrapper[4922]: I1128 06:52:56.726702 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-jgzjd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b05f16bb-1729-4fd8-883a-4fb960bf4cff\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://099001160eba2d5545fdd42a6fac2b9842549a64b5d3fe093c274e073a156e5a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fwd5b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":
\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T06:52:54Z\\\"}}\" for pod \"openshift-multus\"/\"multus-jgzjd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:52:56Z is after 2025-08-24T17:21:41Z" Nov 28 06:52:56 crc kubenswrapper[4922]: I1128 06:52:56.742891 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-xm948" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"28b50482-ffec-406c-9ff9-9604bce5d5d5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"message\\\":\\\"containers with incomplete status: [cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-76zhj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ea8d65e8ee7eb92aa290261e8f51b41b46819bc513aaca5c3bbdf3aa90a4acf1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ea8d65e8ee7eb92aa290261e8f51b41b46819bc513aaca5c3bbdf3aa90a4acf1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T06:52:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-76zhj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-76zhj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reaso
n\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-76zhj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-76zhj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-76zhj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-76zhj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T06:52:54Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-xm948\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:52:56Z is after 2025-08-24T17:21:41Z" Nov 28 06:52:56 crc 
kubenswrapper[4922]: I1128 06:52:56.757409 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://60275d4709ecec957c37c52e762628422258288394fb23bb6190e98253977288\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d7be069e52fb9de73b7d4297a34eaab68e25c764ed60172e87146259d0d0b6ed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:52:56Z is after 2025-08-24T17:21:41Z" Nov 28 06:52:56 crc kubenswrapper[4922]: I1128 06:52:56.776565 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8de96806bba4cb76f53ccf39f1188732d96955671049b550589a836ed21e0616\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:52:56Z is after 2025-08-24T17:21:41Z" Nov 28 06:52:56 crc kubenswrapper[4922]: I1128 06:52:56.793112 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located 
when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:52:56Z is after 2025-08-24T17:21:41Z" Nov 28 06:52:56 crc kubenswrapper[4922]: I1128 06:52:56.806210 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-h8wk6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0498340a-5e95-42bf-a0a6-8ac89a6b8858\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://19e8a18b97500977a51210206bba89633ff9939e989bfe1a6e471f5a23c83520\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zf7vq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b903fc60349ec7d5004f23ec933dce13d646a2c343819495564005dfb4813314\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-da
emon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zf7vq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T06:52:54Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-h8wk6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:52:56Z is after 2025-08-24T17:21:41Z" Nov 28 06:52:56 crc kubenswrapper[4922]: I1128 06:52:56.969864 4922 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 28 06:52:56 crc kubenswrapper[4922]: I1128 06:52:56.972912 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:52:56 crc kubenswrapper[4922]: I1128 06:52:56.973043 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:52:56 crc kubenswrapper[4922]: I1128 06:52:56.973121 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:52:56 crc kubenswrapper[4922]: I1128 06:52:56.973342 4922 kubelet_node_status.go:76] "Attempting to register node" node="crc" Nov 28 06:52:56 crc kubenswrapper[4922]: I1128 06:52:56.982765 4922 kubelet_node_status.go:115] "Node was previously registered" node="crc" Nov 28 06:52:56 crc kubenswrapper[4922]: I1128 06:52:56.983063 4922 kubelet_node_status.go:79] "Successfully registered node" node="crc" Nov 28 06:52:56 crc kubenswrapper[4922]: I1128 06:52:56.984322 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:52:56 crc kubenswrapper[4922]: I1128 06:52:56.984364 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:52:56 crc kubenswrapper[4922]: I1128 06:52:56.984378 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:52:56 crc kubenswrapper[4922]: I1128 06:52:56.984397 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:52:56 crc kubenswrapper[4922]: I1128 06:52:56.984410 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:52:56Z","lastTransitionTime":"2025-11-28T06:52:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 06:52:57 crc kubenswrapper[4922]: E1128 06:52:57.015430 4922 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T06:52:56Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:56Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T06:52:56Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:56Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T06:52:56Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:56Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T06:52:56Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:56Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"3ea1e6bb-61b7-453d-b9cb-290e4e3cdf54\\\",\\\"systemUUID\\\":\\\"a83ea3b2-2af6-4e19-83ef-b63bfe4faed4\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:52:57Z is after 2025-08-24T17:21:41Z" Nov 28 06:52:57 crc kubenswrapper[4922]: I1128 06:52:57.019299 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:52:57 crc kubenswrapper[4922]: I1128 06:52:57.019326 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 28 06:52:57 crc kubenswrapper[4922]: I1128 06:52:57.019334 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:52:57 crc kubenswrapper[4922]: I1128 06:52:57.019349 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:52:57 crc kubenswrapper[4922]: I1128 06:52:57.019359 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:52:57Z","lastTransitionTime":"2025-11-28T06:52:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:52:57 crc kubenswrapper[4922]: E1128 06:52:57.032331 4922 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T06:52:57Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:57Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T06:52:57Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:57Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T06:52:57Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:57Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T06:52:57Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:57Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"3ea1e6bb-61b7-453d-b9cb-290e4e3cdf54\\\",\\\"systemUUID\\\":\\\"a83ea3b2-2af6-4e19-83ef-b63bfe4faed4\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:52:57Z is after 2025-08-24T17:21:41Z" Nov 28 06:52:57 crc kubenswrapper[4922]: I1128 06:52:57.035674 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:52:57 crc kubenswrapper[4922]: I1128 06:52:57.035710 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 28 06:52:57 crc kubenswrapper[4922]: I1128 06:52:57.035722 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:52:57 crc kubenswrapper[4922]: I1128 06:52:57.035739 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:52:57 crc kubenswrapper[4922]: I1128 06:52:57.035750 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:52:57Z","lastTransitionTime":"2025-11-28T06:52:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:52:57 crc kubenswrapper[4922]: E1128 06:52:57.048239 4922 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T06:52:57Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:57Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T06:52:57Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:57Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T06:52:57Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:57Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T06:52:57Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:57Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"3ea1e6bb-61b7-453d-b9cb-290e4e3cdf54\\\",\\\"systemUUID\\\":\\\"a83ea3b2-2af6-4e19-83ef-b63bfe4faed4\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:52:57Z is after 2025-08-24T17:21:41Z" Nov 28 06:52:57 crc kubenswrapper[4922]: I1128 06:52:57.051534 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:52:57 crc kubenswrapper[4922]: I1128 06:52:57.051560 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 28 06:52:57 crc kubenswrapper[4922]: I1128 06:52:57.051577 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:52:57 crc kubenswrapper[4922]: I1128 06:52:57.051590 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:52:57 crc kubenswrapper[4922]: I1128 06:52:57.051598 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:52:57Z","lastTransitionTime":"2025-11-28T06:52:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:52:57 crc kubenswrapper[4922]: E1128 06:52:57.063192 4922 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T06:52:57Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:57Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T06:52:57Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:57Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T06:52:57Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:57Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T06:52:57Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:57Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"3ea1e6bb-61b7-453d-b9cb-290e4e3cdf54\\\",\\\"systemUUID\\\":\\\"a83ea3b2-2af6-4e19-83ef-b63bfe4faed4\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:52:57Z is after 2025-08-24T17:21:41Z" Nov 28 06:52:57 crc kubenswrapper[4922]: I1128 06:52:57.072791 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:52:57 crc kubenswrapper[4922]: I1128 06:52:57.072849 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 28 06:52:57 crc kubenswrapper[4922]: I1128 06:52:57.072867 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:52:57 crc kubenswrapper[4922]: I1128 06:52:57.072890 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:52:57 crc kubenswrapper[4922]: I1128 06:52:57.072908 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:52:57Z","lastTransitionTime":"2025-11-28T06:52:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:52:57 crc kubenswrapper[4922]: E1128 06:52:57.088061 4922 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T06:52:57Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:57Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T06:52:57Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:57Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T06:52:57Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:57Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T06:52:57Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:57Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"3ea1e6bb-61b7-453d-b9cb-290e4e3cdf54\\\",\\\"systemUUID\\\":\\\"a83ea3b2-2af6-4e19-83ef-b63bfe4faed4\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:52:57Z is after 2025-08-24T17:21:41Z" Nov 28 06:52:57 crc kubenswrapper[4922]: E1128 06:52:57.088176 4922 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Nov 28 06:52:57 crc kubenswrapper[4922]: I1128 06:52:57.089919 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Nov 28 06:52:57 crc kubenswrapper[4922]: I1128 06:52:57.089960 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:52:57 crc kubenswrapper[4922]: I1128 06:52:57.089972 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:52:57 crc kubenswrapper[4922]: I1128 06:52:57.089988 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:52:57 crc kubenswrapper[4922]: I1128 06:52:57.090001 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:52:57Z","lastTransitionTime":"2025-11-28T06:52:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:52:57 crc kubenswrapper[4922]: I1128 06:52:57.170861 4922 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-image-registry/node-ca-z5d9x"] Nov 28 06:52:57 crc kubenswrapper[4922]: I1128 06:52:57.171469 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/node-ca-z5d9x" Nov 28 06:52:57 crc kubenswrapper[4922]: I1128 06:52:57.172972 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"node-ca-dockercfg-4777p" Nov 28 06:52:57 crc kubenswrapper[4922]: I1128 06:52:57.173357 4922 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"openshift-service-ca.crt" Nov 28 06:52:57 crc kubenswrapper[4922]: I1128 06:52:57.173745 4922 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"kube-root-ca.crt" Nov 28 06:52:57 crc kubenswrapper[4922]: I1128 06:52:57.174633 4922 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"image-registry-certificates" Nov 28 06:52:57 crc kubenswrapper[4922]: I1128 06:52:57.192903 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://60275d4709ecec957c37c52e762628422258288394fb23bb6190e98253977288\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d7be069e52fb9de73b7d4297a34eaab68e25c764ed60172e87146259d0d0b6ed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:52:57Z is after 2025-08-24T17:21:41Z" Nov 28 06:52:57 crc kubenswrapper[4922]: I1128 06:52:57.195123 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:52:57 crc kubenswrapper[4922]: I1128 06:52:57.195155 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:52:57 crc kubenswrapper[4922]: I1128 06:52:57.195189 4922 kubelet_node_status.go:724] "Recording event message for node" 
node="crc" event="NodeHasSufficientPID" Nov 28 06:52:57 crc kubenswrapper[4922]: I1128 06:52:57.195208 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:52:57 crc kubenswrapper[4922]: I1128 06:52:57.195250 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:52:57Z","lastTransitionTime":"2025-11-28T06:52:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:52:57 crc kubenswrapper[4922]: I1128 06:52:57.206375 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8de96806bba4cb76f53ccf39f1188732d96955671049b550589a836ed21e0616\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:52:57Z is after 2025-08-24T17:21:41Z" Nov 28 06:52:57 crc kubenswrapper[4922]: I1128 06:52:57.219011 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:52:57Z is after 2025-08-24T17:21:41Z" Nov 28 06:52:57 crc kubenswrapper[4922]: I1128 06:52:57.231366 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-h8wk6" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0498340a-5e95-42bf-a0a6-8ac89a6b8858\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://19e8a18b97500977a51210206bba89633ff9939e989bfe1a6e471f5a23c83520\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zf7vq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b903fc60349ec7d5004f23ec933dce13d646a2c343819495564005dfb4813314\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zf7vq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T06:52:54Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-h8wk6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:52:57Z is after 2025-08-24T17:21:41Z" Nov 28 06:52:57 crc kubenswrapper[4922]: I1128 06:52:57.252474 4922 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"78d1b075-6126-476f-8318-483aeaa7b542\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:35Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:35Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d8727c0c5c76c4071561c95b14554063c01a2d7d826bfef19624a346f7079096\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c63926690deadbe1a0b9bd4228c1be8c9d959ae52dc13540e1d7da0ef6ce2b44\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://12e55656cf2a2cadbef853f458f347aabe75bd6b644a48a8c320144a0bbb77f8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:36Z\\\"}},\\\"volumeMounts\\\":[{\\\
"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ff005273cedb4a3be19dafd07b2572733446d00c1aa3d89dec6dad79a9b37cb7\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://43f9db3464d315f5b596f15327b9e7cfc2935bb8f18492bcf4aa1b2d8e2e8951\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-28T06:52:53Z\\\",\\\"message\\\":\\\" to sync for client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file\\\\nI1128 06:52:53.805479 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\nI1128 06:52:53.805349 1 requestheader_controller.go:172] Starting RequestHeaderAuthRequestController\\\\nI1128 06:52:53.807455 1 shared_informer.go:313] Waiting for caches to sync for RequestHeaderAuthRequestController\\\\nI1128 06:52:53.805665 1 envvar.go:172] \\\\\\\"Feature gate default state\\\\\\\" feature=\\\\\\\"WatchListClient\\\\\\\" enabled=false\\\\nI1128 06:52:53.807562 1 envvar.go:172] \\\\\\\"Feature gate default state\\\\\\\" feature=\\\\\\\"InformerResourceVersion\\\\\\\" enabled=false\\\\nI1128 06:52:53.805922 1 tlsconfig.go:203] \\\\\\\"Loaded serving cert\\\\\\\" certName=\\\\\\\"serving-cert::/tmp/serving-cert-870734072/tls.crt::/tmp/serving-cert-870734072/tls.key\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"localhost\\\\\\\\\\\\\\\" [serving] validServingFor=[localhost] issuer=\\\\\\\\\\\\\\\"check-endpoints-signer@1764312757\\\\\\\\\\\\\\\" (2025-11-28 06:52:36 +0000 UTC to 2025-12-28 06:52:37 +0000 UTC (now=2025-11-28 06:52:53.805871396 +0000 UTC))\\\\\\\"\\\\nI1128 06:52:53.808356 1 named_certificates.go:53] \\\\\\\"Loaded SNI cert\\\\\\\" index=0 certName=\\\\\\\"self-signed loopback\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"apiserver-loopback-client@1764312768\\\\\\\\\\\\\\\" [serving] validServingFor=[apiserver-loopback-client] issuer=\\\\\\\\\\\\\\\"apiserver-loopback-client-ca@1764312768\\\\\\\\\\\\\\\" (2025-11-28 05:52:47 +0000 UTC to 2026-11-28 05:52:47 +0000 UTC (now=2025-11-28 06:52:53.808326937 +0000 UTC))\\\\\\\"\\\\nI1128 06:52:53.808434 1 secure_serving.go:213] Serving securely on [::]:17697\\\\nI1128 06:52:53.808491 1 genericapiserver.go:683] [graceful-termination] waiting for shutdown to be initiated\\\\nI1128 06:52:53.805962 1 dynamic_serving_content.go:135] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-870734072/tls.crt::/tmp/serving-cert-870734072/tls.key\\\\\\\"\\\\nI1128 06:52:53.808872 1 tlsconfig.go:243] \\\\\\\"Starting DynamicServingCertificateController\\\\\\\"\\\\nF1128 06:52:53.810397 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T06:52:37Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cd17aa15325fbe3a40b81cea5bfec137ef02a8dd2d5f8393be4d206a8360ba89\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:37Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9a1a9e0fb2169a4caf2986906a87573648458725e2571ec377704856d3c16b47\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9a1a9e0fb2169a4caf2986906a87573648458725e2571ec377704856d3c16b47\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T06:52:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T06:52:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T06:52:35Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:52:57Z is after 2025-08-24T17:21:41Z" Nov 28 06:52:57 crc kubenswrapper[4922]: I1128 06:52:57.270664 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:52:57Z is after 2025-08-24T17:21:41Z" Nov 28 06:52:57 crc kubenswrapper[4922]: I1128 06:52:57.286609 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:52:57Z is after 2025-08-24T17:21:41Z" Nov 28 06:52:57 crc kubenswrapper[4922]: I1128 06:52:57.298176 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:52:57 crc kubenswrapper[4922]: I1128 06:52:57.298231 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:52:57 crc kubenswrapper[4922]: I1128 06:52:57.298243 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:52:57 crc kubenswrapper[4922]: I1128 06:52:57.298259 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:52:57 crc kubenswrapper[4922]: I1128 06:52:57.298270 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:52:57Z","lastTransitionTime":"2025-11-28T06:52:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 06:52:57 crc kubenswrapper[4922]: I1128 06:52:57.308125 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-7gdxt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ac5c6b67-2037-400e-8e03-845b47d8ca67\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dtxdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dtxdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":fa
lse,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dtxdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dtxdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dtxdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dtxdn\\\",\\\"readOnly\\
\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dtxdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dtxdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3aaa3ffb5b99838c7969b3e241ad9d7a0bc4797fac3ec52280
914abbb7ca1b24\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3aaa3ffb5b99838c7969b3e241ad9d7a0bc4797fac3ec52280914abbb7ca1b24\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T06:52:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dtxdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T06:52:54Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-7gdxt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:52:57Z is after 2025-08-24T17:21:41Z" Nov 28 06:52:57 crc kubenswrapper[4922]: I1128 06:52:57.321706 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-z5d9x" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"21211ec8-3baf-4230-9cd8-c641f6bdc0e1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:57Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:57Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:57Z\\\",\\\"message\\\":\\\"containers with unready status: 
[node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g4hdz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T06:52:57Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-z5d9x\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:52:57Z is after 2025-08-24T17:21:41Z" Nov 28 06:52:57 crc kubenswrapper[4922]: I1128 06:52:57.341734 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f54a7fc3-dff2-4919-91a9-7defe17b717e\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2b39d53d3c00be07ad6ed44fb37961669118efd3f0a953a39e05c90c9fc30319\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1c84fe4f54f9b9fc76f23f13aaee875e9d127be3dec3e3952893436fb9d94936\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@s
ha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bf8a543b9af64068e2164c1fb7fcf8117e36a3f1a6c6e40df2a63da8fea86612\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7ef4b355167e11c82fb8067373d0d1b4ec5551157e683043c98f9249082795d2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T06:52:35Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:52:57Z is after 2025-08-24T17:21:41Z" Nov 28 06:52:57 crc kubenswrapper[4922]: I1128 06:52:57.348954 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-g4hdz\" (UniqueName: \"kubernetes.io/projected/21211ec8-3baf-4230-9cd8-c641f6bdc0e1-kube-api-access-g4hdz\") pod \"node-ca-z5d9x\" (UID: \"21211ec8-3baf-4230-9cd8-c641f6bdc0e1\") " pod="openshift-image-registry/node-ca-z5d9x" Nov 28 06:52:57 crc kubenswrapper[4922]: I1128 06:52:57.349129 4922 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/21211ec8-3baf-4230-9cd8-c641f6bdc0e1-host\") pod \"node-ca-z5d9x\" (UID: \"21211ec8-3baf-4230-9cd8-c641f6bdc0e1\") " pod="openshift-image-registry/node-ca-z5d9x" Nov 28 06:52:57 crc kubenswrapper[4922]: I1128 06:52:57.349463 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/21211ec8-3baf-4230-9cd8-c641f6bdc0e1-serviceca\") pod \"node-ca-z5d9x\" (UID: \"21211ec8-3baf-4230-9cd8-c641f6bdc0e1\") " pod="openshift-image-registry/node-ca-z5d9x" Nov 28 06:52:57 crc kubenswrapper[4922]: I1128 06:52:57.359294 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:52:57Z is after 2025-08-24T17:21:41Z" Nov 28 06:52:57 crc kubenswrapper[4922]: I1128 06:52:57.374473 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-w9zxj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e1f29751-83a6-4469-b733-50e654026f8c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://679d64be6eca63ee652a4cb6cc5432c6ce549d1de243bdfbde115af634e770e3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ghdft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T06:52:54Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-w9zxj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 
2025-11-28T06:52:57Z is after 2025-08-24T17:21:41Z" Nov 28 06:52:57 crc kubenswrapper[4922]: I1128 06:52:57.395182 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-jgzjd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b05f16bb-1729-4fd8-883a-4fb960bf4cff\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://099001160eba2d5545fdd42a6fac2b9842549a64b5d3fe093c274e073a156e5a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fwd5b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":
\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T06:52:54Z\\\"}}\" for pod \"openshift-multus\"/\"multus-jgzjd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:52:57Z is after 2025-08-24T17:21:41Z" Nov 28 06:52:57 crc kubenswrapper[4922]: I1128 06:52:57.401794 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:52:57 crc kubenswrapper[4922]: I1128 06:52:57.401845 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:52:57 crc kubenswrapper[4922]: I1128 06:52:57.401863 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:52:57 crc kubenswrapper[4922]: I1128 06:52:57.401884 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:52:57 crc kubenswrapper[4922]: I1128 06:52:57.401903 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:52:57Z","lastTransitionTime":"2025-11-28T06:52:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:52:57 crc kubenswrapper[4922]: I1128 06:52:57.423871 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-xm948" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"28b50482-ffec-406c-9ff9-9604bce5d5d5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"message\\\":\\\"containers with incomplete status: [cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-76zhj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ea8d65e8ee7eb92aa290261e8f51b41b46819bc513aaca5c3bbdf3aa90a4acf1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ea8d65e8ee7eb92aa290261e8f51b41b46819bc513aaca5c3bbdf3aa90a4acf1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T06:52:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-76zhj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-76zhj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reaso
n\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-76zhj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-76zhj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-76zhj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-76zhj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T06:52:54Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-xm948\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:52:57Z is after 2025-08-24T17:21:41Z" Nov 28 06:52:57 crc 
kubenswrapper[4922]: I1128 06:52:57.450281 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host\" (UniqueName: \"kubernetes.io/host-path/21211ec8-3baf-4230-9cd8-c641f6bdc0e1-host\") pod \"node-ca-z5d9x\" (UID: \"21211ec8-3baf-4230-9cd8-c641f6bdc0e1\") " pod="openshift-image-registry/node-ca-z5d9x" Nov 28 06:52:57 crc kubenswrapper[4922]: I1128 06:52:57.450203 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/21211ec8-3baf-4230-9cd8-c641f6bdc0e1-host\") pod \"node-ca-z5d9x\" (UID: \"21211ec8-3baf-4230-9cd8-c641f6bdc0e1\") " pod="openshift-image-registry/node-ca-z5d9x" Nov 28 06:52:57 crc kubenswrapper[4922]: I1128 06:52:57.450536 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/21211ec8-3baf-4230-9cd8-c641f6bdc0e1-serviceca\") pod \"node-ca-z5d9x\" (UID: \"21211ec8-3baf-4230-9cd8-c641f6bdc0e1\") " pod="openshift-image-registry/node-ca-z5d9x" Nov 28 06:52:57 crc kubenswrapper[4922]: I1128 06:52:57.451678 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-g4hdz\" (UniqueName: \"kubernetes.io/projected/21211ec8-3baf-4230-9cd8-c641f6bdc0e1-kube-api-access-g4hdz\") pod \"node-ca-z5d9x\" (UID: \"21211ec8-3baf-4230-9cd8-c641f6bdc0e1\") " pod="openshift-image-registry/node-ca-z5d9x" Nov 28 06:52:57 crc kubenswrapper[4922]: I1128 06:52:57.451617 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/21211ec8-3baf-4230-9cd8-c641f6bdc0e1-serviceca\") pod \"node-ca-z5d9x\" (UID: \"21211ec8-3baf-4230-9cd8-c641f6bdc0e1\") " pod="openshift-image-registry/node-ca-z5d9x" Nov 28 06:52:57 crc kubenswrapper[4922]: I1128 06:52:57.473338 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-g4hdz\" (UniqueName: \"kubernetes.io/projected/21211ec8-3baf-4230-9cd8-c641f6bdc0e1-kube-api-access-g4hdz\") pod \"node-ca-z5d9x\" (UID: \"21211ec8-3baf-4230-9cd8-c641f6bdc0e1\") " pod="openshift-image-registry/node-ca-z5d9x" Nov 28 06:52:57 crc kubenswrapper[4922]: I1128 06:52:57.493106 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/node-ca-z5d9x" Nov 28 06:52:57 crc kubenswrapper[4922]: I1128 06:52:57.504196 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:52:57 crc kubenswrapper[4922]: I1128 06:52:57.504254 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:52:57 crc kubenswrapper[4922]: I1128 06:52:57.504274 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:52:57 crc kubenswrapper[4922]: I1128 06:52:57.504300 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:52:57 crc kubenswrapper[4922]: I1128 06:52:57.504317 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:52:57Z","lastTransitionTime":"2025-11-28T06:52:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 06:52:57 crc kubenswrapper[4922]: W1128 06:52:57.510473 4922 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod21211ec8_3baf_4230_9cd8_c641f6bdc0e1.slice/crio-f2d48382cee011cde4fc28f77e097af7ca2dd5a9bc4e42fb6598692e9db715c1 WatchSource:0}: Error finding container f2d48382cee011cde4fc28f77e097af7ca2dd5a9bc4e42fb6598692e9db715c1: Status 404 returned error can't find the container with id f2d48382cee011cde4fc28f77e097af7ca2dd5a9bc4e42fb6598692e9db715c1 Nov 28 06:52:57 crc kubenswrapper[4922]: I1128 06:52:57.609826 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/node-ca-z5d9x" event={"ID":"21211ec8-3baf-4230-9cd8-c641f6bdc0e1","Type":"ContainerStarted","Data":"f2d48382cee011cde4fc28f77e097af7ca2dd5a9bc4e42fb6598692e9db715c1"} Nov 28 06:52:57 crc kubenswrapper[4922]: I1128 06:52:57.613086 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:52:57 crc kubenswrapper[4922]: I1128 06:52:57.613145 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:52:57 crc kubenswrapper[4922]: I1128 06:52:57.613163 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:52:57 crc kubenswrapper[4922]: I1128 06:52:57.613187 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:52:57 crc kubenswrapper[4922]: I1128 06:52:57.614127 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:52:57Z","lastTransitionTime":"2025-11-28T06:52:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 06:52:57 crc kubenswrapper[4922]: I1128 06:52:57.615959 4922 generic.go:334] "Generic (PLEG): container finished" podID="28b50482-ffec-406c-9ff9-9604bce5d5d5" containerID="b52be6274c308e05417b43b1a405e2156837ac5b1e751cf1bf07f1820e5072c1" exitCode=0 Nov 28 06:52:57 crc kubenswrapper[4922]: I1128 06:52:57.616089 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-xm948" event={"ID":"28b50482-ffec-406c-9ff9-9604bce5d5d5","Type":"ContainerDied","Data":"b52be6274c308e05417b43b1a405e2156837ac5b1e751cf1bf07f1820e5072c1"} Nov 28 06:52:57 crc kubenswrapper[4922]: I1128 06:52:57.619431 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" event={"ID":"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49","Type":"ContainerStarted","Data":"4fb859dbed8c4c25965a739a2bd823f90f867a3bb629206994858cd62ba597ff"} Nov 28 06:52:57 crc kubenswrapper[4922]: I1128 06:52:57.639443 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:52:57Z is after 2025-08-24T17:21:41Z" Nov 28 06:52:57 crc kubenswrapper[4922]: I1128 06:52:57.655736 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:52:57Z is after 2025-08-24T17:21:41Z" Nov 28 06:52:57 crc kubenswrapper[4922]: I1128 06:52:57.680718 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-7gdxt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ac5c6b67-2037-400e-8e03-845b47d8ca67\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dtxdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dtxdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dtxdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-dtxdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dtxdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dtxdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dtxdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dtxdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3aaa3ffb5b99838c7969b3e241ad9d7a0bc4797fac3ec52280914abbb7ca1b24\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3aaa3ffb5b99838c7969b3e241ad9d7a0bc4797fac3ec52280914abbb7ca1b24\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T06:52:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dtxdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T06:52:54Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-7gdxt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:52:57Z 
is after 2025-08-24T17:21:41Z" Nov 28 06:52:57 crc kubenswrapper[4922]: I1128 06:52:57.700338 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"78d1b075-6126-476f-8318-483aeaa7b542\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:35Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:35Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d8727c0c5c76c4071561c95b14554063c01a2d7d826bfef19624a346f7079096\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c63926690deadbe1a0b9bd4228c1be8c9d959ae52dc13540e1d7da0ef6ce2b44\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://12e55656cf2a2cadbef853f458f347aabe75bd6b644a48a8c320144a0bbb77f8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\
\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ff005273cedb4a3be19dafd07b2572733446d00c1aa3d89dec6dad79a9b37cb7\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://43f9db3464d315f5b596f15327b9e7cfc2935bb8f18492bcf4aa1b2d8e2e8951\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-28T06:52:53Z\\\",\\\"message\\\":\\\" to sync for client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file\\\\nI1128 06:52:53.805479 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\nI1128 06:52:53.805349 1 requestheader_controller.go:172] Starting RequestHeaderAuthRequestController\\\\nI1128 06:52:53.807455 1 shared_informer.go:313] Waiting for caches to sync for RequestHeaderAuthRequestController\\\\nI1128 06:52:53.805665 1 envvar.go:172] \\\\\\\"Feature gate default state\\\\\\\" feature=\\\\\\\"WatchListClient\\\\\\\" enabled=false\\\\nI1128 06:52:53.807562 1 envvar.go:172] \\\\\\\"Feature gate default state\\\\\\\" feature=\\\\\\\"InformerResourceVersion\\\\\\\" enabled=false\\\\nI1128 06:52:53.805922 1 tlsconfig.go:203] \\\\\\\"Loaded serving cert\\\\\\\" certName=\\\\\\\"serving-cert::/tmp/serving-cert-870734072/tls.crt::/tmp/serving-cert-870734072/tls.key\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"localhost\\\\\\\\\\\\\\\" [serving] validServingFor=[localhost] issuer=\\\\\\\\\\\\\\\"check-endpoints-signer@1764312757\\\\\\\\\\\\\\\" (2025-11-28 06:52:36 +0000 UTC to 2025-12-28 06:52:37 +0000 UTC (now=2025-11-28 06:52:53.805871396 +0000 UTC))\\\\\\\"\\\\nI1128 06:52:53.808356 1 named_certificates.go:53] \\\\\\\"Loaded SNI cert\\\\\\\" index=0 certName=\\\\\\\"self-signed loopback\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"apiserver-loopback-client@1764312768\\\\\\\\\\\\\\\" [serving] validServingFor=[apiserver-loopback-client] issuer=\\\\\\\\\\\\\\\"apiserver-loopback-client-ca@1764312768\\\\\\\\\\\\\\\" (2025-11-28 05:52:47 +0000 UTC to 2026-11-28 05:52:47 +0000 UTC (now=2025-11-28 06:52:53.808326937 +0000 UTC))\\\\\\\"\\\\nI1128 06:52:53.808434 1 secure_serving.go:213] Serving securely on [::]:17697\\\\nI1128 06:52:53.808491 1 genericapiserver.go:683] [graceful-termination] waiting for shutdown to be initiated\\\\nI1128 06:52:53.805962 1 dynamic_serving_content.go:135] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-870734072/tls.crt::/tmp/serving-cert-870734072/tls.key\\\\\\\"\\\\nI1128 06:52:53.808872 1 tlsconfig.go:243] \\\\\\\"Starting DynamicServingCertificateController\\\\\\\"\\\\nF1128 06:52:53.810397 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T06:52:37Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cd17aa15325fbe3a40b81cea5bfec137ef02a8dd2d5f8393be4d206a8360ba89\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:37Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9a1a9e0fb2169a4caf2986906a87573648458725e2571ec377704856d3c16b47\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9a1a9e0fb2169a4caf2986906a87573648458725e2571ec377704856d3c16b47\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T06:52:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T06:52:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T06:52:35Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:52:57Z is after 2025-08-24T17:21:41Z" Nov 28 06:52:57 crc kubenswrapper[4922]: I1128 06:52:57.713719 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:52:57Z is after 2025-08-24T17:21:41Z" Nov 28 06:52:57 crc kubenswrapper[4922]: I1128 06:52:57.716701 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:52:57 crc kubenswrapper[4922]: I1128 06:52:57.716734 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:52:57 crc kubenswrapper[4922]: I1128 06:52:57.716746 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:52:57 crc kubenswrapper[4922]: I1128 06:52:57.716761 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:52:57 crc kubenswrapper[4922]: I1128 06:52:57.716772 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:52:57Z","lastTransitionTime":"2025-11-28T06:52:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 06:52:57 crc kubenswrapper[4922]: I1128 06:52:57.723298 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-w9zxj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e1f29751-83a6-4469-b733-50e654026f8c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://679d64be6eca63ee652a4cb6cc5432c6ce549d1de243bdfbde115af634e770e3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ghdft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T06:52:54Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-w9zxj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:52:57Z is after 2025-08-24T17:21:41Z" Nov 28 06:52:57 crc kubenswrapper[4922]: I1128 06:52:57.738415 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-jgzjd" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b05f16bb-1729-4fd8-883a-4fb960bf4cff\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://099001160eba2d5545fdd42a6fac2b9842549a64b5d3fe093c274e073a156e5a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fwd5b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T06:52:54Z\\\"}}\" for pod \"openshift-multus\"/\"multus-jgzjd\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:52:57Z is after 2025-08-24T17:21:41Z" Nov 28 06:52:57 crc kubenswrapper[4922]: I1128 06:52:57.752131 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-xm948" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"28b50482-ffec-406c-9ff9-9604bce5d5d5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"message\\\":\\\"containers with incomplete status: [bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-76zhj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ea8d65e8ee7eb92aa290261e8f51b41b46819bc513aaca5c3bbdf3aa90a4acf1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ea8d65e8ee7eb92aa290261e8f51b41b46819bc513aaca5c3bbdf3aa90a4acf1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T06:52:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin
\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-76zhj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b52be6274c308e05417b43b1a405e2156837ac5b1e751cf1bf07f1820e5072c1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b52be6274c308e05417b43b1a405e2156837ac5b1e751cf1bf07f1820e5072c1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T06:52:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T06:52:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-76zhj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-76zhj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-76zhj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}
,{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-76zhj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-76zhj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T06:52:54Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-xm948\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:52:57Z is after 2025-08-24T17:21:41Z" Nov 28 06:52:57 crc kubenswrapper[4922]: I1128 06:52:57.763172 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-z5d9x" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"21211ec8-3baf-4230-9cd8-c641f6bdc0e1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:57Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:57Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:57Z\\\",\\\"message\\\":\\\"containers with unready status: 
[node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g4hdz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T06:52:57Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-z5d9x\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:52:57Z is after 2025-08-24T17:21:41Z" Nov 28 06:52:57 crc kubenswrapper[4922]: I1128 06:52:57.775137 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f54a7fc3-dff2-4919-91a9-7defe17b717e\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2b39d53d3c00be07ad6ed44fb37961669118efd3f0a953a39e05c90c9fc30319\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1c84fe4f54f9b9fc76f23f13aaee875e9d127be3dec3e3952893436fb9d94936\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@s
ha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bf8a543b9af64068e2164c1fb7fcf8117e36a3f1a6c6e40df2a63da8fea86612\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7ef4b355167e11c82fb8067373d0d1b4ec5551157e683043c98f9249082795d2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T06:52:35Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:52:57Z is after 2025-08-24T17:21:41Z" Nov 28 06:52:57 crc kubenswrapper[4922]: I1128 06:52:57.791575 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://60275d4709ecec957c37c52e762628422258288394fb23bb6190e98253977288\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d7be069e52fb9de73b7d4297a34eaab68e25c764ed60172e87146259d0d0b6ed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:52:57Z is after 2025-08-24T17:21:41Z" Nov 28 06:52:57 crc kubenswrapper[4922]: I1128 06:52:57.819015 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:52:57 crc kubenswrapper[4922]: I1128 06:52:57.819065 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:52:57 crc kubenswrapper[4922]: I1128 06:52:57.819080 4922 kubelet_node_status.go:724] "Recording event message for node" 
node="crc" event="NodeHasSufficientPID" Nov 28 06:52:57 crc kubenswrapper[4922]: I1128 06:52:57.819102 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:52:57 crc kubenswrapper[4922]: I1128 06:52:57.819117 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:52:57Z","lastTransitionTime":"2025-11-28T06:52:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:52:57 crc kubenswrapper[4922]: I1128 06:52:57.819124 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8de96806bba4cb76f53ccf39f1188732d96955671049b550589a836ed21e0616\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:52:57Z is after 2025-08-24T17:21:41Z" Nov 28 06:52:57 crc kubenswrapper[4922]: I1128 06:52:57.858356 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:52:57Z is after 2025-08-24T17:21:41Z" Nov 28 06:52:57 crc kubenswrapper[4922]: I1128 06:52:57.891249 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-h8wk6" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0498340a-5e95-42bf-a0a6-8ac89a6b8858\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://19e8a18b97500977a51210206bba89633ff9939e989bfe1a6e471f5a23c83520\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zf7vq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b903fc60349ec7d5004f23ec933dce13d646a2c343819495564005dfb4813314\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zf7vq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T06:52:54Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-h8wk6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:52:57Z is after 2025-08-24T17:21:41Z" Nov 28 06:52:57 crc kubenswrapper[4922]: I1128 06:52:57.906599 4922 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:52:57Z is after 2025-08-24T17:21:41Z" Nov 28 06:52:57 crc kubenswrapper[4922]: I1128 06:52:57.915649 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-h8wk6" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0498340a-5e95-42bf-a0a6-8ac89a6b8858\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://19e8a18b97500977a51210206bba89633ff9939e989bfe1a6e471f5a23c83520\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zf7vq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b903fc60349ec7d5004f23ec933dce13d646a2c343819495564005dfb4813314\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zf7vq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T06:52:54Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-h8wk6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:52:57Z is after 2025-08-24T17:21:41Z" Nov 28 06:52:57 crc kubenswrapper[4922]: I1128 06:52:57.921387 4922 kubelet_node_status.go:724] 
"Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:52:57 crc kubenswrapper[4922]: I1128 06:52:57.921424 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:52:57 crc kubenswrapper[4922]: I1128 06:52:57.921436 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:52:57 crc kubenswrapper[4922]: I1128 06:52:57.921452 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:52:57 crc kubenswrapper[4922]: I1128 06:52:57.921464 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:52:57Z","lastTransitionTime":"2025-11-28T06:52:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:52:57 crc kubenswrapper[4922]: I1128 06:52:57.927085 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"78d1b075-6126-476f-8318-483aeaa7b542\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:35Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:35Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d8727c0c5c76c4071561c95b14554063c01a2d7d826bfef19624a346f7079096\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c63926690deadbe1a0b9bd4228c1be8c9d959ae52dc13540e1d7da0ef6ce2b44\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://12e55656cf2a2cadbef853f458f347aabe75bd6b644a48a8c320144a0bbb77f8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ff005273cedb4a3be19dafd07b2572733446d00c1aa3d89dec6dad79a9b37cb7\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://43f9db3464d315f5b596f15327b9e7cfc2935bb8f18492bcf4aa1b2d8e2e8951\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-28T06:52:53Z\\\",\\\"message\\\":\\\" to sync for 
client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file\\\\nI1128 06:52:53.805479 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\nI1128 06:52:53.805349 1 requestheader_controller.go:172] Starting RequestHeaderAuthRequestController\\\\nI1128 06:52:53.807455 1 shared_informer.go:313] Waiting for caches to sync for RequestHeaderAuthRequestController\\\\nI1128 06:52:53.805665 1 envvar.go:172] \\\\\\\"Feature gate default state\\\\\\\" feature=\\\\\\\"WatchListClient\\\\\\\" enabled=false\\\\nI1128 06:52:53.807562 1 envvar.go:172] \\\\\\\"Feature gate default state\\\\\\\" feature=\\\\\\\"InformerResourceVersion\\\\\\\" enabled=false\\\\nI1128 06:52:53.805922 1 tlsconfig.go:203] \\\\\\\"Loaded serving cert\\\\\\\" certName=\\\\\\\"serving-cert::/tmp/serving-cert-870734072/tls.crt::/tmp/serving-cert-870734072/tls.key\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"localhost\\\\\\\\\\\\\\\" [serving] validServingFor=[localhost] issuer=\\\\\\\\\\\\\\\"check-endpoints-signer@1764312757\\\\\\\\\\\\\\\" (2025-11-28 06:52:36 +0000 UTC to 2025-12-28 06:52:37 +0000 UTC (now=2025-11-28 06:52:53.805871396 +0000 UTC))\\\\\\\"\\\\nI1128 06:52:53.808356 1 named_certificates.go:53] \\\\\\\"Loaded SNI cert\\\\\\\" index=0 certName=\\\\\\\"self-signed loopback\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"apiserver-loopback-client@1764312768\\\\\\\\\\\\\\\" [serving] validServingFor=[apiserver-loopback-client] issuer=\\\\\\\\\\\\\\\"apiserver-loopback-client-ca@1764312768\\\\\\\\\\\\\\\" (2025-11-28 05:52:47 +0000 UTC to 2026-11-28 05:52:47 +0000 UTC (now=2025-11-28 06:52:53.808326937 +0000 UTC))\\\\\\\"\\\\nI1128 06:52:53.808434 1 secure_serving.go:213] Serving securely on [::]:17697\\\\nI1128 06:52:53.808491 1 genericapiserver.go:683] [graceful-termination] waiting for shutdown to be initiated\\\\nI1128 06:52:53.805962 1 dynamic_serving_content.go:135] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-870734072/tls.crt::/tmp/serving-cert-870734072/tls.key\\\\\\\"\\\\nI1128 06:52:53.808872 1 tlsconfig.go:243] \\\\\\\"Starting DynamicServingCertificateController\\\\\\\"\\\\nF1128 06:52:53.810397 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T06:52:37Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cd17aa15325fbe3a40b81cea5bfec137ef02a8dd2d5f8393be4d206a8360ba89\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:37Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9a1a9e0fb2169a4caf2986906a87573648458725e2571ec377704856d3c16b47\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9a1a9e0fb2169a4caf2986906a87573648458725e2571ec377704856d3c16b47\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T06:52:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T06:52:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T06:52:35Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:52:57Z is after 2025-08-24T17:21:41Z" Nov 28 06:52:57 crc kubenswrapper[4922]: I1128 06:52:57.939336 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:57Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:57Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4fb859dbed8c4c25965a739a2bd823f90f867a3bb629206994858cd62ba597ff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:52:57Z is after 2025-08-24T17:21:41Z" Nov 28 06:52:57 crc kubenswrapper[4922]: I1128 06:52:57.950285 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:52:57Z is after 2025-08-24T17:21:41Z" Nov 28 06:52:57 crc kubenswrapper[4922]: I1128 06:52:57.964764 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-7gdxt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ac5c6b67-2037-400e-8e03-845b47d8ca67\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dtxdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dtxdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dtxdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-dtxdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dtxdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dtxdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dtxdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dtxdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3aaa3ffb5b99838c7969b3e241ad9d7a0bc4797fac3ec52280914abbb7ca1b24\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3aaa3ffb5b99838c7969b3e241ad9d7a0bc4797fac3ec52280914abbb7ca1b24\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T06:52:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dtxdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T06:52:54Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-7gdxt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:52:57Z 
is after 2025-08-24T17:21:41Z" Nov 28 06:52:57 crc kubenswrapper[4922]: I1128 06:52:57.974082 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-z5d9x" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"21211ec8-3baf-4230-9cd8-c641f6bdc0e1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:57Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:57Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:57Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g4hdz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T06:52:57Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-z5d9x\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:52:57Z is after 2025-08-24T17:21:41Z" Nov 28 06:52:57 crc kubenswrapper[4922]: I1128 06:52:57.988405 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f54a7fc3-dff2-4919-91a9-7defe17b717e\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2b39d53d3c00be07ad6ed44fb37961669118efd3f0a953a39e05c90c9fc30319\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1c84fe4f54f9b9fc76f23f13aaee875e9d127be3dec3e3952893436fb9d94936\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bf8a543b9af64068e2164c1fb7fcf8117e36a3f1a6c6e40df2a63da8fea86612\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7ef4b355167e11c82fb8067373d0d1b4ec5551157e683043c98f9249082795d2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T06:52:35Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:52:57Z is after 2025-08-24T17:21:41Z" Nov 28 06:52:58 crc kubenswrapper[4922]: I1128 06:52:58.001175 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:52:57Z is after 2025-08-24T17:21:41Z" Nov 28 06:52:58 crc kubenswrapper[4922]: I1128 06:52:58.024465 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:52:58 crc kubenswrapper[4922]: I1128 06:52:58.024509 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:52:58 crc kubenswrapper[4922]: I1128 06:52:58.024526 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:52:58 crc kubenswrapper[4922]: I1128 06:52:58.024546 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:52:58 crc kubenswrapper[4922]: I1128 06:52:58.024579 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:52:58Z","lastTransitionTime":"2025-11-28T06:52:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 06:52:58 crc kubenswrapper[4922]: I1128 06:52:58.031162 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-w9zxj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e1f29751-83a6-4469-b733-50e654026f8c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://679d64be6eca63ee652a4cb6cc5432c6ce549d1de243bdfbde115af634e770e3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ghdft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T06:52:54Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-w9zxj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:52:58Z is after 2025-08-24T17:21:41Z" Nov 28 06:52:58 crc kubenswrapper[4922]: I1128 06:52:58.057824 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 06:52:58 crc kubenswrapper[4922]: I1128 06:52:58.057956 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 06:52:58 crc kubenswrapper[4922]: 
I1128 06:52:58.058003 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 06:52:58 crc kubenswrapper[4922]: E1128 06:52:58.058054 4922 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 06:53:02.058022923 +0000 UTC m=+26.978418505 (durationBeforeRetry 4s). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 06:52:58 crc kubenswrapper[4922]: E1128 06:52:58.058115 4922 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 28 06:52:58 crc kubenswrapper[4922]: E1128 06:52:58.058148 4922 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Nov 28 06:52:58 crc kubenswrapper[4922]: E1128 06:52:58.058176 4922 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-28 06:53:02.058160827 +0000 UTC m=+26.978556419 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 28 06:52:58 crc kubenswrapper[4922]: E1128 06:52:58.058204 4922 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-28 06:53:02.058194088 +0000 UTC m=+26.978589780 (durationBeforeRetry 4s). 
Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Nov 28 06:52:58 crc kubenswrapper[4922]: I1128 06:52:58.077063 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-jgzjd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b05f16bb-1729-4fd8-883a-4fb960bf4cff\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://099001160eba2d5545fdd42a6fac2b9842549a64b5d3fe093c274e073a156e5a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/
serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fwd5b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T06:52:54Z\\\"}}\" for pod \"openshift-multus\"/\"multus-jgzjd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:52:58Z is after 2025-08-24T17:21:41Z" Nov 28 06:52:58 crc kubenswrapper[4922]: I1128 06:52:58.119027 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-xm948" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"28b50482-ffec-406c-9ff9-9604bce5d5d5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"message\\\":\\\"containers with incomplete status: [bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-76zhj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ea8d65e8ee7eb92aa290261e8f51b41b46819bc513aaca5c3bbdf3aa90a4acf1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ea8d65e8ee7eb92aa290261e8f51b41b46819bc513aaca5c3bbdf3aa90a4acf1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T06:52:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-76zhj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b52be6274c308e05417b43b1a405e2156837ac5b1e751cf1bf07f1820e5072c1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b52be6274c308e05417b43b1a405e2156837ac5b1e751cf1bf07f1820e5072c1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T06:52:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T06:52:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\"
:\\\"kube-api-access-76zhj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-76zhj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-76zhj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-76zhj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-76zhj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-
28T06:52:54Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-xm948\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:52:58Z is after 2025-08-24T17:21:41Z" Nov 28 06:52:58 crc kubenswrapper[4922]: I1128 06:52:58.126673 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:52:58 crc kubenswrapper[4922]: I1128 06:52:58.126761 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:52:58 crc kubenswrapper[4922]: I1128 06:52:58.126818 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:52:58 crc kubenswrapper[4922]: I1128 06:52:58.126951 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:52:58 crc kubenswrapper[4922]: I1128 06:52:58.127010 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:52:58Z","lastTransitionTime":"2025-11-28T06:52:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:52:58 crc kubenswrapper[4922]: I1128 06:52:58.155381 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://60275d4709ecec957c37c52e762628422258288394fb23bb6190e98253977288\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d7be069e52fb9de73b7d4297a34eaab68e25c764ed60172e87146259d0d0b6ed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:52:58Z is after 2025-08-24T17:21:41Z" Nov 28 06:52:58 crc kubenswrapper[4922]: I1128 06:52:58.158820 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 06:52:58 crc kubenswrapper[4922]: I1128 
06:52:58.158912 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 28 06:52:58 crc kubenswrapper[4922]: E1128 06:52:58.159091 4922 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered
Nov 28 06:52:58 crc kubenswrapper[4922]: E1128 06:52:58.159173 4922 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered
Nov 28 06:52:58 crc kubenswrapper[4922]: E1128 06:52:58.159244 4922 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Nov 28 06:52:58 crc kubenswrapper[4922]: E1128 06:52:58.159333 4922 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-11-28 06:53:02.159318898 +0000 UTC m=+27.079714480 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Nov 28 06:52:58 crc kubenswrapper[4922]: E1128 06:52:58.159131 4922 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered
Nov 28 06:52:58 crc kubenswrapper[4922]: E1128 06:52:58.159457 4922 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered
Nov 28 06:52:58 crc kubenswrapper[4922]: E1128 06:52:58.159506 4922 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Nov 28 06:52:58 crc kubenswrapper[4922]: E1128 06:52:58.159574 4922 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-11-28 06:53:02.159565955 +0000 UTC m=+27.079961537 (durationBeforeRetry 4s).
Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 28 06:52:58 crc kubenswrapper[4922]: I1128 06:52:58.193342 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8de96806bba4cb76f53ccf39f1188732d96955671049b550589a836ed21e0616\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:52:58Z is after 2025-08-24T17:21:41Z" Nov 28 06:52:58 crc kubenswrapper[4922]: I1128 06:52:58.229963 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:52:58 crc kubenswrapper[4922]: I1128 06:52:58.229996 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:52:58 crc kubenswrapper[4922]: I1128 06:52:58.230005 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:52:58 crc kubenswrapper[4922]: I1128 06:52:58.230018 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:52:58 crc kubenswrapper[4922]: I1128 06:52:58.230028 4922 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:52:58Z","lastTransitionTime":"2025-11-28T06:52:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:52:58 crc kubenswrapper[4922]: I1128 06:52:58.332876 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:52:58 crc kubenswrapper[4922]: I1128 06:52:58.332958 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:52:58 crc kubenswrapper[4922]: I1128 06:52:58.332989 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:52:58 crc kubenswrapper[4922]: I1128 06:52:58.333021 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:52:58 crc kubenswrapper[4922]: I1128 06:52:58.333046 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:52:58Z","lastTransitionTime":"2025-11-28T06:52:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:52:58 crc kubenswrapper[4922]: I1128 06:52:58.398080 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 06:52:58 crc kubenswrapper[4922]: I1128 06:52:58.398123 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 06:52:58 crc kubenswrapper[4922]: I1128 06:52:58.398123 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 06:52:58 crc kubenswrapper[4922]: E1128 06:52:58.398362 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 06:52:58 crc kubenswrapper[4922]: E1128 06:52:58.398549 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 06:52:58 crc kubenswrapper[4922]: E1128 06:52:58.398706 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 06:52:58 crc kubenswrapper[4922]: I1128 06:52:58.437385 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:52:58 crc kubenswrapper[4922]: I1128 06:52:58.437454 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:52:58 crc kubenswrapper[4922]: I1128 06:52:58.437479 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:52:58 crc kubenswrapper[4922]: I1128 06:52:58.437509 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:52:58 crc kubenswrapper[4922]: I1128 06:52:58.437532 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:52:58Z","lastTransitionTime":"2025-11-28T06:52:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:52:58 crc kubenswrapper[4922]: I1128 06:52:58.539957 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:52:58 crc kubenswrapper[4922]: I1128 06:52:58.540005 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:52:58 crc kubenswrapper[4922]: I1128 06:52:58.540020 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:52:58 crc kubenswrapper[4922]: I1128 06:52:58.540036 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:52:58 crc kubenswrapper[4922]: I1128 06:52:58.540049 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:52:58Z","lastTransitionTime":"2025-11-28T06:52:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 06:52:58 crc kubenswrapper[4922]: I1128 06:52:58.625062 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/node-ca-z5d9x" event={"ID":"21211ec8-3baf-4230-9cd8-c641f6bdc0e1","Type":"ContainerStarted","Data":"d42d2096b746c6f0a9210409101237a8b283b9baa914633aefc36971712ec634"} Nov 28 06:52:58 crc kubenswrapper[4922]: I1128 06:52:58.628767 4922 generic.go:334] "Generic (PLEG): container finished" podID="28b50482-ffec-406c-9ff9-9604bce5d5d5" containerID="2e49c9118ef8a51223ca7a5ed0e966458c25e01d5fe81bffeecf85e3c958177a" exitCode=0 Nov 28 06:52:58 crc kubenswrapper[4922]: I1128 06:52:58.628849 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-xm948" event={"ID":"28b50482-ffec-406c-9ff9-9604bce5d5d5","Type":"ContainerDied","Data":"2e49c9118ef8a51223ca7a5ed0e966458c25e01d5fe81bffeecf85e3c958177a"} Nov 28 06:52:58 crc kubenswrapper[4922]: I1128 06:52:58.642628 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8de96806bba4cb76f53ccf39f1188732d96955671049b550589a836ed21e0616\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:52:58Z is after 2025-08-24T17:21:41Z" Nov 28 06:52:58 crc kubenswrapper[4922]: I1128 06:52:58.643632 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:52:58 crc kubenswrapper[4922]: I1128 06:52:58.643696 4922 kubelet_node_status.go:724] "Recording event 
message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:52:58 crc kubenswrapper[4922]: I1128 06:52:58.643712 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:52:58 crc kubenswrapper[4922]: I1128 06:52:58.643731 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:52:58 crc kubenswrapper[4922]: I1128 06:52:58.643751 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:52:58Z","lastTransitionTime":"2025-11-28T06:52:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:52:58 crc kubenswrapper[4922]: I1128 06:52:58.663178 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://60275d4709ecec957c37c52e762628422258288394fb23bb6190e98253977288\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d7be069e52fb9de73b7d4297a34eaab68e25c764ed60172e87146259d0d0b6ed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-
config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:52:58Z is after 2025-08-24T17:21:41Z" Nov 28 06:52:58 crc kubenswrapper[4922]: I1128 06:52:58.684863 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-h8wk6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0498340a-5e95-42bf-a0a6-8ac89a6b8858\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://19e8a18b97500977a51210206bba89633ff9939e989bfe1a6e471f5a23c83520\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zf7vq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b903fc60349ec7d5004f23ec933dce13d646a2c343819495564005dfb4813314\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"ku
be-api-access-zf7vq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T06:52:54Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-h8wk6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:52:58Z is after 2025-08-24T17:21:41Z" Nov 28 06:52:58 crc kubenswrapper[4922]: I1128 06:52:58.702184 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:52:58Z is after 2025-08-24T17:21:41Z" Nov 28 06:52:58 crc kubenswrapper[4922]: I1128 06:52:58.716852 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:57Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:57Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4fb859dbed8c4c25965a739a2bd823f90f867a3bb629206994858cd62ba597ff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:52:58Z is after 2025-08-24T17:21:41Z" Nov 28 06:52:58 crc kubenswrapper[4922]: I1128 06:52:58.732253 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:52:58Z is after 2025-08-24T17:21:41Z" Nov 28 06:52:58 crc kubenswrapper[4922]: I1128 06:52:58.746690 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:52:58 crc kubenswrapper[4922]: I1128 06:52:58.746730 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:52:58 crc kubenswrapper[4922]: I1128 06:52:58.746744 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:52:58 crc kubenswrapper[4922]: I1128 06:52:58.746761 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:52:58 crc kubenswrapper[4922]: I1128 06:52:58.746773 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:52:58Z","lastTransitionTime":"2025-11-28T06:52:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 06:52:58 crc kubenswrapper[4922]: I1128 06:52:58.758799 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-7gdxt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ac5c6b67-2037-400e-8e03-845b47d8ca67\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dtxdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dtxdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":fa
lse,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dtxdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dtxdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dtxdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dtxdn\\\",\\\"readOnly\\
\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dtxdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dtxdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3aaa3ffb5b99838c7969b3e241ad9d7a0bc4797fac3ec52280
914abbb7ca1b24\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3aaa3ffb5b99838c7969b3e241ad9d7a0bc4797fac3ec52280914abbb7ca1b24\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T06:52:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dtxdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T06:52:54Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-7gdxt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:52:58Z is after 2025-08-24T17:21:41Z" Nov 28 06:52:58 crc kubenswrapper[4922]: I1128 06:52:58.785096 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"78d1b075-6126-476f-8318-483aeaa7b542\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:35Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:35Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d8727c0c5c76c4071561c95b14554063c01a2d7d826bfef19624a346f7079096\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c63926690deadbe1a0b9bd4228c1be8c9d959ae52dc13540e1d7da0ef6ce2b44\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://12e55656cf2a2cadbef853f458f347aabe75bd6b644a48a8c320144a0bbb77f8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ff005273cedb4a3be19dafd07b2572733446d00c1aa3d89dec6dad79a9b37cb7\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://43f9db3464d315f5b596f15327b9e7cfc2935bb8f18492bcf4aa1b2d8e2e8951\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-28T06:52:53Z\\\",\\\"message\\\":\\\" to sync for 
client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file\\\\nI1128 06:52:53.805479 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\nI1128 06:52:53.805349 1 requestheader_controller.go:172] Starting RequestHeaderAuthRequestController\\\\nI1128 06:52:53.807455 1 shared_informer.go:313] Waiting for caches to sync for RequestHeaderAuthRequestController\\\\nI1128 06:52:53.805665 1 envvar.go:172] \\\\\\\"Feature gate default state\\\\\\\" feature=\\\\\\\"WatchListClient\\\\\\\" enabled=false\\\\nI1128 06:52:53.807562 1 envvar.go:172] \\\\\\\"Feature gate default state\\\\\\\" feature=\\\\\\\"InformerResourceVersion\\\\\\\" enabled=false\\\\nI1128 06:52:53.805922 1 tlsconfig.go:203] \\\\\\\"Loaded serving cert\\\\\\\" certName=\\\\\\\"serving-cert::/tmp/serving-cert-870734072/tls.crt::/tmp/serving-cert-870734072/tls.key\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"localhost\\\\\\\\\\\\\\\" [serving] validServingFor=[localhost] issuer=\\\\\\\\\\\\\\\"check-endpoints-signer@1764312757\\\\\\\\\\\\\\\" (2025-11-28 06:52:36 +0000 UTC to 2025-12-28 06:52:37 +0000 UTC (now=2025-11-28 06:52:53.805871396 +0000 UTC))\\\\\\\"\\\\nI1128 06:52:53.808356 1 named_certificates.go:53] \\\\\\\"Loaded SNI cert\\\\\\\" index=0 certName=\\\\\\\"self-signed loopback\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"apiserver-loopback-client@1764312768\\\\\\\\\\\\\\\" [serving] validServingFor=[apiserver-loopback-client] issuer=\\\\\\\\\\\\\\\"apiserver-loopback-client-ca@1764312768\\\\\\\\\\\\\\\" (2025-11-28 05:52:47 +0000 UTC to 2026-11-28 05:52:47 +0000 UTC (now=2025-11-28 06:52:53.808326937 +0000 UTC))\\\\\\\"\\\\nI1128 06:52:53.808434 1 secure_serving.go:213] Serving securely on [::]:17697\\\\nI1128 06:52:53.808491 1 genericapiserver.go:683] [graceful-termination] waiting for shutdown to be initiated\\\\nI1128 06:52:53.805962 1 dynamic_serving_content.go:135] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-870734072/tls.crt::/tmp/serving-cert-870734072/tls.key\\\\\\\"\\\\nI1128 06:52:53.808872 1 tlsconfig.go:243] \\\\\\\"Starting DynamicServingCertificateController\\\\\\\"\\\\nF1128 06:52:53.810397 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T06:52:37Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cd17aa15325fbe3a40b81cea5bfec137ef02a8dd2d5f8393be4d206a8360ba89\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:37Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9a1a9e0fb2169a4caf2986906a87573648458725e2571ec377704856d3c16b47\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9a1a9e0fb2169a4caf2986906a87573648458725e2571ec377704856d3c16b47\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T06:52:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T06:52:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T06:52:35Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:52:58Z is after 2025-08-24T17:21:41Z" Nov 28 06:52:58 crc kubenswrapper[4922]: I1128 06:52:58.798887 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-w9zxj" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"e1f29751-83a6-4469-b733-50e654026f8c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://679d64be6eca63ee652a4cb6cc5432c6ce549d1de243bdfbde115af634e770e3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ghdft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T06:52:54Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-w9zxj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:52:58Z is after 2025-08-24T17:21:41Z" Nov 28 06:52:58 crc kubenswrapper[4922]: I1128 06:52:58.812700 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-jgzjd" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b05f16bb-1729-4fd8-883a-4fb960bf4cff\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://099001160eba2d5545fdd42a6fac2b9842549a64b5d3fe093c274e073a156e5a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fwd5b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T06:52:54Z\\\"}}\" for pod \"openshift-multus\"/\"multus-jgzjd\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:52:58Z is after 2025-08-24T17:21:41Z" Nov 28 06:52:58 crc kubenswrapper[4922]: I1128 06:52:58.828353 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-xm948" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"28b50482-ffec-406c-9ff9-9604bce5d5d5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"message\\\":\\\"containers with incomplete status: [bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-76zhj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ea8d65e8ee7eb92aa290261e8f51b41b46819bc513aaca5c3bbdf3aa90a4acf1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ea8d65e8ee7eb92aa290261e8f51b41b46819bc513aaca5c3bbdf3aa90a4acf1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T06:52:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin
\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-76zhj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b52be6274c308e05417b43b1a405e2156837ac5b1e751cf1bf07f1820e5072c1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b52be6274c308e05417b43b1a405e2156837ac5b1e751cf1bf07f1820e5072c1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T06:52:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T06:52:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-76zhj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-76zhj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-76zhj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}
,{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-76zhj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-76zhj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T06:52:54Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-xm948\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:52:58Z is after 2025-08-24T17:21:41Z" Nov 28 06:52:58 crc kubenswrapper[4922]: I1128 06:52:58.841029 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-z5d9x" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"21211ec8-3baf-4230-9cd8-c641f6bdc0e1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d42d2096b746c6f0a9210409101237a8b283b9baa914633aefc36971712ec634\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g4hdz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T06:52:57Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-z5d9x\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:52:58Z is after 2025-08-24T17:21:41Z" Nov 28 06:52:58 crc kubenswrapper[4922]: I1128 06:52:58.851356 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:52:58 crc kubenswrapper[4922]: I1128 06:52:58.851402 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:52:58 crc kubenswrapper[4922]: I1128 06:52:58.851414 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:52:58 crc kubenswrapper[4922]: I1128 06:52:58.851433 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:52:58 crc kubenswrapper[4922]: I1128 06:52:58.851446 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:52:58Z","lastTransitionTime":"2025-11-28T06:52:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: 
NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:52:58 crc kubenswrapper[4922]: I1128 06:52:58.854580 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f54a7fc3-dff2-4919-91a9-7defe17b717e\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2b39d53d3c00be07ad6ed44fb37961669118efd3f0a953a39e05c90c9fc30319\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1c84fe4f54f9b9fc76f23f13aaee875e9d127be3dec3e3952893436fb9d94936\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bf8a543b9af64068e2164c1fb7fcf8117e36a3f1a6c6e40df2a63da8fea86612\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:36
Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7ef4b355167e11c82fb8067373d0d1b4ec5551157e683043c98f9249082795d2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T06:52:35Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:52:58Z is after 2025-08-24T17:21:41Z" Nov 28 06:52:58 crc kubenswrapper[4922]: I1128 06:52:58.865380 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:52:58Z is after 2025-08-24T17:21:41Z" Nov 28 06:52:58 crc kubenswrapper[4922]: I1128 06:52:58.881062 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"78d1b075-6126-476f-8318-483aeaa7b542\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:35Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:35Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d8727c0c5c76c4071561c95b14554063c01a2d7d826bfef19624a346f7079096\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c63926690deadbe1a0b9bd4228c1be8c9d959ae52dc13540e1d7da0ef6ce2b44\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://12e55656cf2a2cadbef853f458f347aabe75bd6b644a48a8c320144a0bbb77f8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ff005273cedb4a3be19dafd07b2572733446d00c1aa3d89dec6dad79a9b37cb7\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://43f9db3464d315f5b596f15327b9e7cfc2935bb8f18492bcf4aa1b2d8e2e8951\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-28T06:52:53Z\\\",\\\"message\\\":\\\" to sync for 
client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file\\\\nI1128 06:52:53.805479 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\nI1128 06:52:53.805349 1 requestheader_controller.go:172] Starting RequestHeaderAuthRequestController\\\\nI1128 06:52:53.807455 1 shared_informer.go:313] Waiting for caches to sync for RequestHeaderAuthRequestController\\\\nI1128 06:52:53.805665 1 envvar.go:172] \\\\\\\"Feature gate default state\\\\\\\" feature=\\\\\\\"WatchListClient\\\\\\\" enabled=false\\\\nI1128 06:52:53.807562 1 envvar.go:172] \\\\\\\"Feature gate default state\\\\\\\" feature=\\\\\\\"InformerResourceVersion\\\\\\\" enabled=false\\\\nI1128 06:52:53.805922 1 tlsconfig.go:203] \\\\\\\"Loaded serving cert\\\\\\\" certName=\\\\\\\"serving-cert::/tmp/serving-cert-870734072/tls.crt::/tmp/serving-cert-870734072/tls.key\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"localhost\\\\\\\\\\\\\\\" [serving] validServingFor=[localhost] issuer=\\\\\\\\\\\\\\\"check-endpoints-signer@1764312757\\\\\\\\\\\\\\\" (2025-11-28 06:52:36 +0000 UTC to 2025-12-28 06:52:37 +0000 UTC (now=2025-11-28 06:52:53.805871396 +0000 UTC))\\\\\\\"\\\\nI1128 06:52:53.808356 1 named_certificates.go:53] \\\\\\\"Loaded SNI cert\\\\\\\" index=0 certName=\\\\\\\"self-signed loopback\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"apiserver-loopback-client@1764312768\\\\\\\\\\\\\\\" [serving] validServingFor=[apiserver-loopback-client] issuer=\\\\\\\\\\\\\\\"apiserver-loopback-client-ca@1764312768\\\\\\\\\\\\\\\" (2025-11-28 05:52:47 +0000 UTC to 2026-11-28 05:52:47 +0000 UTC (now=2025-11-28 06:52:53.808326937 +0000 UTC))\\\\\\\"\\\\nI1128 06:52:53.808434 1 secure_serving.go:213] Serving securely on [::]:17697\\\\nI1128 06:52:53.808491 1 genericapiserver.go:683] [graceful-termination] waiting for shutdown to be initiated\\\\nI1128 06:52:53.805962 1 dynamic_serving_content.go:135] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-870734072/tls.crt::/tmp/serving-cert-870734072/tls.key\\\\\\\"\\\\nI1128 06:52:53.808872 1 tlsconfig.go:243] \\\\\\\"Starting DynamicServingCertificateController\\\\\\\"\\\\nF1128 06:52:53.810397 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T06:52:37Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cd17aa15325fbe3a40b81cea5bfec137ef02a8dd2d5f8393be4d206a8360ba89\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:37Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9a1a9e0fb2169a4caf2986906a87573648458725e2571ec377704856d3c16b47\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9a1a9e0fb2169a4caf2986906a87573648458725e2571ec377704856d3c16b47\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T06:52:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T06:52:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T06:52:35Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:52:58Z is after 2025-08-24T17:21:41Z" Nov 28 06:52:58 crc kubenswrapper[4922]: I1128 06:52:58.897311 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:57Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:57Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4fb859dbed8c4c25965a739a2bd823f90f867a3bb629206994858cd62ba597ff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:52:58Z is after 2025-08-24T17:21:41Z" Nov 28 06:52:58 crc kubenswrapper[4922]: I1128 06:52:58.910117 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:52:58Z is after 2025-08-24T17:21:41Z" Nov 28 06:52:58 crc kubenswrapper[4922]: I1128 06:52:58.928685 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-7gdxt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ac5c6b67-2037-400e-8e03-845b47d8ca67\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dtxdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dtxdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dtxdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-dtxdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dtxdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dtxdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dtxdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dtxdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3aaa3ffb5b99838c7969b3e241ad9d7a0bc4797fac3ec52280914abbb7ca1b24\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3aaa3ffb5b99838c7969b3e241ad9d7a0bc4797fac3ec52280914abbb7ca1b24\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T06:52:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dtxdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T06:52:54Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-7gdxt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:52:58Z 
is after 2025-08-24T17:21:41Z" Nov 28 06:52:58 crc kubenswrapper[4922]: I1128 06:52:58.955592 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:52:58 crc kubenswrapper[4922]: I1128 06:52:58.955647 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:52:58 crc kubenswrapper[4922]: I1128 06:52:58.955660 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:52:58 crc kubenswrapper[4922]: I1128 06:52:58.955681 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:52:58 crc kubenswrapper[4922]: I1128 06:52:58.955694 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:52:58Z","lastTransitionTime":"2025-11-28T06:52:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:52:58 crc kubenswrapper[4922]: I1128 06:52:58.959445 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f54a7fc3-dff2-4919-91a9-7defe17b717e\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2b39d53d3c00be07ad6ed44fb37961669118efd3f0a953a39e05c90c9fc30319\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1c84fe4f54f9b9fc76f23f13aaee875e9d127be3dec3e3952893436fb9d94936\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\
\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bf8a543b9af64068e2164c1fb7fcf8117e36a3f1a6c6e40df2a63da8fea86612\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7ef4b355167e11c82fb8067373d0d1b4ec5551157e683043c98f9249082795d2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T06:52:35Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:52:58Z is after 2025-08-24T17:21:41Z" Nov 28 06:52:58 crc kubenswrapper[4922]: I1128 06:52:58.999451 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:52:58Z is after 2025-08-24T17:21:41Z" Nov 28 06:52:59 crc kubenswrapper[4922]: I1128 06:52:59.030125 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-w9zxj" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"e1f29751-83a6-4469-b733-50e654026f8c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://679d64be6eca63ee652a4cb6cc5432c6ce549d1de243bdfbde115af634e770e3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ghdft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T06:52:54Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-w9zxj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:52:59Z is after 2025-08-24T17:21:41Z" Nov 28 06:52:59 crc kubenswrapper[4922]: I1128 06:52:59.058060 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:52:59 crc kubenswrapper[4922]: I1128 06:52:59.058129 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:52:59 crc kubenswrapper[4922]: I1128 06:52:59.058151 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:52:59 crc kubenswrapper[4922]: I1128 06:52:59.058182 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:52:59 crc kubenswrapper[4922]: I1128 06:52:59.058203 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:52:59Z","lastTransitionTime":"2025-11-28T06:52:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: 
no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:52:59 crc kubenswrapper[4922]: I1128 06:52:59.074775 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-jgzjd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b05f16bb-1729-4fd8-883a-4fb960bf4cff\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://099001160eba2d5545fdd42a6fac2b9842549a64b5d3fe093c274e073a156e5a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fwd5b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\
\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T06:52:54Z\\\"}}\" for pod \"openshift-multus\"/\"multus-jgzjd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:52:59Z is after 2025-08-24T17:21:41Z" Nov 28 06:52:59 crc kubenswrapper[4922]: I1128 06:52:59.116259 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-xm948" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"28b50482-ffec-406c-9ff9-9604bce5d5d5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"message\\\":\\\"containers with incomplete status: [routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-76zhj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ea8d65e8ee7eb92aa290261e8f51b41b46819bc513aaca5c3bbdf3aa90a4acf1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ea8d65e8ee7eb92aa290261e8f51b41b46819bc513aaca5c3bbdf3aa90a4acf1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T06:52:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-76zhj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b52be6274c308e05417b43b1a405e2156837ac5b1e751cf1bf07f1820e5072c1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b52be6274c308e05417b43b1a405e2156837ac5b1e751cf1bf07f1820e5072c1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T06:52:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T06:52:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\"
:\\\"kube-api-access-76zhj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2e49c9118ef8a51223ca7a5ed0e966458c25e01d5fe81bffeecf85e3c958177a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2e49c9118ef8a51223ca7a5ed0e966458c25e01d5fe81bffeecf85e3c958177a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T06:52:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T06:52:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-76zhj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-76zhj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-76zhj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/
cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-76zhj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T06:52:54Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-xm948\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:52:59Z is after 2025-08-24T17:21:41Z" Nov 28 06:52:59 crc kubenswrapper[4922]: I1128 06:52:59.151948 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-z5d9x" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"21211ec8-3baf-4230-9cd8-c641f6bdc0e1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d42d2096b746c6f0a9210409101237a8b283b9baa914633aefc36971712ec634\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g4hdz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T06:52:57Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-z5d9x\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:52:59Z is after 
2025-08-24T17:21:41Z" Nov 28 06:52:59 crc kubenswrapper[4922]: I1128 06:52:59.160640 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:52:59 crc kubenswrapper[4922]: I1128 06:52:59.160694 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:52:59 crc kubenswrapper[4922]: I1128 06:52:59.160712 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:52:59 crc kubenswrapper[4922]: I1128 06:52:59.160737 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:52:59 crc kubenswrapper[4922]: I1128 06:52:59.160763 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:52:59Z","lastTransitionTime":"2025-11-28T06:52:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:52:59 crc kubenswrapper[4922]: I1128 06:52:59.193551 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://60275d4709ecec957c37c52e762628422258288394fb23bb6190e98253977288\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d7be069e52fb9de73b7d4297a34eaab68e25c764ed60172e87146259d0d0b6ed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\
\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:52:59Z is after 2025-08-24T17:21:41Z" Nov 28 06:52:59 crc kubenswrapper[4922]: I1128 06:52:59.236005 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8de96806bba4cb76f53ccf39f1188732d96955671049b550589a836ed21e0616\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:52:59Z is after 2025-08-24T17:21:41Z" Nov 28 06:52:59 crc kubenswrapper[4922]: I1128 06:52:59.263892 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:52:59 crc kubenswrapper[4922]: I1128 06:52:59.263952 4922 kubelet_node_status.go:724] 
"Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:52:59 crc kubenswrapper[4922]: I1128 06:52:59.263970 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:52:59 crc kubenswrapper[4922]: I1128 06:52:59.263997 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:52:59 crc kubenswrapper[4922]: I1128 06:52:59.264017 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:52:59Z","lastTransitionTime":"2025-11-28T06:52:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:52:59 crc kubenswrapper[4922]: I1128 06:52:59.277908 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:52:59Z is after 2025-08-24T17:21:41Z" Nov 28 06:52:59 crc kubenswrapper[4922]: I1128 06:52:59.316097 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-h8wk6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0498340a-5e95-42bf-a0a6-8ac89a6b8858\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://19e8a18b97500977a51210206bba89633ff9939e989bfe1a6e471f5a23c83520\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zf7vq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b903fc60349ec7d5004f23ec933dce13d646a2c343819495564005dfb4813314\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":tru
e,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zf7vq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T06:52:54Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-h8wk6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:52:59Z is after 2025-08-24T17:21:41Z" Nov 28 06:52:59 crc kubenswrapper[4922]: I1128 06:52:59.366596 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:52:59 crc kubenswrapper[4922]: I1128 06:52:59.366668 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:52:59 crc kubenswrapper[4922]: I1128 06:52:59.366687 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:52:59 crc kubenswrapper[4922]: I1128 06:52:59.366715 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:52:59 crc kubenswrapper[4922]: I1128 06:52:59.366733 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:52:59Z","lastTransitionTime":"2025-11-28T06:52:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:52:59 crc kubenswrapper[4922]: I1128 06:52:59.469634 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:52:59 crc kubenswrapper[4922]: I1128 06:52:59.469692 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:52:59 crc kubenswrapper[4922]: I1128 06:52:59.469710 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:52:59 crc kubenswrapper[4922]: I1128 06:52:59.469735 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:52:59 crc kubenswrapper[4922]: I1128 06:52:59.469755 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:52:59Z","lastTransitionTime":"2025-11-28T06:52:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 06:52:59 crc kubenswrapper[4922]: I1128 06:52:59.577439 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:52:59 crc kubenswrapper[4922]: I1128 06:52:59.577525 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:52:59 crc kubenswrapper[4922]: I1128 06:52:59.577550 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:52:59 crc kubenswrapper[4922]: I1128 06:52:59.577581 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:52:59 crc kubenswrapper[4922]: I1128 06:52:59.577604 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:52:59Z","lastTransitionTime":"2025-11-28T06:52:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:52:59 crc kubenswrapper[4922]: I1128 06:52:59.634144 4922 generic.go:334] "Generic (PLEG): container finished" podID="28b50482-ffec-406c-9ff9-9604bce5d5d5" containerID="10808a5202288b64a0583c474df43f7ad4846b876c071e3797e9403416dbd4e5" exitCode=0 Nov 28 06:52:59 crc kubenswrapper[4922]: I1128 06:52:59.634237 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-xm948" event={"ID":"28b50482-ffec-406c-9ff9-9604bce5d5d5","Type":"ContainerDied","Data":"10808a5202288b64a0583c474df43f7ad4846b876c071e3797e9403416dbd4e5"} Nov 28 06:52:59 crc kubenswrapper[4922]: I1128 06:52:59.640381 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-7gdxt" event={"ID":"ac5c6b67-2037-400e-8e03-845b47d8ca67","Type":"ContainerStarted","Data":"d31c99f96b8af9c7b9c47c4ccd0b6485b23c9eacfe293667038042df8330d8a9"} Nov 28 06:52:59 crc kubenswrapper[4922]: I1128 06:52:59.671481 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"78d1b075-6126-476f-8318-483aeaa7b542\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:35Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:35Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d8727c0c5c76c4071561c95b14554063c01a2d7d826bfef19624a346f7079096\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c63926690deadbe1a0b9bd4228c1be8c9d959ae52dc13540e1d7da0ef6ce2b44\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://12e55656cf2a2cadbef853f458f347aabe75bd6b644a48a8c320144a0bbb77f8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ff005273cedb4a3be19dafd07b2572733446d00c1aa3d89dec6dad79a9b37cb7\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://43f9db3464d315f5b596f15327b9e7cfc2935bb8f18492bcf4aa1b2d8e2e8951\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-28T06:52:53Z\\\",\\\"message\\\":\\\" to sync for 
client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file\\\\nI1128 06:52:53.805479 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\nI1128 06:52:53.805349 1 requestheader_controller.go:172] Starting RequestHeaderAuthRequestController\\\\nI1128 06:52:53.807455 1 shared_informer.go:313] Waiting for caches to sync for RequestHeaderAuthRequestController\\\\nI1128 06:52:53.805665 1 envvar.go:172] \\\\\\\"Feature gate default state\\\\\\\" feature=\\\\\\\"WatchListClient\\\\\\\" enabled=false\\\\nI1128 06:52:53.807562 1 envvar.go:172] \\\\\\\"Feature gate default state\\\\\\\" feature=\\\\\\\"InformerResourceVersion\\\\\\\" enabled=false\\\\nI1128 06:52:53.805922 1 tlsconfig.go:203] \\\\\\\"Loaded serving cert\\\\\\\" certName=\\\\\\\"serving-cert::/tmp/serving-cert-870734072/tls.crt::/tmp/serving-cert-870734072/tls.key\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"localhost\\\\\\\\\\\\\\\" [serving] validServingFor=[localhost] issuer=\\\\\\\\\\\\\\\"check-endpoints-signer@1764312757\\\\\\\\\\\\\\\" (2025-11-28 06:52:36 +0000 UTC to 2025-12-28 06:52:37 +0000 UTC (now=2025-11-28 06:52:53.805871396 +0000 UTC))\\\\\\\"\\\\nI1128 06:52:53.808356 1 named_certificates.go:53] \\\\\\\"Loaded SNI cert\\\\\\\" index=0 certName=\\\\\\\"self-signed loopback\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"apiserver-loopback-client@1764312768\\\\\\\\\\\\\\\" [serving] validServingFor=[apiserver-loopback-client] issuer=\\\\\\\\\\\\\\\"apiserver-loopback-client-ca@1764312768\\\\\\\\\\\\\\\" (2025-11-28 05:52:47 +0000 UTC to 2026-11-28 05:52:47 +0000 UTC (now=2025-11-28 06:52:53.808326937 +0000 UTC))\\\\\\\"\\\\nI1128 06:52:53.808434 1 secure_serving.go:213] Serving securely on [::]:17697\\\\nI1128 06:52:53.808491 1 genericapiserver.go:683] [graceful-termination] waiting for shutdown to be initiated\\\\nI1128 06:52:53.805962 1 dynamic_serving_content.go:135] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-870734072/tls.crt::/tmp/serving-cert-870734072/tls.key\\\\\\\"\\\\nI1128 06:52:53.808872 1 tlsconfig.go:243] \\\\\\\"Starting DynamicServingCertificateController\\\\\\\"\\\\nF1128 06:52:53.810397 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T06:52:37Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cd17aa15325fbe3a40b81cea5bfec137ef02a8dd2d5f8393be4d206a8360ba89\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:37Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9a1a9e0fb2169a4caf2986906a87573648458725e2571ec377704856d3c16b47\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9a1a9e0fb2169a4caf2986906a87573648458725e2571ec377704856d3c16b47\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T06:52:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T06:52:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T06:52:35Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:52:59Z is after 2025-08-24T17:21:41Z" Nov 28 06:52:59 crc kubenswrapper[4922]: I1128 06:52:59.680067 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:52:59 crc kubenswrapper[4922]: I1128 06:52:59.680117 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:52:59 crc kubenswrapper[4922]: I1128 06:52:59.680132 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:52:59 crc kubenswrapper[4922]: I1128 06:52:59.680151 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:52:59 crc kubenswrapper[4922]: I1128 06:52:59.680164 4922 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:52:59Z","lastTransitionTime":"2025-11-28T06:52:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:52:59 crc kubenswrapper[4922]: I1128 06:52:59.689495 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:57Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:57Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4fb859dbed8c4c25965a739a2bd823f90f867a3bb629206994858cd62ba597ff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:52:59Z is after 2025-08-24T17:21:41Z" Nov 28 06:52:59 crc kubenswrapper[4922]: I1128 06:52:59.706281 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:52:59Z is after 2025-08-24T17:21:41Z" Nov 28 06:52:59 crc kubenswrapper[4922]: I1128 06:52:59.732196 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-7gdxt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ac5c6b67-2037-400e-8e03-845b47d8ca67\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dtxdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dtxdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dtxdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-dtxdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dtxdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dtxdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dtxdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dtxdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3aaa3ffb5b99838c7969b3e241ad9d7a0bc4797fac3ec52280914abbb7ca1b24\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3aaa3ffb5b99838c7969b3e241ad9d7a0bc4797fac3ec52280914abbb7ca1b24\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T06:52:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dtxdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T06:52:54Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-7gdxt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:52:59Z 
is after 2025-08-24T17:21:41Z" Nov 28 06:52:59 crc kubenswrapper[4922]: I1128 06:52:59.746128 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f54a7fc3-dff2-4919-91a9-7defe17b717e\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2b39d53d3c00be07ad6ed44fb37961669118efd3f0a953a39e05c90c9fc30319\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1c84fe4f54f9b9fc76f23f13aaee875e9d127be3dec3e3952893436fb9d94936\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bf8a543b9af64068e2164c1fb7fcf8117e36a3f1a6c6e40df2a63da8fea86612\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\
\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7ef4b355167e11c82fb8067373d0d1b4ec5551157e683043c98f9249082795d2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T06:52:35Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:52:59Z is after 2025-08-24T17:21:41Z" Nov 28 06:52:59 crc kubenswrapper[4922]: I1128 06:52:59.759752 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:52:59Z is after 2025-08-24T17:21:41Z" Nov 28 06:52:59 crc kubenswrapper[4922]: I1128 06:52:59.775148 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-w9zxj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e1f29751-83a6-4469-b733-50e654026f8c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://679d64be6eca63ee652a4cb6cc5432c6ce549d1de243bdfbde115af634e770e3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ghdft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T06:52:54Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-w9zxj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 
2025-11-28T06:52:59Z is after 2025-08-24T17:21:41Z" Nov 28 06:52:59 crc kubenswrapper[4922]: I1128 06:52:59.784348 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:52:59 crc kubenswrapper[4922]: I1128 06:52:59.784398 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:52:59 crc kubenswrapper[4922]: I1128 06:52:59.784417 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:52:59 crc kubenswrapper[4922]: I1128 06:52:59.784437 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:52:59 crc kubenswrapper[4922]: I1128 06:52:59.784454 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:52:59Z","lastTransitionTime":"2025-11-28T06:52:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:52:59 crc kubenswrapper[4922]: I1128 06:52:59.794614 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-jgzjd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b05f16bb-1729-4fd8-883a-4fb960bf4cff\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://099001160eba2d5545fdd42a6fac2b9842549a64b5d3fe093c274e073a156e5a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":
\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fwd5b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T06:52:54Z\\\"}}\" for pod \"openshift-multus\"/\"multus-jgzjd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:52:59Z is after 2025-08-24T17:21:41Z" Nov 28 06:52:59 crc kubenswrapper[4922]: I1128 06:52:59.816383 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-xm948" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"28b50482-ffec-406c-9ff9-9604bce5d5d5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"message\\\":\\\"containers with incomplete status: [whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-76zhj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ea8d65e8ee7eb92aa290261e8f51b41b46819bc513aaca5c3bbdf3aa90a4acf1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ea8d65e8ee7eb92aa290261e8f51b41b46819bc513aaca5c3bbdf3aa90a4acf1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T06:52:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-76zhj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b52be6274c308e05417b43b1a405e2156837ac5b1e751cf1bf07f1820e5072c1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b52be6274c308e05417b43b1a405e2156837ac5b1e751cf1bf07f1820e5072c1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T06:52:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T06:52:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\"
:\\\"kube-api-access-76zhj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2e49c9118ef8a51223ca7a5ed0e966458c25e01d5fe81bffeecf85e3c958177a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2e49c9118ef8a51223ca7a5ed0e966458c25e01d5fe81bffeecf85e3c958177a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T06:52:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T06:52:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-76zhj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://10808a5202288b64a0583c474df43f7ad4846b876c071e3797e9403416dbd4e5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://10808a5202288b64a0583c474df43f7ad4846b876c071e3797e9403416dbd4e5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T06:52:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T06:52:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-76zhj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-76zhj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disa
bled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-76zhj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T06:52:54Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-xm948\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:52:59Z is after 2025-08-24T17:21:41Z" Nov 28 06:52:59 crc kubenswrapper[4922]: I1128 06:52:59.829474 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-z5d9x" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"21211ec8-3baf-4230-9cd8-c641f6bdc0e1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d42d2096b746c6f0a9210409101237a8b283b9baa914633aefc36971712ec634\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g4hdz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\
"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T06:52:57Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-z5d9x\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:52:59Z is after 2025-08-24T17:21:41Z" Nov 28 06:52:59 crc kubenswrapper[4922]: I1128 06:52:59.844423 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://60275d4709ecec957c37c52e762628422258288394fb23bb6190e98253977288\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d7be069e52fb9de73b7d4297a34eaab68e25c764ed60172e87146259d0d0b6ed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:52:59Z is after 2025-08-24T17:21:41Z" Nov 28 06:52:59 crc kubenswrapper[4922]: I1128 06:52:59.861160 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8de96806bba4cb76f53ccf39f1188732d96955671049b550589a836ed21e0616\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:52:59Z is after 2025-08-24T17:21:41Z" Nov 28 06:52:59 crc kubenswrapper[4922]: I1128 06:52:59.875280 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:52:59Z is after 2025-08-24T17:21:41Z" Nov 28 06:52:59 crc kubenswrapper[4922]: I1128 06:52:59.886378 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:52:59 crc kubenswrapper[4922]: I1128 06:52:59.886414 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:52:59 crc kubenswrapper[4922]: I1128 06:52:59.886427 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:52:59 crc kubenswrapper[4922]: I1128 06:52:59.886445 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:52:59 crc kubenswrapper[4922]: I1128 06:52:59.886457 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:52:59Z","lastTransitionTime":"2025-11-28T06:52:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 06:52:59 crc kubenswrapper[4922]: I1128 06:52:59.890752 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-h8wk6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0498340a-5e95-42bf-a0a6-8ac89a6b8858\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://19e8a18b97500977a51210206bba89633ff9939e989bfe1a6e471f5a23c83520\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zf7vq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b903fc60349ec7d5004f23ec933dce13d646a2c343819495564005dfb4813314\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zf7vq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T06:52:54Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-h8wk6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:52:59Z is after 2025-08-24T17:21:41Z" Nov 28 06:52:59 crc kubenswrapper[4922]: I1128 06:52:59.989212 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:52:59 crc kubenswrapper[4922]: I1128 06:52:59.989565 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:52:59 crc kubenswrapper[4922]: I1128 06:52:59.989839 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:52:59 crc kubenswrapper[4922]: I1128 06:52:59.990092 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:52:59 crc kubenswrapper[4922]: I1128 06:52:59.990369 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:52:59Z","lastTransitionTime":"2025-11-28T06:52:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:53:00 crc kubenswrapper[4922]: I1128 06:53:00.093193 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:00 crc kubenswrapper[4922]: I1128 06:53:00.093281 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:00 crc kubenswrapper[4922]: I1128 06:53:00.093301 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:00 crc kubenswrapper[4922]: I1128 06:53:00.093328 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:00 crc kubenswrapper[4922]: I1128 06:53:00.093348 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:00Z","lastTransitionTime":"2025-11-28T06:53:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 06:53:00 crc kubenswrapper[4922]: I1128 06:53:00.196689 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:00 crc kubenswrapper[4922]: I1128 06:53:00.196769 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:00 crc kubenswrapper[4922]: I1128 06:53:00.196792 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:00 crc kubenswrapper[4922]: I1128 06:53:00.196829 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:00 crc kubenswrapper[4922]: I1128 06:53:00.196851 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:00Z","lastTransitionTime":"2025-11-28T06:53:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:53:00 crc kubenswrapper[4922]: I1128 06:53:00.300502 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:00 crc kubenswrapper[4922]: I1128 06:53:00.300568 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:00 crc kubenswrapper[4922]: I1128 06:53:00.300589 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:00 crc kubenswrapper[4922]: I1128 06:53:00.300644 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:00 crc kubenswrapper[4922]: I1128 06:53:00.300664 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:00Z","lastTransitionTime":"2025-11-28T06:53:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:53:00 crc kubenswrapper[4922]: I1128 06:53:00.398311 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 06:53:00 crc kubenswrapper[4922]: I1128 06:53:00.398322 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 06:53:00 crc kubenswrapper[4922]: E1128 06:53:00.398688 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 06:53:00 crc kubenswrapper[4922]: E1128 06:53:00.398509 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 06:53:00 crc kubenswrapper[4922]: I1128 06:53:00.398322 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 06:53:00 crc kubenswrapper[4922]: E1128 06:53:00.398898 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 06:53:00 crc kubenswrapper[4922]: I1128 06:53:00.403380 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:00 crc kubenswrapper[4922]: I1128 06:53:00.403428 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:00 crc kubenswrapper[4922]: I1128 06:53:00.403446 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:00 crc kubenswrapper[4922]: I1128 06:53:00.403468 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:00 crc kubenswrapper[4922]: I1128 06:53:00.403487 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:00Z","lastTransitionTime":"2025-11-28T06:53:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 06:53:00 crc kubenswrapper[4922]: I1128 06:53:00.507063 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:00 crc kubenswrapper[4922]: I1128 06:53:00.507137 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:00 crc kubenswrapper[4922]: I1128 06:53:00.507159 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:00 crc kubenswrapper[4922]: I1128 06:53:00.507189 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:00 crc kubenswrapper[4922]: I1128 06:53:00.507210 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:00Z","lastTransitionTime":"2025-11-28T06:53:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:53:00 crc kubenswrapper[4922]: I1128 06:53:00.610138 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:00 crc kubenswrapper[4922]: I1128 06:53:00.610256 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:00 crc kubenswrapper[4922]: I1128 06:53:00.610288 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:00 crc kubenswrapper[4922]: I1128 06:53:00.610316 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:00 crc kubenswrapper[4922]: I1128 06:53:00.610334 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:00Z","lastTransitionTime":"2025-11-28T06:53:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 06:53:00 crc kubenswrapper[4922]: I1128 06:53:00.647764 4922 generic.go:334] "Generic (PLEG): container finished" podID="28b50482-ffec-406c-9ff9-9604bce5d5d5" containerID="f17f7a3f2eae1a08db3ab5d237156d3555583ed11bdf121c4138312913bc2ad1" exitCode=0 Nov 28 06:53:00 crc kubenswrapper[4922]: I1128 06:53:00.647826 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-xm948" event={"ID":"28b50482-ffec-406c-9ff9-9604bce5d5d5","Type":"ContainerDied","Data":"f17f7a3f2eae1a08db3ab5d237156d3555583ed11bdf121c4138312913bc2ad1"} Nov 28 06:53:00 crc kubenswrapper[4922]: I1128 06:53:00.672891 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f54a7fc3-dff2-4919-91a9-7defe17b717e\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2b39d53d3c00be07ad6ed44fb37961669118efd3f0a953a39e05c90c9fc30319\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1c84fe4f54f9b9fc76f23f13aaee875e9d127be3dec3e3952893436fb9d94936\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bf8a543b9af64068e2164c1fb7fcf8117e36a3f1a6c6e40df2a63da8fea86612\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a57
8bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7ef4b355167e11c82fb8067373d0d1b4ec5551157e683043c98f9249082795d2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T06:52:35Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:00Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:00 crc kubenswrapper[4922]: I1128 06:53:00.694742 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:00Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:00 crc kubenswrapper[4922]: I1128 06:53:00.712962 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-w9zxj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e1f29751-83a6-4469-b733-50e654026f8c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://679d64be6eca63ee652a4cb6cc5432c6ce549d1de243bdfbde115af634e770e3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ghdft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\
\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T06:52:54Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-w9zxj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:00Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:00 crc kubenswrapper[4922]: I1128 06:53:00.715387 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:00 crc kubenswrapper[4922]: I1128 06:53:00.716009 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:00 crc kubenswrapper[4922]: I1128 06:53:00.716027 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:00 crc kubenswrapper[4922]: I1128 06:53:00.716052 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:00 crc kubenswrapper[4922]: I1128 06:53:00.716069 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:00Z","lastTransitionTime":"2025-11-28T06:53:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:53:00 crc kubenswrapper[4922]: I1128 06:53:00.742625 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-jgzjd" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b05f16bb-1729-4fd8-883a-4fb960bf4cff\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://099001160eba2d5545fdd42a6fac2b9842549a64b5d3fe093c274e073a156e5a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fwd5b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T06:52:54Z\\\"}}\" for pod \"openshift-multus\"/\"multus-jgzjd\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:00Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:00 crc kubenswrapper[4922]: I1128 06:53:00.766491 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-xm948" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"28b50482-ffec-406c-9ff9-9604bce5d5d5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"message\\\":\\\"containers with incomplete status: [whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-76zhj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ea8d65e8ee7eb92aa290261e8f51b41b46819bc513aaca5c3bbdf3aa90a4acf1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ea8d65e8ee7eb92aa290261e8f51b41b46819bc513aaca5c3bbdf3aa90a4acf1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T06:52:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/
host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-76zhj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b52be6274c308e05417b43b1a405e2156837ac5b1e751cf1bf07f1820e5072c1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b52be6274c308e05417b43b1a405e2156837ac5b1e751cf1bf07f1820e5072c1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T06:52:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T06:52:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-76zhj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2e49c9118ef8a51223ca7a5ed0e966458c25e01d5fe81bffeecf85e3c958177a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2e49c9118ef8a51223ca7a5ed0e966458c25e01d5fe81bffeecf85e3c958177a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T06:52:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T06:52:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-76zhj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://10808a5202288b64a0583c474df43f7ad4846b876c071e3797e9403416dbd4e5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"sta
rted\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://10808a5202288b64a0583c474df43f7ad4846b876c071e3797e9403416dbd4e5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T06:52:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T06:52:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-76zhj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f17f7a3f2eae1a08db3ab5d237156d3555583ed11bdf121c4138312913bc2ad1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f17f7a3f2eae1a08db3ab5d237156d3555583ed11bdf121c4138312913bc2ad1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T06:53:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T06:52:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-76zhj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-76zhj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T06:52:54Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-xm948\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:00Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:00 crc kubenswrapper[4922]: I1128 06:53:00.785729 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-z5d9x" err="failed to patch 
status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"21211ec8-3baf-4230-9cd8-c641f6bdc0e1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d42d2096b746c6f0a9210409101237a8b283b9baa914633aefc36971712ec634\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g4hdz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T06:52:57Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-z5d9x\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:00Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:00 crc kubenswrapper[4922]: I1128 06:53:00.805112 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://60275d4709ecec957c37c52e762628422258288394fb23bb6190e98253977288\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d7be069e52fb9de73b7d4297a34eaab68e25c764ed60172e87146259d0d0b6ed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:00Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:00 crc kubenswrapper[4922]: I1128 06:53:00.819458 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:00 crc kubenswrapper[4922]: I1128 06:53:00.819500 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:00 crc kubenswrapper[4922]: I1128 06:53:00.819516 4922 kubelet_node_status.go:724] "Recording event message for node" 
node="crc" event="NodeHasSufficientPID" Nov 28 06:53:00 crc kubenswrapper[4922]: I1128 06:53:00.819538 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:00 crc kubenswrapper[4922]: I1128 06:53:00.819554 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:00Z","lastTransitionTime":"2025-11-28T06:53:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:53:00 crc kubenswrapper[4922]: I1128 06:53:00.832173 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8de96806bba4cb76f53ccf39f1188732d96955671049b550589a836ed21e0616\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:00Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:00 crc kubenswrapper[4922]: I1128 06:53:00.850440 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:00Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:00 crc kubenswrapper[4922]: I1128 06:53:00.864537 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-h8wk6" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0498340a-5e95-42bf-a0a6-8ac89a6b8858\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://19e8a18b97500977a51210206bba89633ff9939e989bfe1a6e471f5a23c83520\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zf7vq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b903fc60349ec7d5004f23ec933dce13d646a2c343819495564005dfb4813314\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zf7vq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T06:52:54Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-h8wk6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:00Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:00 crc kubenswrapper[4922]: I1128 06:53:00.885727 4922 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"78d1b075-6126-476f-8318-483aeaa7b542\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:35Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:35Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d8727c0c5c76c4071561c95b14554063c01a2d7d826bfef19624a346f7079096\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c63926690deadbe1a0b9bd4228c1be8c9d959ae52dc13540e1d7da0ef6ce2b44\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://12e55656cf2a2cadbef853f458f347aabe75bd6b644a48a8c320144a0bbb77f8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:36Z\\\"}},\\\"volumeMounts\\\":[{\\\
"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ff005273cedb4a3be19dafd07b2572733446d00c1aa3d89dec6dad79a9b37cb7\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://43f9db3464d315f5b596f15327b9e7cfc2935bb8f18492bcf4aa1b2d8e2e8951\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-28T06:52:53Z\\\",\\\"message\\\":\\\" to sync for client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file\\\\nI1128 06:52:53.805479 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\nI1128 06:52:53.805349 1 requestheader_controller.go:172] Starting RequestHeaderAuthRequestController\\\\nI1128 06:52:53.807455 1 shared_informer.go:313] Waiting for caches to sync for RequestHeaderAuthRequestController\\\\nI1128 06:52:53.805665 1 envvar.go:172] \\\\\\\"Feature gate default state\\\\\\\" feature=\\\\\\\"WatchListClient\\\\\\\" enabled=false\\\\nI1128 06:52:53.807562 1 envvar.go:172] \\\\\\\"Feature gate default state\\\\\\\" feature=\\\\\\\"InformerResourceVersion\\\\\\\" enabled=false\\\\nI1128 06:52:53.805922 1 tlsconfig.go:203] \\\\\\\"Loaded serving cert\\\\\\\" certName=\\\\\\\"serving-cert::/tmp/serving-cert-870734072/tls.crt::/tmp/serving-cert-870734072/tls.key\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"localhost\\\\\\\\\\\\\\\" [serving] validServingFor=[localhost] issuer=\\\\\\\\\\\\\\\"check-endpoints-signer@1764312757\\\\\\\\\\\\\\\" (2025-11-28 06:52:36 +0000 UTC to 2025-12-28 06:52:37 +0000 UTC (now=2025-11-28 06:52:53.805871396 +0000 UTC))\\\\\\\"\\\\nI1128 06:52:53.808356 1 named_certificates.go:53] \\\\\\\"Loaded SNI cert\\\\\\\" index=0 certName=\\\\\\\"self-signed loopback\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"apiserver-loopback-client@1764312768\\\\\\\\\\\\\\\" [serving] validServingFor=[apiserver-loopback-client] issuer=\\\\\\\\\\\\\\\"apiserver-loopback-client-ca@1764312768\\\\\\\\\\\\\\\" (2025-11-28 05:52:47 +0000 UTC to 2026-11-28 05:52:47 +0000 UTC (now=2025-11-28 06:52:53.808326937 +0000 UTC))\\\\\\\"\\\\nI1128 06:52:53.808434 1 secure_serving.go:213] Serving securely on [::]:17697\\\\nI1128 06:52:53.808491 1 genericapiserver.go:683] [graceful-termination] waiting for shutdown to be initiated\\\\nI1128 06:52:53.805962 1 dynamic_serving_content.go:135] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-870734072/tls.crt::/tmp/serving-cert-870734072/tls.key\\\\\\\"\\\\nI1128 06:52:53.808872 1 tlsconfig.go:243] \\\\\\\"Starting DynamicServingCertificateController\\\\\\\"\\\\nF1128 06:52:53.810397 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T06:52:37Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cd17aa15325fbe3a40b81cea5bfec137ef02a8dd2d5f8393be4d206a8360ba89\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:37Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9a1a9e0fb2169a4caf2986906a87573648458725e2571ec377704856d3c16b47\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9a1a9e0fb2169a4caf2986906a87573648458725e2571ec377704856d3c16b47\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T06:52:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T06:52:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T06:52:35Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:00Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:00 crc kubenswrapper[4922]: I1128 06:53:00.904884 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:57Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:57Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4fb859dbed8c4c25965a739a2bd823f90f867a3bb629206994858cd62ba597ff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:00Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:00 crc kubenswrapper[4922]: I1128 06:53:00.923863 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:00 crc kubenswrapper[4922]: I1128 06:53:00.923900 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:00 crc kubenswrapper[4922]: I1128 06:53:00.923912 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:00 crc kubenswrapper[4922]: I1128 06:53:00.923929 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:00 crc kubenswrapper[4922]: I1128 06:53:00.923942 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:00Z","lastTransitionTime":"2025-11-28T06:53:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 06:53:00 crc kubenswrapper[4922]: I1128 06:53:00.926010 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:00Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:00 crc kubenswrapper[4922]: I1128 06:53:00.951527 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-7gdxt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ac5c6b67-2037-400e-8e03-845b47d8ca67\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"message\\\":\\\"containers with unready 
status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dtxdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dtxdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dtxdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts
\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dtxdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dtxdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dtxdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host
-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dtxdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dtxdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3aaa3ffb5b99838c7969b3e241ad9d7a0bc4797fac3ec52280914abbb7ca1b24\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3aaa3ffb5b99838c7969b3e241ad9d7a0bc4797fac3ec52280914abbb7ca1b24\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T06:52:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dtxdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168
.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T06:52:54Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-7gdxt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:00Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:01 crc kubenswrapper[4922]: I1128 06:53:01.027459 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:01 crc kubenswrapper[4922]: I1128 06:53:01.027528 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:01 crc kubenswrapper[4922]: I1128 06:53:01.027551 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:01 crc kubenswrapper[4922]: I1128 06:53:01.027581 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:01 crc kubenswrapper[4922]: I1128 06:53:01.027603 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:01Z","lastTransitionTime":"2025-11-28T06:53:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:53:01 crc kubenswrapper[4922]: I1128 06:53:01.130716 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:01 crc kubenswrapper[4922]: I1128 06:53:01.130776 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:01 crc kubenswrapper[4922]: I1128 06:53:01.130802 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:01 crc kubenswrapper[4922]: I1128 06:53:01.130830 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:01 crc kubenswrapper[4922]: I1128 06:53:01.130854 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:01Z","lastTransitionTime":"2025-11-28T06:53:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 06:53:01 crc kubenswrapper[4922]: I1128 06:53:01.234397 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:01 crc kubenswrapper[4922]: I1128 06:53:01.234459 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:01 crc kubenswrapper[4922]: I1128 06:53:01.234477 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:01 crc kubenswrapper[4922]: I1128 06:53:01.234501 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:01 crc kubenswrapper[4922]: I1128 06:53:01.234519 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:01Z","lastTransitionTime":"2025-11-28T06:53:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:53:01 crc kubenswrapper[4922]: I1128 06:53:01.337542 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:01 crc kubenswrapper[4922]: I1128 06:53:01.337599 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:01 crc kubenswrapper[4922]: I1128 06:53:01.337616 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:01 crc kubenswrapper[4922]: I1128 06:53:01.337642 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:01 crc kubenswrapper[4922]: I1128 06:53:01.337660 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:01Z","lastTransitionTime":"2025-11-28T06:53:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:53:01 crc kubenswrapper[4922]: I1128 06:53:01.440162 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:01 crc kubenswrapper[4922]: I1128 06:53:01.440200 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:01 crc kubenswrapper[4922]: I1128 06:53:01.440211 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:01 crc kubenswrapper[4922]: I1128 06:53:01.440249 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:01 crc kubenswrapper[4922]: I1128 06:53:01.440260 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:01Z","lastTransitionTime":"2025-11-28T06:53:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 06:53:01 crc kubenswrapper[4922]: I1128 06:53:01.543166 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:01 crc kubenswrapper[4922]: I1128 06:53:01.543264 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:01 crc kubenswrapper[4922]: I1128 06:53:01.543292 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:01 crc kubenswrapper[4922]: I1128 06:53:01.543323 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:01 crc kubenswrapper[4922]: I1128 06:53:01.543346 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:01Z","lastTransitionTime":"2025-11-28T06:53:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:53:01 crc kubenswrapper[4922]: I1128 06:53:01.646548 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:01 crc kubenswrapper[4922]: I1128 06:53:01.646629 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:01 crc kubenswrapper[4922]: I1128 06:53:01.646660 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:01 crc kubenswrapper[4922]: I1128 06:53:01.646690 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:01 crc kubenswrapper[4922]: I1128 06:53:01.646716 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:01Z","lastTransitionTime":"2025-11-28T06:53:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 06:53:01 crc kubenswrapper[4922]: I1128 06:53:01.659393 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-7gdxt" event={"ID":"ac5c6b67-2037-400e-8e03-845b47d8ca67","Type":"ContainerStarted","Data":"67c452725d82ca16f6b13f31f18d78e040d878900953e2b1c2d43afa9b4bbdc0"} Nov 28 06:53:01 crc kubenswrapper[4922]: I1128 06:53:01.659954 4922 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-7gdxt" Nov 28 06:53:01 crc kubenswrapper[4922]: I1128 06:53:01.666630 4922 generic.go:334] "Generic (PLEG): container finished" podID="28b50482-ffec-406c-9ff9-9604bce5d5d5" containerID="8c558e4eb231b58b703d70d6da8454e7128ac7fc7138897bedcbdeb9e801a08b" exitCode=0 Nov 28 06:53:01 crc kubenswrapper[4922]: I1128 06:53:01.666681 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-xm948" event={"ID":"28b50482-ffec-406c-9ff9-9604bce5d5d5","Type":"ContainerDied","Data":"8c558e4eb231b58b703d70d6da8454e7128ac7fc7138897bedcbdeb9e801a08b"} Nov 28 06:53:01 crc kubenswrapper[4922]: I1128 06:53:01.683414 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:01Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:01 crc kubenswrapper[4922]: I1128 06:53:01.704185 4922 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-7gdxt" Nov 28 06:53:01 crc kubenswrapper[4922]: I1128 06:53:01.708114 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-h8wk6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0498340a-5e95-42bf-a0a6-8ac89a6b8858\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://19e8a18b97500977a51210206bba89633ff9939e989bfe1a6e471f5a23c83520\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zf7vq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b903fc60349ec7d5004f23ec933dce13d646a2c343819495564005dfb4813314\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift
-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zf7vq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T06:52:54Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-h8wk6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:01Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:01 crc kubenswrapper[4922]: I1128 06:53:01.739366 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-7gdxt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ac5c6b67-2037-400e-8e03-845b47d8ca67\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"message\\\":\\\"containers with unready status: [nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"message\\\":\\\"containers with unready status: [nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d0f038ffe6c8ec5ec836379b827ecb71967119efca3f2cd4ce5d3006fa96a153\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dtxdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2c1a545fef7aec25b21dd112c54eeb7b94306cafaf4732a50de9ff10a409c443\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dtxdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6e9942f8e8cdf6ffd04d46b29be5a53fcc8c4cb5d79f50d7747e2fe6eb744ffd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dtxdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e91ea6957087e19d43626438d68bf92a19a6f35325de570738354edd61da5bb0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dtxdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4f56366e2355663eb896c09a6710166b89f794a7f17cb317f6c4299e8b317782\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dtxdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://403e86b4b99e8bc85be77996288705f6ea8ff5da8e9b7dfb09749f4341065b55\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dtxdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://67c452725d82ca16f6b13f31f18d78e040d87890
0953e2b1c2d43afa9b4bbdc0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:53:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dtxdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d31c99f96b8af9c7b9c47c4ccd0b6485b23c9eacfe293667038042df8330d8a9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccou
nt\\\",\\\"name\\\":\\\"kube-api-access-dtxdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3aaa3ffb5b99838c7969b3e241ad9d7a0bc4797fac3ec52280914abbb7ca1b24\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3aaa3ffb5b99838c7969b3e241ad9d7a0bc4797fac3ec52280914abbb7ca1b24\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T06:52:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dtxdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T06:52:54Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-7gdxt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:01Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:01 crc kubenswrapper[4922]: I1128 06:53:01.756407 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:01 crc kubenswrapper[4922]: I1128 06:53:01.756488 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:01 crc kubenswrapper[4922]: I1128 06:53:01.756525 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:01 crc kubenswrapper[4922]: I1128 06:53:01.756557 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:01 crc kubenswrapper[4922]: I1128 06:53:01.756584 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:01Z","lastTransitionTime":"2025-11-28T06:53:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 06:53:01 crc kubenswrapper[4922]: I1128 06:53:01.761115 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"78d1b075-6126-476f-8318-483aeaa7b542\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:35Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:35Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d8727c0c5c76c4071561c95b14554063c01a2d7d826bfef19624a346f7079096\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c63926690deadbe1a0b9bd4228c1be8c9d959ae52dc13540e1d7da0ef6ce2b44\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://12e55656cf2a2cadbef853f458f347aabe75bd6b644a48a8c320144a0bbb77f8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartC
ount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ff005273cedb4a3be19dafd07b2572733446d00c1aa3d89dec6dad79a9b37cb7\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://43f9db3464d315f5b596f15327b9e7cfc2935bb8f18492bcf4aa1b2d8e2e8951\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-28T06:52:53Z\\\",\\\"message\\\":\\\" to sync for client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file\\\\nI1128 06:52:53.805479 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\nI1128 06:52:53.805349 1 requestheader_controller.go:172] Starting RequestHeaderAuthRequestController\\\\nI1128 06:52:53.807455 1 shared_informer.go:313] Waiting for caches to sync for RequestHeaderAuthRequestController\\\\nI1128 06:52:53.805665 1 envvar.go:172] \\\\\\\"Feature gate default state\\\\\\\" feature=\\\\\\\"WatchListClient\\\\\\\" enabled=false\\\\nI1128 06:52:53.807562 1 envvar.go:172] \\\\\\\"Feature gate default state\\\\\\\" feature=\\\\\\\"InformerResourceVersion\\\\\\\" enabled=false\\\\nI1128 06:52:53.805922 1 tlsconfig.go:203] \\\\\\\"Loaded serving cert\\\\\\\" certName=\\\\\\\"serving-cert::/tmp/serving-cert-870734072/tls.crt::/tmp/serving-cert-870734072/tls.key\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"localhost\\\\\\\\\\\\\\\" [serving] validServingFor=[localhost] issuer=\\\\\\\\\\\\\\\"check-endpoints-signer@1764312757\\\\\\\\\\\\\\\" (2025-11-28 06:52:36 +0000 UTC to 2025-12-28 06:52:37 +0000 UTC (now=2025-11-28 06:52:53.805871396 +0000 UTC))\\\\\\\"\\\\nI1128 06:52:53.808356 1 named_certificates.go:53] \\\\\\\"Loaded SNI cert\\\\\\\" index=0 certName=\\\\\\\"self-signed loopback\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"apiserver-loopback-client@1764312768\\\\\\\\\\\\\\\" [serving] validServingFor=[apiserver-loopback-client] issuer=\\\\\\\\\\\\\\\"apiserver-loopback-client-ca@1764312768\\\\\\\\\\\\\\\" (2025-11-28 05:52:47 +0000 UTC to 2026-11-28 05:52:47 +0000 UTC (now=2025-11-28 06:52:53.808326937 +0000 UTC))\\\\\\\"\\\\nI1128 06:52:53.808434 1 secure_serving.go:213] Serving securely on [::]:17697\\\\nI1128 06:52:53.808491 1 genericapiserver.go:683] [graceful-termination] waiting for shutdown to be initiated\\\\nI1128 06:52:53.805962 1 dynamic_serving_content.go:135] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-870734072/tls.crt::/tmp/serving-cert-870734072/tls.key\\\\\\\"\\\\nI1128 06:52:53.808872 1 tlsconfig.go:243] \\\\\\\"Starting DynamicServingCertificateController\\\\\\\"\\\\nF1128 06:52:53.810397 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T06:52:37Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cd17aa15325fbe3a40b81cea5bfec137ef02a8dd2d5f8393be4d206a8360ba89\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:37Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9a1a9e0fb2169a4caf2986906a87573648458725e2571ec377704856d3c16b47\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9a1a9e0fb2169a4caf2986906a87573648458725e2571ec377704856d3c16b47\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T06:52:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T06:52:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T06:52:35Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:01Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:01 crc kubenswrapper[4922]: I1128 06:53:01.783628 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:57Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:57Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4fb859dbed8c4c25965a739a2bd823f90f867a3bb629206994858cd62ba597ff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:01Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:01 crc kubenswrapper[4922]: I1128 06:53:01.801142 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:01Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:01 crc kubenswrapper[4922]: I1128 06:53:01.818622 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-xm948" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"28b50482-ffec-406c-9ff9-9604bce5d5d5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"message\\\":\\\"containers with incomplete status: [whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-76zhj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ea8d65e8ee7eb92aa290261e8f51b41b46819bc513aaca5c3bbdf3aa90a4acf1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ea8d65e8ee7eb92aa290261e8f51b41b46819bc513aaca5c3bbdf3aa90a4acf1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T06:52:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-76zhj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b52be6274c308e05417b43b1a405e2156837ac5b1e751cf1bf07f1820e5072c1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b52be6274c308e05417b43b1a405e2156837ac5b1e751cf1bf07f1820e5072c1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T06:52:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T06:52:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\"
:\\\"kube-api-access-76zhj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2e49c9118ef8a51223ca7a5ed0e966458c25e01d5fe81bffeecf85e3c958177a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2e49c9118ef8a51223ca7a5ed0e966458c25e01d5fe81bffeecf85e3c958177a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T06:52:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T06:52:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-76zhj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://10808a5202288b64a0583c474df43f7ad4846b876c071e3797e9403416dbd4e5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://10808a5202288b64a0583c474df43f7ad4846b876c071e3797e9403416dbd4e5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T06:52:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T06:52:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-76zhj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f17f7a3f2eae1a08db3ab5d237156d3555583ed11bdf121c4138312913bc2ad1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f17f7a3f2eae1a08db3ab5d237156d3555583ed11bdf121c4138312913bc2ad1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T06:53:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T06:52:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",
\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-76zhj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-76zhj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T06:52:54Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-xm948\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:01Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:01 crc kubenswrapper[4922]: I1128 06:53:01.835991 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-z5d9x" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"21211ec8-3baf-4230-9cd8-c641f6bdc0e1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d42d2096b746c6f0a9210409101237a8b283b9baa914633aefc36971712ec634\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g4hdz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T06:52:57Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-z5d9x\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:01Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:01 crc kubenswrapper[4922]: I1128 06:53:01.853400 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f54a7fc3-dff2-4919-91a9-7defe17b717e\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2b39d53d3c00be07ad6ed44fb37961669118efd3f0a953a39e05c90c9fc30319\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1c84fe4f54f9b9fc76f23f13aaee875e9d127be3dec3e3952893436fb9d94936\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bf8a543b9af64068e2164c1fb7fcf8117e36a3f1a6c6e40df2a63da8fea86612\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7ef4b355167e11c82fb8067373d0d1b4ec5551157e683043c98f9249082795d2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T06:52:35Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:01Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:01 crc kubenswrapper[4922]: I1128 06:53:01.859415 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:01 crc kubenswrapper[4922]: I1128 06:53:01.859470 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:01 crc kubenswrapper[4922]: I1128 06:53:01.859483 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:01 crc kubenswrapper[4922]: I1128 06:53:01.859502 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:01 crc kubenswrapper[4922]: I1128 06:53:01.859517 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:01Z","lastTransitionTime":"2025-11-28T06:53:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 06:53:01 crc kubenswrapper[4922]: I1128 06:53:01.876184 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:01Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:01 crc kubenswrapper[4922]: I1128 06:53:01.893046 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-w9zxj" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"e1f29751-83a6-4469-b733-50e654026f8c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://679d64be6eca63ee652a4cb6cc5432c6ce549d1de243bdfbde115af634e770e3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ghdft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T06:52:54Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-w9zxj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:01Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:01 crc kubenswrapper[4922]: I1128 06:53:01.907751 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-jgzjd" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b05f16bb-1729-4fd8-883a-4fb960bf4cff\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://099001160eba2d5545fdd42a6fac2b9842549a64b5d3fe093c274e073a156e5a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fwd5b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T06:52:54Z\\\"}}\" for pod \"openshift-multus\"/\"multus-jgzjd\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:01Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:01 crc kubenswrapper[4922]: I1128 06:53:01.923835 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://60275d4709ecec957c37c52e762628422258288394fb23bb6190e98253977288\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d7be069e52fb9de73b7d4297a34eaab68e25c764ed60172e87146259d0d0b6ed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:01Z is after 
2025-08-24T17:21:41Z" Nov 28 06:53:01 crc kubenswrapper[4922]: I1128 06:53:01.938579 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8de96806bba4cb76f53ccf39f1188732d96955671049b550589a836ed21e0616\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:01Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:01 crc kubenswrapper[4922]: I1128 06:53:01.955196 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f54a7fc3-dff2-4919-91a9-7defe17b717e\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2b39d53d3c00be07ad6ed44fb37961669118efd3f0a953a39e05c90c9fc30319\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1c84fe4f54f9b9fc76f23f13aaee875e9d127be3dec3e3952893436fb9d94936\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bf8a543b9af64068e2164c1fb7fcf8117e36a3f1a6c6e40df2a63da8fea86612\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7ef4b355167e11c82fb8067373d0d1b4ec5551157e683043c98f9249082795d2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T06:52:35Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:01Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:01 crc kubenswrapper[4922]: I1128 06:53:01.961818 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:01 crc kubenswrapper[4922]: I1128 06:53:01.961860 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:01 crc kubenswrapper[4922]: I1128 06:53:01.961872 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:01 crc kubenswrapper[4922]: I1128 06:53:01.961890 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:01 crc kubenswrapper[4922]: I1128 06:53:01.961904 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:01Z","lastTransitionTime":"2025-11-28T06:53:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 06:53:01 crc kubenswrapper[4922]: I1128 06:53:01.969427 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:01Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:01 crc kubenswrapper[4922]: I1128 06:53:01.978981 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-w9zxj" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"e1f29751-83a6-4469-b733-50e654026f8c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://679d64be6eca63ee652a4cb6cc5432c6ce549d1de243bdfbde115af634e770e3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ghdft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T06:52:54Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-w9zxj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:01Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:01 crc kubenswrapper[4922]: I1128 06:53:01.992668 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-jgzjd" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b05f16bb-1729-4fd8-883a-4fb960bf4cff\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://099001160eba2d5545fdd42a6fac2b9842549a64b5d3fe093c274e073a156e5a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fwd5b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T06:52:54Z\\\"}}\" for pod \"openshift-multus\"/\"multus-jgzjd\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:01Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:02 crc kubenswrapper[4922]: I1128 06:53:02.005305 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-xm948" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"28b50482-ffec-406c-9ff9-9604bce5d5d5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-76zhj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ea8d65e8ee7eb92aa290261e8f51b41b46819bc513aaca5c3bbdf3aa90a4acf1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ea8d65e8ee7eb92aa290261e8f51b41b46819bc513aaca5c3bbdf3aa90a4acf1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T06:52:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\
"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-76zhj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b52be6274c308e05417b43b1a405e2156837ac5b1e751cf1bf07f1820e5072c1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b52be6274c308e05417b43b1a405e2156837ac5b1e751cf1bf07f1820e5072c1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T06:52:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T06:52:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-76zhj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2e49c9118ef8a51223ca7a5ed0e966458c25e01d5fe81bffeecf85e3c958177a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2e49c9118ef8a51223ca7a5ed0e966458c25e01d5fe81bffeecf85e3c958177a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T06:52:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T06:52:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-76zhj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://10808a5202288b64a0583c474df43f7ad4846b876c071e3797e9403416dbd4e5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://10808a5202288b64a0583c474df43f7ad4846b876c07
1e3797e9403416dbd4e5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T06:52:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T06:52:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-76zhj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f17f7a3f2eae1a08db3ab5d237156d3555583ed11bdf121c4138312913bc2ad1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f17f7a3f2eae1a08db3ab5d237156d3555583ed11bdf121c4138312913bc2ad1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T06:53:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T06:52:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-76zhj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8c558e4eb231b58b703d70d6da8454e7128ac7fc7138897bedcbdeb9e801a08b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8c558e4eb231b58b703d70d6da8454e7128ac7fc7138897bedcbdeb9e801a08b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T06:53:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T06:53:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-76zhj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T06:52:54Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-xm948\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: 
x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:02Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:02 crc kubenswrapper[4922]: I1128 06:53:02.018688 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-z5d9x" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"21211ec8-3baf-4230-9cd8-c641f6bdc0e1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d42d2096b746c6f0a9210409101237a8b283b9baa914633aefc36971712ec634\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g4hdz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T06:52:57Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-z5d9x\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:02Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:02 crc kubenswrapper[4922]: I1128 06:53:02.033671 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://60275d4709ecec957c37c52e762628422258288394fb23bb6190e98253977288\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d7be069e52fb9de73b7d4297a34eaab68e25c764ed60172e87146259d0d0b6ed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:02Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:02 crc kubenswrapper[4922]: I1128 06:53:02.049760 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8de96806bba4cb76f53ccf39f1188732d96955671049b550589a836ed21e0616\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:02Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:02 crc kubenswrapper[4922]: I1128 06:53:02.063680 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located 
when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:02Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:02 crc kubenswrapper[4922]: I1128 06:53:02.064303 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:02 crc kubenswrapper[4922]: I1128 06:53:02.064449 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:02 crc kubenswrapper[4922]: I1128 06:53:02.064466 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:02 crc kubenswrapper[4922]: I1128 06:53:02.064484 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:02 crc kubenswrapper[4922]: I1128 06:53:02.064496 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:02Z","lastTransitionTime":"2025-11-28T06:53:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 06:53:02 crc kubenswrapper[4922]: I1128 06:53:02.076016 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-h8wk6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0498340a-5e95-42bf-a0a6-8ac89a6b8858\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://19e8a18b97500977a51210206bba89633ff9939e989bfe1a6e471f5a23c83520\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zf7vq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b903fc60349ec7d5004f23ec933dce13d646a2c343819495564005dfb4813314\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zf7vq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T06:52:54Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-h8wk6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:02Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:02 crc kubenswrapper[4922]: I1128 06:53:02.092567 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"78d1b075-6126-476f-8318-483aeaa7b542\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:35Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:35Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d8727c0c5c76c4071561c95b14554063c01a2d7d826bfef19624a346f7079096\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c63926690deadbe1a0b9bd4228c1be8c9d959ae52dc13540e1d7da0ef6ce2b44\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://12e55656cf2a2cadbef853f458f347aabe75bd6b644a48a8c320144a0bbb77f8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e2
7753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ff005273cedb4a3be19dafd07b2572733446d00c1aa3d89dec6dad79a9b37cb7\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://43f9db3464d315f5b596f15327b9e7cfc2935bb8f18492bcf4aa1b2d8e2e8951\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-28T06:52:53Z\\\",\\\"message\\\":\\\" to sync for client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file\\\\nI1128 06:52:53.805479 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\nI1128 06:52:53.805349 1 requestheader_controller.go:172] Starting RequestHeaderAuthRequestController\\\\nI1128 06:52:53.807455 1 shared_informer.go:313] Waiting for caches to sync for RequestHeaderAuthRequestController\\\\nI1128 06:52:53.805665 1 envvar.go:172] \\\\\\\"Feature gate default state\\\\\\\" feature=\\\\\\\"WatchListClient\\\\\\\" enabled=false\\\\nI1128 06:52:53.807562 1 envvar.go:172] \\\\\\\"Feature gate default state\\\\\\\" feature=\\\\\\\"InformerResourceVersion\\\\\\\" enabled=false\\\\nI1128 06:52:53.805922 1 tlsconfig.go:203] \\\\\\\"Loaded serving cert\\\\\\\" certName=\\\\\\\"serving-cert::/tmp/serving-cert-870734072/tls.crt::/tmp/serving-cert-870734072/tls.key\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"localhost\\\\\\\\\\\\\\\" [serving] validServingFor=[localhost] issuer=\\\\\\\\\\\\\\\"check-endpoints-signer@1764312757\\\\\\\\\\\\\\\" (2025-11-28 06:52:36 +0000 UTC to 2025-12-28 06:52:37 +0000 UTC (now=2025-11-28 06:52:53.805871396 +0000 UTC))\\\\\\\"\\\\nI1128 06:52:53.808356 1 named_certificates.go:53] \\\\\\\"Loaded SNI cert\\\\\\\" index=0 certName=\\\\\\\"self-signed loopback\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"apiserver-loopback-client@1764312768\\\\\\\\\\\\\\\" [serving] validServingFor=[apiserver-loopback-client] issuer=\\\\\\\\\\\\\\\"apiserver-loopback-client-ca@1764312768\\\\\\\\\\\\\\\" (2025-11-28 05:52:47 +0000 UTC to 2026-11-28 05:52:47 +0000 UTC (now=2025-11-28 06:52:53.808326937 +0000 UTC))\\\\\\\"\\\\nI1128 06:52:53.808434 1 secure_serving.go:213] Serving securely on [::]:17697\\\\nI1128 06:52:53.808491 1 genericapiserver.go:683] [graceful-termination] waiting for shutdown to be initiated\\\\nI1128 06:52:53.805962 1 dynamic_serving_content.go:135] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-870734072/tls.crt::/tmp/serving-cert-870734072/tls.key\\\\\\\"\\\\nI1128 06:52:53.808872 1 tlsconfig.go:243] \\\\\\\"Starting DynamicServingCertificateController\\\\\\\"\\\\nF1128 06:52:53.810397 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T06:52:37Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cd17aa15325fbe3a40b81cea5bfec137ef02a8dd2d5f8393be4d206a8360ba89\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:37Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9a1a9e0fb2169a4caf2986906a87573648458725e2571ec377704856d3c16b47\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9a1a9e0fb2169a4caf2986906a87573648458725e2571ec377704856d3c16b47\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T06:52:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T06:52:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T06:52:35Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:02Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:02 crc kubenswrapper[4922]: I1128 06:53:02.097554 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 06:53:02 crc kubenswrapper[4922]: I1128 06:53:02.097681 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 06:53:02 crc 
kubenswrapper[4922]: I1128 06:53:02.097737 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 06:53:02 crc kubenswrapper[4922]: E1128 06:53:02.097820 4922 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Nov 28 06:53:02 crc kubenswrapper[4922]: E1128 06:53:02.097844 4922 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 28 06:53:02 crc kubenswrapper[4922]: E1128 06:53:02.097818 4922 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 06:53:10.09778842 +0000 UTC m=+35.018184042 (durationBeforeRetry 8s). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 06:53:02 crc kubenswrapper[4922]: E1128 06:53:02.097915 4922 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-28 06:53:10.097895283 +0000 UTC m=+35.018290905 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Nov 28 06:53:02 crc kubenswrapper[4922]: E1128 06:53:02.097949 4922 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-28 06:53:10.097937444 +0000 UTC m=+35.018333056 (durationBeforeRetry 8s). 
Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 28 06:53:02 crc kubenswrapper[4922]: I1128 06:53:02.106868 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:57Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:57Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4fb859dbed8c4c25965a739a2bd823f90f867a3bb629206994858cd62ba597ff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:02Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:02 crc kubenswrapper[4922]: I1128 06:53:02.121955 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:02Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:02 crc kubenswrapper[4922]: I1128 06:53:02.146274 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-7gdxt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ac5c6b67-2037-400e-8e03-845b47d8ca67\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"message\\\":\\\"containers with unready status: [nbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"message\\\":\\\"containers with unready status: [nbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d0f038ffe6c8ec5ec836379b827ecb71967119efca3f2cd4ce5d3006fa96a153\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dtxdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2c1a545fef7aec25b21dd112c54eeb7b94306cafaf4732a50de9ff10a409c443\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dtxdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6e9942f8e8cdf6ffd04d46b29be5a53fcc8c4cb5d79f50d7747e2fe6eb744ffd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dtxdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e91ea6957087e19d43626438d68bf92a19a6f35325de570738354edd61da5bb0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dtxdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4f56366e2355663eb896c09a6710166b89f794a7f17cb317f6c4299e8b317782\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dtxdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://403e86b4b99e8bc85be77996288705f6ea8ff5da8e9b7dfb09749f4341065b55\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dtxdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://67c452725d82ca16f6b13f31f18d78e040d87890
0953e2b1c2d43afa9b4bbdc0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:53:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dtxdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d31c99f96b8af9c7b9c47c4ccd0b6485b23c9eacfe293667038042df8330d8a9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccoun
t\\\",\\\"name\\\":\\\"kube-api-access-dtxdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3aaa3ffb5b99838c7969b3e241ad9d7a0bc4797fac3ec52280914abbb7ca1b24\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3aaa3ffb5b99838c7969b3e241ad9d7a0bc4797fac3ec52280914abbb7ca1b24\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T06:52:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dtxdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T06:52:54Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-7gdxt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:02Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:02 crc kubenswrapper[4922]: I1128 06:53:02.166980 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:02 crc kubenswrapper[4922]: I1128 06:53:02.167311 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:02 crc kubenswrapper[4922]: I1128 06:53:02.167444 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:02 crc kubenswrapper[4922]: I1128 06:53:02.167576 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:02 crc kubenswrapper[4922]: I1128 06:53:02.167689 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:02Z","lastTransitionTime":"2025-11-28T06:53:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 06:53:02 crc kubenswrapper[4922]: I1128 06:53:02.199102 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 06:53:02 crc kubenswrapper[4922]: I1128 06:53:02.199194 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 06:53:02 crc kubenswrapper[4922]: E1128 06:53:02.199435 4922 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 28 06:53:02 crc kubenswrapper[4922]: E1128 06:53:02.199479 4922 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 28 06:53:02 crc kubenswrapper[4922]: E1128 06:53:02.199499 4922 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 28 06:53:02 crc kubenswrapper[4922]: E1128 06:53:02.199593 4922 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-11-28 06:53:10.199565259 +0000 UTC m=+35.119960881 (durationBeforeRetry 8s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 28 06:53:02 crc kubenswrapper[4922]: E1128 06:53:02.200017 4922 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 28 06:53:02 crc kubenswrapper[4922]: E1128 06:53:02.200204 4922 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 28 06:53:02 crc kubenswrapper[4922]: E1128 06:53:02.200420 4922 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 28 06:53:02 crc kubenswrapper[4922]: E1128 06:53:02.200710 4922 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-11-28 06:53:10.20067594 +0000 UTC m=+35.121071592 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 28 06:53:02 crc kubenswrapper[4922]: I1128 06:53:02.271061 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:02 crc kubenswrapper[4922]: I1128 06:53:02.271325 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:02 crc kubenswrapper[4922]: I1128 06:53:02.271341 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:02 crc kubenswrapper[4922]: I1128 06:53:02.271359 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:02 crc kubenswrapper[4922]: I1128 06:53:02.271373 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:02Z","lastTransitionTime":"2025-11-28T06:53:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 06:53:02 crc kubenswrapper[4922]: I1128 06:53:02.374124 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:02 crc kubenswrapper[4922]: I1128 06:53:02.374183 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:02 crc kubenswrapper[4922]: I1128 06:53:02.374201 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:02 crc kubenswrapper[4922]: I1128 06:53:02.374249 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:02 crc kubenswrapper[4922]: I1128 06:53:02.374269 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:02Z","lastTransitionTime":"2025-11-28T06:53:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:53:02 crc kubenswrapper[4922]: I1128 06:53:02.397727 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 06:53:02 crc kubenswrapper[4922]: I1128 06:53:02.397774 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 06:53:02 crc kubenswrapper[4922]: I1128 06:53:02.397790 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 06:53:02 crc kubenswrapper[4922]: E1128 06:53:02.397895 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 06:53:02 crc kubenswrapper[4922]: E1128 06:53:02.398052 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 06:53:02 crc kubenswrapper[4922]: E1128 06:53:02.398195 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 06:53:02 crc kubenswrapper[4922]: I1128 06:53:02.477111 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:02 crc kubenswrapper[4922]: I1128 06:53:02.477173 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:02 crc kubenswrapper[4922]: I1128 06:53:02.477194 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:02 crc kubenswrapper[4922]: I1128 06:53:02.477261 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:02 crc kubenswrapper[4922]: I1128 06:53:02.477287 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:02Z","lastTransitionTime":"2025-11-28T06:53:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:53:02 crc kubenswrapper[4922]: I1128 06:53:02.579337 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:02 crc kubenswrapper[4922]: I1128 06:53:02.579403 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:02 crc kubenswrapper[4922]: I1128 06:53:02.579424 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:02 crc kubenswrapper[4922]: I1128 06:53:02.579452 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:02 crc kubenswrapper[4922]: I1128 06:53:02.579475 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:02Z","lastTransitionTime":"2025-11-28T06:53:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 06:53:02 crc kubenswrapper[4922]: I1128 06:53:02.674547 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-xm948" event={"ID":"28b50482-ffec-406c-9ff9-9604bce5d5d5","Type":"ContainerStarted","Data":"8329d62934c99c8561a76ffc873ea44b49549e6be23697ea4003dcd42798f914"} Nov 28 06:53:02 crc kubenswrapper[4922]: I1128 06:53:02.674671 4922 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Nov 28 06:53:02 crc kubenswrapper[4922]: I1128 06:53:02.675435 4922 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-7gdxt" Nov 28 06:53:02 crc kubenswrapper[4922]: I1128 06:53:02.687610 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:02 crc kubenswrapper[4922]: I1128 06:53:02.687681 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:02 crc kubenswrapper[4922]: I1128 06:53:02.687718 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:02 crc kubenswrapper[4922]: I1128 06:53:02.687750 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:02 crc kubenswrapper[4922]: I1128 06:53:02.687771 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:02Z","lastTransitionTime":"2025-11-28T06:53:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 06:53:02 crc kubenswrapper[4922]: I1128 06:53:02.698480 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://60275d4709ecec957c37c52e762628422258288394fb23bb6190e98253977288\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d7be069e52fb9de73b7d4297a34eaab68e25c764ed60172e87146259d0d0b6ed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:02Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:02 crc kubenswrapper[4922]: I1128 06:53:02.706141 4922 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-7gdxt" Nov 28 06:53:02 crc 
kubenswrapper[4922]: I1128 06:53:02.721562 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8de96806bba4cb76f53ccf39f1188732d96955671049b550589a836ed21e0616\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:02Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:02 crc kubenswrapper[4922]: I1128 06:53:02.741124 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:02Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:02 crc kubenswrapper[4922]: I1128 06:53:02.757519 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-h8wk6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0498340a-5e95-42bf-a0a6-8ac89a6b8858\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://19e8a18b97500977a51210206bba89633ff9939e989bfe1a6e471f5a23c83520\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zf7vq\\\",\\\"readOnly\\\":true,\\\"rec
ursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b903fc60349ec7d5004f23ec933dce13d646a2c343819495564005dfb4813314\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zf7vq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T06:52:54Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-h8wk6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:02Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:02 crc kubenswrapper[4922]: I1128 06:53:02.771519 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"78d1b075-6126-476f-8318-483aeaa7b542\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:35Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:35Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d8727c0c5c76c4071561c95b14554063c01a2d7d826bfef19624a346f7079096\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c63926690deadbe1a0b9bd4228c1be8c9d959ae52dc13540e1d7da0ef6ce2b44\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://12e55656cf2a2cadbef853f458f347aabe75bd6b644a48a8c320144a0bbb77f8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ff005273cedb4a3be19dafd07b2572733446d00c1aa3d89dec6dad79a9b37cb7\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://43f9db3464d315f5b596f15327b9e7cfc2935bb8f18492bcf4aa1b2d8e2e8951\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-28T06:52:53Z\\\",\\\"message\\\":\\\" to sync for 
client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file\\\\nI1128 06:52:53.805479 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\nI1128 06:52:53.805349 1 requestheader_controller.go:172] Starting RequestHeaderAuthRequestController\\\\nI1128 06:52:53.807455 1 shared_informer.go:313] Waiting for caches to sync for RequestHeaderAuthRequestController\\\\nI1128 06:52:53.805665 1 envvar.go:172] \\\\\\\"Feature gate default state\\\\\\\" feature=\\\\\\\"WatchListClient\\\\\\\" enabled=false\\\\nI1128 06:52:53.807562 1 envvar.go:172] \\\\\\\"Feature gate default state\\\\\\\" feature=\\\\\\\"InformerResourceVersion\\\\\\\" enabled=false\\\\nI1128 06:52:53.805922 1 tlsconfig.go:203] \\\\\\\"Loaded serving cert\\\\\\\" certName=\\\\\\\"serving-cert::/tmp/serving-cert-870734072/tls.crt::/tmp/serving-cert-870734072/tls.key\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"localhost\\\\\\\\\\\\\\\" [serving] validServingFor=[localhost] issuer=\\\\\\\\\\\\\\\"check-endpoints-signer@1764312757\\\\\\\\\\\\\\\" (2025-11-28 06:52:36 +0000 UTC to 2025-12-28 06:52:37 +0000 UTC (now=2025-11-28 06:52:53.805871396 +0000 UTC))\\\\\\\"\\\\nI1128 06:52:53.808356 1 named_certificates.go:53] \\\\\\\"Loaded SNI cert\\\\\\\" index=0 certName=\\\\\\\"self-signed loopback\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"apiserver-loopback-client@1764312768\\\\\\\\\\\\\\\" [serving] validServingFor=[apiserver-loopback-client] issuer=\\\\\\\\\\\\\\\"apiserver-loopback-client-ca@1764312768\\\\\\\\\\\\\\\" (2025-11-28 05:52:47 +0000 UTC to 2026-11-28 05:52:47 +0000 UTC (now=2025-11-28 06:52:53.808326937 +0000 UTC))\\\\\\\"\\\\nI1128 06:52:53.808434 1 secure_serving.go:213] Serving securely on [::]:17697\\\\nI1128 06:52:53.808491 1 genericapiserver.go:683] [graceful-termination] waiting for shutdown to be initiated\\\\nI1128 06:52:53.805962 1 dynamic_serving_content.go:135] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-870734072/tls.crt::/tmp/serving-cert-870734072/tls.key\\\\\\\"\\\\nI1128 06:52:53.808872 1 tlsconfig.go:243] \\\\\\\"Starting DynamicServingCertificateController\\\\\\\"\\\\nF1128 06:52:53.810397 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T06:52:37Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cd17aa15325fbe3a40b81cea5bfec137ef02a8dd2d5f8393be4d206a8360ba89\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:37Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9a1a9e0fb2169a4caf2986906a87573648458725e2571ec377704856d3c16b47\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9a1a9e0fb2169a4caf2986906a87573648458725e2571ec377704856d3c16b47\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T06:52:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T06:52:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T06:52:35Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:02Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:02 crc kubenswrapper[4922]: I1128 06:53:02.784139 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:57Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:57Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4fb859dbed8c4c25965a739a2bd823f90f867a3bb629206994858cd62ba597ff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:02Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:02 crc kubenswrapper[4922]: I1128 06:53:02.797916 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:02 crc kubenswrapper[4922]: I1128 06:53:02.797976 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:02 crc kubenswrapper[4922]: I1128 06:53:02.797995 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:02 crc kubenswrapper[4922]: I1128 06:53:02.798023 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:02 crc kubenswrapper[4922]: I1128 06:53:02.798039 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:02Z","lastTransitionTime":"2025-11-28T06:53:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 06:53:02 crc kubenswrapper[4922]: I1128 06:53:02.798041 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:02Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:02 crc kubenswrapper[4922]: I1128 06:53:02.821599 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-7gdxt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ac5c6b67-2037-400e-8e03-845b47d8ca67\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"message\\\":\\\"containers with unready 
status: [nbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"message\\\":\\\"containers with unready status: [nbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d0f038ffe6c8ec5ec836379b827ecb71967119efca3f2cd4ce5d3006fa96a153\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dtxdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2c1a545fef7aec25b21dd112c54eeb7b94306cafaf4732a50de9ff10a409c443\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dtxdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6e9942f8e8cdf6ffd04d46b29be5a53fcc8c4cb5d79f50d7747e2fe6eb744ffd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/servicea
ccount\\\",\\\"name\\\":\\\"kube-api-access-dtxdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e91ea6957087e19d43626438d68bf92a19a6f35325de570738354edd61da5bb0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dtxdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4f56366e2355663eb896c09a6710166b89f794a7f17cb317f6c4299e8b317782\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dtxdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://403e86b4b99e8bc85be77996288705f6ea8ff5da8e9b7dfb09749f4341065b55\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\
":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dtxdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://67c452725d82ca16f6b13f31f18d78e040d878900953e2b1c2d43afa9b4bbdc0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:53:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dtxdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d31c99f96b8af9c7b9c47c4ccd0b6485b23c9eacfe293667038042df8330d8a9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch
\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dtxdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3aaa3ffb5b99838c7969b3e241ad9d7a0bc4797fac3ec52280914abbb7ca1b24\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3aaa3ffb5b99838c7969b3e241ad9d7a0bc4797fac3ec52280914abbb7ca1b24\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T06:52:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dtxdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T06:52:54Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-7gdxt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:02Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:02 crc kubenswrapper[4922]: I1128 06:53:02.833352 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-z5d9x" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"21211ec8-3baf-4230-9cd8-c641f6bdc0e1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d42d2096b746c6f0a9210409101237a8b283b9baa914633aefc36971712ec634\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g4hdz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T06:52:57Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-z5d9x\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:02Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:02 crc kubenswrapper[4922]: I1128 06:53:02.849832 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f54a7fc3-dff2-4919-91a9-7defe17b717e\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2b39d53d3c00be07ad6ed44fb37961669118efd3f0a953a39e05c90c9fc30319\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1c84fe4f54f9b9fc76f23f13aaee875e9d127be3dec3e3952893436fb9d94936\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bf8a543b9af64068e2164c1fb7fcf8117e36a3f1a6c6e40df2a63da8fea86612\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7ef4b355167e11c82fb8067373d0d1b4ec5551157e683043c98f9249082795d2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T06:52:35Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:02Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:02 crc kubenswrapper[4922]: I1128 06:53:02.865489 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:02Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:02 crc kubenswrapper[4922]: I1128 06:53:02.877370 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-w9zxj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e1f29751-83a6-4469-b733-50e654026f8c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://679d64be6eca63ee652a4cb6cc5432c6ce549d1de243bdfbde115af634e770e3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ghdft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T06:52:54Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-w9zxj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 
2025-11-28T06:53:02Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:02 crc kubenswrapper[4922]: I1128 06:53:02.892967 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-jgzjd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b05f16bb-1729-4fd8-883a-4fb960bf4cff\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://099001160eba2d5545fdd42a6fac2b9842549a64b5d3fe093c274e073a156e5a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fwd5b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":
\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T06:52:54Z\\\"}}\" for pod \"openshift-multus\"/\"multus-jgzjd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:02Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:02 crc kubenswrapper[4922]: I1128 06:53:02.900572 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:02 crc kubenswrapper[4922]: I1128 06:53:02.900618 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:02 crc kubenswrapper[4922]: I1128 06:53:02.900628 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:02 crc kubenswrapper[4922]: I1128 06:53:02.900644 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:02 crc kubenswrapper[4922]: I1128 06:53:02.900654 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:02Z","lastTransitionTime":"2025-11-28T06:53:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:53:02 crc kubenswrapper[4922]: I1128 06:53:02.910706 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-xm948" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"28b50482-ffec-406c-9ff9-9604bce5d5d5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8329d62934c99c8561a76ffc873ea44b49549e6be23697ea4003dcd42798f914\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:53:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-76zhj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ea8d65e8ee7eb92aa290261e8f51b41b46819bc513aaca5c3bbdf3aa90a4acf1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ea8d65e8ee7eb92aa290261e8f51b41b46819bc513aaca5c3bbdf3aa90a4acf1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T06:52:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-76zhj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b52be6274c308e05417b43b1a405e2156837ac5b1e751cf1bf07f1820e5072c1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b52be6274c308e05417b43b1a405e2156837ac5b1e751cf1bf07f1820e5072c1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T06:52:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T06:52:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-76zhj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2e49c9118ef8a51223ca7a5ed0e966458c25e01d5fe81bffeecf85e3c958177a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2e49c9118ef8a51223ca7a5ed0e966458c25e01d5fe81bffeecf85e3c958177a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T06:52:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T06:52:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-76zhj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://10808a5202288b64a0583c474df43f7ad4846b876c071e3797e9403416dbd4e5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://10808a5202288b64a0583c474df43f7ad4846b876c071e3797e9403416dbd4e5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T06:52:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T06:52:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-76zhj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f17f7a3f2eae1a08db3ab5d237156d3555583ed11bdf121c4138312913bc2ad1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f17f7a3f2eae1a08db3ab5d237156d3555583ed11bdf121c4138312913bc2ad1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T06:53:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T06:52:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-76zhj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8c558e4eb231b58b703d70d6da8454e7128ac7fc7138897bedcbdeb9e801a08b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8c558e4eb231b58b703d70d6da8454e7128ac7fc7138897bedcbdeb9e801a08b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T06:53:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T06:53:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-76zhj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T06:52:54Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-xm948\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:02Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:02 crc kubenswrapper[4922]: I1128 06:53:02.924118 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://60275d4709ecec957c37c52e762628422258288394fb23bb6190e98253977288\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d7be069e52fb9de73b7d4297a34eaab68e25c764ed60172e87146259d0d0b6ed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:02Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:02 crc kubenswrapper[4922]: I1128 06:53:02.948112 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8de96806bba4cb76f53ccf39f1188732d96955671049b550589a836ed21e0616\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:02Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:02 crc kubenswrapper[4922]: I1128 06:53:02.970361 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located 
when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:02Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:03 crc kubenswrapper[4922]: I1128 06:53:03.003272 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:03 crc kubenswrapper[4922]: I1128 06:53:03.003304 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:03 crc kubenswrapper[4922]: I1128 06:53:03.003314 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:03 crc kubenswrapper[4922]: I1128 06:53:03.003329 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:03 crc kubenswrapper[4922]: I1128 06:53:03.003341 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:03Z","lastTransitionTime":"2025-11-28T06:53:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 06:53:03 crc kubenswrapper[4922]: I1128 06:53:03.003839 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-h8wk6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0498340a-5e95-42bf-a0a6-8ac89a6b8858\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://19e8a18b97500977a51210206bba89633ff9939e989bfe1a6e471f5a23c83520\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zf7vq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b903fc60349ec7d5004f23ec933dce13d646a2c343819495564005dfb4813314\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zf7vq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T06:52:54Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-h8wk6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:02Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:03 crc kubenswrapper[4922]: I1128 06:53:03.037443 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-7gdxt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ac5c6b67-2037-400e-8e03-845b47d8ca67\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d0f038ffe6c8ec5ec836379b827ecb71967119efca3f2cd4ce5d3006fa96a153\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dtxdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2c1a545fef7aec25b21dd112c54eeb7b94306cafaf4732a50de9ff10a409c443\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kuber
netes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dtxdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6e9942f8e8cdf6ffd04d46b29be5a53fcc8c4cb5d79f50d7747e2fe6eb744ffd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dtxdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e91ea6957087e19d43626438d68bf92a19a6f35325de570738354edd61da5bb0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dtxdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4f56366e2355663eb896c09a6710166b89f794a7f17cb317f6c4299e8b317782\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dtxdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://403e86b4b99e8bc85be77996288705f6ea8ff5da8e9b7dfb09749f4341065b55\\\",\\\"image
\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dtxdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://67c452725d82ca16f6b13f31f18d78e040d878900953e2b1c2d43afa9b4bbdc0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:53:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath
\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dtxdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d31c99f96b8af9c7b9c47c4ccd0b6485b23c9eacfe293667038042df8330d8a9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dtxdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3aaa3ffb5b99838c7969b3e241ad9d7a0bc4797fac3ec52280914abbb7ca1b24\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3aaa3ffb5b99838c7969b3e241ad9d7a0bc4797fac3ec52280914abbb7ca1b24\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T06:52:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dtxdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T06:52:54Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-7gdxt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:03Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:03 crc kubenswrapper[4922]: I1128 06:53:03.050799 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"78d1b075-6126-476f-8318-483aeaa7b542\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:35Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:35Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d8727c0c5c76c4071561c95b14554063c01a2d7d826bfef19624a346f7079096\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c63926690deadbe1a0b9bd4228c1be8c9d959ae52dc13540e1d7da0ef6ce2b44\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://12e55656cf2a2cadbef853f458f347aabe75bd6b644a48a8c320144a0bbb77f8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"m
ountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ff005273cedb4a3be19dafd07b2572733446d00c1aa3d89dec6dad79a9b37cb7\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://43f9db3464d315f5b596f15327b9e7cfc2935bb8f18492bcf4aa1b2d8e2e8951\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-28T06:52:53Z\\\",\\\"message\\\":\\\" to sync for client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file\\\\nI1128 06:52:53.805479 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\nI1128 06:52:53.805349 1 requestheader_controller.go:172] Starting RequestHeaderAuthRequestController\\\\nI1128 06:52:53.807455 1 shared_informer.go:313] Waiting for caches to sync for RequestHeaderAuthRequestController\\\\nI1128 06:52:53.805665 1 envvar.go:172] \\\\\\\"Feature gate default state\\\\\\\" feature=\\\\\\\"WatchListClient\\\\\\\" enabled=false\\\\nI1128 06:52:53.807562 1 envvar.go:172] \\\\\\\"Feature gate default state\\\\\\\" feature=\\\\\\\"InformerResourceVersion\\\\\\\" enabled=false\\\\nI1128 06:52:53.805922 1 tlsconfig.go:203] \\\\\\\"Loaded serving cert\\\\\\\" certName=\\\\\\\"serving-cert::/tmp/serving-cert-870734072/tls.crt::/tmp/serving-cert-870734072/tls.key\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"localhost\\\\\\\\\\\\\\\" [serving] validServingFor=[localhost] issuer=\\\\\\\\\\\\\\\"check-endpoints-signer@1764312757\\\\\\\\\\\\\\\" (2025-11-28 06:52:36 +0000 UTC to 2025-12-28 06:52:37 +0000 UTC (now=2025-11-28 06:52:53.805871396 +0000 UTC))\\\\\\\"\\\\nI1128 06:52:53.808356 1 named_certificates.go:53] \\\\\\\"Loaded SNI cert\\\\\\\" index=0 certName=\\\\\\\"self-signed loopback\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"apiserver-loopback-client@1764312768\\\\\\\\\\\\\\\" [serving] validServingFor=[apiserver-loopback-client] issuer=\\\\\\\\\\\\\\\"apiserver-loopback-client-ca@1764312768\\\\\\\\\\\\\\\" (2025-11-28 05:52:47 +0000 UTC to 2026-11-28 05:52:47 +0000 UTC (now=2025-11-28 06:52:53.808326937 +0000 UTC))\\\\\\\"\\\\nI1128 06:52:53.808434 1 secure_serving.go:213] Serving securely on [::]:17697\\\\nI1128 06:52:53.808491 1 genericapiserver.go:683] [graceful-termination] waiting for shutdown to be initiated\\\\nI1128 06:52:53.805962 1 dynamic_serving_content.go:135] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-870734072/tls.crt::/tmp/serving-cert-870734072/tls.key\\\\\\\"\\\\nI1128 06:52:53.808872 1 tlsconfig.go:243] \\\\\\\"Starting DynamicServingCertificateController\\\\\\\"\\\\nF1128 06:52:53.810397 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T06:52:37Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cd17aa15325fbe3a40b81cea5bfec137ef02a8dd2d5f8393be4d206a8360ba89\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:37Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9a1a9e0fb2169a4caf2986906a87573648458725e2571ec377704856d3c16b47\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9a1a9e0fb2169a4caf2986906a87573648458725e2571ec377704856d3c16b47\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T06:52:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T06:52:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T06:52:35Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:03Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:03 crc kubenswrapper[4922]: I1128 06:53:03.062375 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:57Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:57Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4fb859dbed8c4c25965a739a2bd823f90f867a3bb629206994858cd62ba597ff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:03Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:03 crc kubenswrapper[4922]: I1128 06:53:03.075168 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:03Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:03 crc kubenswrapper[4922]: I1128 06:53:03.089870 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-xm948" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"28b50482-ffec-406c-9ff9-9604bce5d5d5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8329d62934c99c8561a76ffc873ea44b49549e6be23697ea4003dcd42798f914\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:53:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-76zhj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ea8d65e8ee7eb92aa290261e8f51b41b46819bc513aaca5c3bbdf3aa90a4acf1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5
db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ea8d65e8ee7eb92aa290261e8f51b41b46819bc513aaca5c3bbdf3aa90a4acf1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T06:52:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-76zhj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b52be6274c308e05417b43b1a405e2156837ac5b1e751cf1bf07f1820e5072c1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b52be6274c308e05417b43b1a405e2156837ac5b1e751cf1bf07f1820e5072c1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T06:52:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T06:52:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-76zhj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2e49c9118ef8a51223ca7a5ed0e966458c25e01d5fe81bffeecf85e3c958177a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2e49c9118ef8a51223ca7a5ed0e966458c25e01d5fe81bffeecf85e3c958177a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T06:52:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T06:52:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io
/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-76zhj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://10808a5202288b64a0583c474df43f7ad4846b876c071e3797e9403416dbd4e5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://10808a5202288b64a0583c474df43f7ad4846b876c071e3797e9403416dbd4e5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T06:52:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T06:52:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-76zhj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f17f7a3f2eae1a08db3ab5d237156d3555583ed11bdf121c4138312913bc2ad1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f17f7a3f2eae1a08db3ab5d237156d3555583ed11bdf121c4138312913bc2ad1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T06:53:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T06:52:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-76zhj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8c558e4eb231b58b703d70d6da8454e7128ac7fc7138897bedcbdeb9e801a08b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8c558e4eb231b58b703d70d6da8454e7128ac7fc7138897bedcbdeb9e801a08b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T06:53:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T06:53:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mo
untPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-76zhj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T06:52:54Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-xm948\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:03Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:03 crc kubenswrapper[4922]: I1128 06:53:03.100116 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-z5d9x" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"21211ec8-3baf-4230-9cd8-c641f6bdc0e1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d42d2096b746c6f0a9210409101237a8b283b9baa914633aefc36971712ec634\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g4hdz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T06:52:57Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-z5d9x\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet 
valid: current time 2025-11-28T06:53:03Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:03 crc kubenswrapper[4922]: I1128 06:53:03.105423 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:03 crc kubenswrapper[4922]: I1128 06:53:03.105467 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:03 crc kubenswrapper[4922]: I1128 06:53:03.105478 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:03 crc kubenswrapper[4922]: I1128 06:53:03.105494 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:03 crc kubenswrapper[4922]: I1128 06:53:03.105506 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:03Z","lastTransitionTime":"2025-11-28T06:53:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:53:03 crc kubenswrapper[4922]: I1128 06:53:03.114314 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f54a7fc3-dff2-4919-91a9-7defe17b717e\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2b39d53d3c00be07ad6ed44fb37961669118efd3f0a953a39e05c90c9fc30319\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1c84fe4f54f9b9fc76f23f13aaee875e9d127be3dec3e3952893436fb9d94936\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d
4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bf8a543b9af64068e2164c1fb7fcf8117e36a3f1a6c6e40df2a63da8fea86612\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7ef4b355167e11c82fb8067373d0d1b4ec5551157e683043c98f9249082795d2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T06:52:35Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:03Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:03 crc kubenswrapper[4922]: I1128 06:53:03.139701 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:03Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:03 crc kubenswrapper[4922]: I1128 06:53:03.152389 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-w9zxj" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"e1f29751-83a6-4469-b733-50e654026f8c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://679d64be6eca63ee652a4cb6cc5432c6ce549d1de243bdfbde115af634e770e3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ghdft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T06:52:54Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-w9zxj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:03Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:03 crc kubenswrapper[4922]: I1128 06:53:03.175148 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-jgzjd" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b05f16bb-1729-4fd8-883a-4fb960bf4cff\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://099001160eba2d5545fdd42a6fac2b9842549a64b5d3fe093c274e073a156e5a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fwd5b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T06:52:54Z\\\"}}\" for pod \"openshift-multus\"/\"multus-jgzjd\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:03Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:03 crc kubenswrapper[4922]: I1128 06:53:03.207884 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:03 crc kubenswrapper[4922]: I1128 06:53:03.207922 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:03 crc kubenswrapper[4922]: I1128 06:53:03.207932 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:03 crc kubenswrapper[4922]: I1128 06:53:03.207946 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:03 crc kubenswrapper[4922]: I1128 06:53:03.207955 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:03Z","lastTransitionTime":"2025-11-28T06:53:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:53:03 crc kubenswrapper[4922]: I1128 06:53:03.310384 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:03 crc kubenswrapper[4922]: I1128 06:53:03.310419 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:03 crc kubenswrapper[4922]: I1128 06:53:03.310427 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:03 crc kubenswrapper[4922]: I1128 06:53:03.310441 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:03 crc kubenswrapper[4922]: I1128 06:53:03.310450 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:03Z","lastTransitionTime":"2025-11-28T06:53:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Nov 28 06:53:03 crc kubenswrapper[4922]: I1128 06:53:03.413300 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 06:53:03 crc kubenswrapper[4922]: I1128 06:53:03.413353 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 06:53:03 crc kubenswrapper[4922]: I1128 06:53:03.413371 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 06:53:03 crc kubenswrapper[4922]: I1128 06:53:03.413391 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 06:53:03 crc kubenswrapper[4922]: I1128 06:53:03.413408 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:03Z","lastTransitionTime":"2025-11-28T06:53:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 06:53:03 crc kubenswrapper[4922]: I1128 06:53:03.515788 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 06:53:03 crc kubenswrapper[4922]: I1128 06:53:03.515855 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 06:53:03 crc kubenswrapper[4922]: I1128 06:53:03.515878 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 06:53:03 crc kubenswrapper[4922]: I1128 06:53:03.515904 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 06:53:03 crc kubenswrapper[4922]: I1128 06:53:03.515923 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:03Z","lastTransitionTime":"2025-11-28T06:53:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 06:53:03 crc kubenswrapper[4922]: I1128 06:53:03.617985 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 06:53:03 crc kubenswrapper[4922]: I1128 06:53:03.618009 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 06:53:03 crc kubenswrapper[4922]: I1128 06:53:03.618020 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 06:53:03 crc kubenswrapper[4922]: I1128 06:53:03.618034 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 06:53:03 crc kubenswrapper[4922]: I1128 06:53:03.618043 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:03Z","lastTransitionTime":"2025-11-28T06:53:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 06:53:03 crc kubenswrapper[4922]: I1128 06:53:03.676884 4922 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness"
Nov 28 06:53:03 crc kubenswrapper[4922]: I1128 06:53:03.720412 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 06:53:03 crc kubenswrapper[4922]: I1128 06:53:03.720476 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 06:53:03 crc kubenswrapper[4922]: I1128 06:53:03.720492 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 06:53:03 crc kubenswrapper[4922]: I1128 06:53:03.720517 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 06:53:03 crc kubenswrapper[4922]: I1128 06:53:03.720534 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:03Z","lastTransitionTime":"2025-11-28T06:53:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 06:53:03 crc kubenswrapper[4922]: I1128 06:53:03.822885 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 06:53:03 crc kubenswrapper[4922]: I1128 06:53:03.822913 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 06:53:03 crc kubenswrapper[4922]: I1128 06:53:03.822921 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 06:53:03 crc kubenswrapper[4922]: I1128 06:53:03.822937 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 06:53:03 crc kubenswrapper[4922]: I1128 06:53:03.822949 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:03Z","lastTransitionTime":"2025-11-28T06:53:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
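
The recurring NodeNotReady condition quotes the container runtime verbatim: the runtime reports NetworkReady=false until a CNI network configuration appears in /etc/kubernetes/cni/net.d/ (on this cluster the network provider is OVN-Kubernetes, per the ovnkube-node-7gdxt entries nearby, and it writes that file once its controller is healthy). A simplified illustration of the readiness precondition, assuming the conventional CNI config file extensions; the authoritative logic lives in the runtime's libcni integration:

package main

import (
	"fmt"
	"os"
	"path/filepath"
)

// cniConfExists reports whether any CNI network config is present in dir,
// roughly mirroring the check behind "no CNI configuration file in ...".
func cniConfExists(dir string) bool {
	entries, err := os.ReadDir(dir)
	if err != nil {
		return false
	}
	for _, e := range entries {
		switch filepath.Ext(e.Name()) {
		case ".conf", ".conflist", ".json":
			return true
		}
	}
	return false
}

func main() {
	dir := "/etc/kubernetes/cni/net.d" // path taken from the log messages above
	fmt.Printf("NetworkReady=%v (dir %s)\n", cniConfExists(dir), dir)
}
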
Nov 28 06:53:03 crc kubenswrapper[4922]: I1128 06:53:03.924594 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 06:53:03 crc kubenswrapper[4922]: I1128 06:53:03.924630 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 06:53:03 crc kubenswrapper[4922]: I1128 06:53:03.924640 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 06:53:03 crc kubenswrapper[4922]: I1128 06:53:03.924654 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 06:53:03 crc kubenswrapper[4922]: I1128 06:53:03.924663 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:03Z","lastTransitionTime":"2025-11-28T06:53:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 06:53:04 crc kubenswrapper[4922]: I1128 06:53:04.026538 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 06:53:04 crc kubenswrapper[4922]: I1128 06:53:04.026581 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 06:53:04 crc kubenswrapper[4922]: I1128 06:53:04.026593 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 06:53:04 crc kubenswrapper[4922]: I1128 06:53:04.026609 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 06:53:04 crc kubenswrapper[4922]: I1128 06:53:04.026618 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:04Z","lastTransitionTime":"2025-11-28T06:53:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 06:53:04 crc kubenswrapper[4922]: I1128 06:53:04.129769 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 06:53:04 crc kubenswrapper[4922]: I1128 06:53:04.129834 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 06:53:04 crc kubenswrapper[4922]: I1128 06:53:04.129851 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 06:53:04 crc kubenswrapper[4922]: I1128 06:53:04.129875 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 06:53:04 crc kubenswrapper[4922]: I1128 06:53:04.129894 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:04Z","lastTransitionTime":"2025-11-28T06:53:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 06:53:04 crc kubenswrapper[4922]: I1128 06:53:04.233054 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 06:53:04 crc kubenswrapper[4922]: I1128 06:53:04.233114 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 06:53:04 crc kubenswrapper[4922]: I1128 06:53:04.233137 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 06:53:04 crc kubenswrapper[4922]: I1128 06:53:04.233166 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 06:53:04 crc kubenswrapper[4922]: I1128 06:53:04.233190 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:04Z","lastTransitionTime":"2025-11-28T06:53:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 06:53:04 crc kubenswrapper[4922]: I1128 06:53:04.335280 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 06:53:04 crc kubenswrapper[4922]: I1128 06:53:04.335343 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 06:53:04 crc kubenswrapper[4922]: I1128 06:53:04.335365 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 06:53:04 crc kubenswrapper[4922]: I1128 06:53:04.335396 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 06:53:04 crc kubenswrapper[4922]: I1128 06:53:04.335417 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:04Z","lastTransitionTime":"2025-11-28T06:53:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 06:53:04 crc kubenswrapper[4922]: I1128 06:53:04.397623 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 28 06:53:04 crc kubenswrapper[4922]: E1128 06:53:04.397798 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 28 06:53:04 crc kubenswrapper[4922]: I1128 06:53:04.398312 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
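
All of the failed patches in this log target the same endpoint, Post https://127.0.0.1:9743/pod. A hypothetical one-off probe to see which certificate that listener is actually serving; InsecureSkipVerify only disables client-side validation so the handshake completes and the peer certificate can be read, nothing is sent:

package main

import (
	"crypto/tls"
	"fmt"
	"time"
)

func main() {
	// Address taken from the webhook errors above.
	conn, err := tls.Dial("tcp", "127.0.0.1:9743", &tls.Config{
		InsecureSkipVerify: true, // read-only inspection of the served chain
	})
	if err != nil {
		fmt.Println("dial:", err)
		return
	}
	defer conn.Close()

	cert := conn.ConnectionState().PeerCertificates[0]
	fmt.Printf("subject=%s notAfter=%s expired=%v\n",
		cert.Subject, cert.NotAfter.Format(time.RFC3339),
		time.Now().After(cert.NotAfter))
}
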
Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 06:53:04 crc kubenswrapper[4922]: E1128 06:53:04.398417 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 06:53:04 crc kubenswrapper[4922]: I1128 06:53:04.398492 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 06:53:04 crc kubenswrapper[4922]: E1128 06:53:04.398568 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 06:53:04 crc kubenswrapper[4922]: I1128 06:53:04.438032 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:04 crc kubenswrapper[4922]: I1128 06:53:04.438085 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:04 crc kubenswrapper[4922]: I1128 06:53:04.438103 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:04 crc kubenswrapper[4922]: I1128 06:53:04.438125 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:04 crc kubenswrapper[4922]: I1128 06:53:04.438142 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:04Z","lastTransitionTime":"2025-11-28T06:53:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Nov 28 06:53:04 crc kubenswrapper[4922]: I1128 06:53:04.541554 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 06:53:04 crc kubenswrapper[4922]: I1128 06:53:04.541615 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 06:53:04 crc kubenswrapper[4922]: I1128 06:53:04.541638 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 06:53:04 crc kubenswrapper[4922]: I1128 06:53:04.541666 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 06:53:04 crc kubenswrapper[4922]: I1128 06:53:04.541686 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:04Z","lastTransitionTime":"2025-11-28T06:53:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 06:53:04 crc kubenswrapper[4922]: I1128 06:53:04.645038 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 06:53:04 crc kubenswrapper[4922]: I1128 06:53:04.645103 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 06:53:04 crc kubenswrapper[4922]: I1128 06:53:04.645135 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 06:53:04 crc kubenswrapper[4922]: I1128 06:53:04.645163 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 06:53:04 crc kubenswrapper[4922]: I1128 06:53:04.645184 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:04Z","lastTransitionTime":"2025-11-28T06:53:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
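
The err="failed to patch status ..." payloads in the status_manager.go entries, with their $setElementOrder/conditions directives, are two-way strategic-merge patches computed from the old and new PodStatus. A minimal sketch of how a patch of that shape can be produced with the public apimachinery helper (assumes the k8s.io/api and k8s.io/apimachinery modules are on the module path; error handling abbreviated):

package main

import (
	"encoding/json"
	"fmt"

	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/util/strategicpatch"
)

func main() {
	// Old and new status snapshots; only the Ready condition flips.
	oldPod := corev1.Pod{Status: corev1.PodStatus{Conditions: []corev1.PodCondition{
		{Type: corev1.PodReady, Status: corev1.ConditionFalse},
	}}}
	newPod := oldPod
	newPod.Status.Conditions = []corev1.PodCondition{
		{Type: corev1.PodReady, Status: corev1.ConditionTrue},
	}

	oldJSON, _ := json.Marshal(oldPod)
	newJSON, _ := json.Marshal(newPod)

	// corev1.Pod carries the patchMergeKey/patchStrategy struct tags that make
	// "conditions" a merge-by-type list in the resulting patch.
	patch, err := strategicpatch.CreateTwoWayMergePatch(oldJSON, newJSON, corev1.Pod{})
	if err != nil {
		panic(err)
	}
	fmt.Println(string(patch))
}

Here the patch never reaches the API server: the mutating webhook pod.network-node-identity.openshift.io intercepts the request and the TLS failure above aborts it, so the kubelet retries on the next sync.
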
Nov 28 06:53:04 crc kubenswrapper[4922]: I1128 06:53:04.683740 4922 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-7gdxt_ac5c6b67-2037-400e-8e03-845b47d8ca67/ovnkube-controller/0.log"
Nov 28 06:53:04 crc kubenswrapper[4922]: I1128 06:53:04.689047 4922 generic.go:334] "Generic (PLEG): container finished" podID="ac5c6b67-2037-400e-8e03-845b47d8ca67" containerID="67c452725d82ca16f6b13f31f18d78e040d878900953e2b1c2d43afa9b4bbdc0" exitCode=1
Nov 28 06:53:04 crc kubenswrapper[4922]: I1128 06:53:04.689136 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-7gdxt" event={"ID":"ac5c6b67-2037-400e-8e03-845b47d8ca67","Type":"ContainerDied","Data":"67c452725d82ca16f6b13f31f18d78e040d878900953e2b1c2d43afa9b4bbdc0"}
Nov 28 06:53:04 crc kubenswrapper[4922]: I1128 06:53:04.690030 4922 scope.go:117] "RemoveContainer" containerID="67c452725d82ca16f6b13f31f18d78e040d878900953e2b1c2d43afa9b4bbdc0"
Nov 28 06:53:04 crc kubenswrapper[4922]: I1128 06:53:04.714401 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:04Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:04 crc kubenswrapper[4922]: I1128 06:53:04.736091 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-h8wk6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0498340a-5e95-42bf-a0a6-8ac89a6b8858\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://19e8a18b97500977a51210206bba89633ff9939e989bfe1a6e471f5a23c83520\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zf7vq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b903fc60349ec7d5004f23ec933dce13d646a2c343819495564005dfb4813314\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":tru
e,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zf7vq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T06:52:54Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-h8wk6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:04Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:04 crc kubenswrapper[4922]: I1128 06:53:04.747719 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:04 crc kubenswrapper[4922]: I1128 06:53:04.747762 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:04 crc kubenswrapper[4922]: I1128 06:53:04.747781 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:04 crc kubenswrapper[4922]: I1128 06:53:04.747804 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:04 crc kubenswrapper[4922]: I1128 06:53:04.747821 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:04Z","lastTransitionTime":"2025-11-28T06:53:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 06:53:04 crc kubenswrapper[4922]: I1128 06:53:04.771564 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-7gdxt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ac5c6b67-2037-400e-8e03-845b47d8ca67\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d0f038ffe6c8ec5ec836379b827ecb71967119efca3f2cd4ce5d3006fa96a153\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dtxdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2c1a545fef7aec25b21dd112c54eeb7b94306cafaf4732a50de9ff10a409c443\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dtxdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\
":\\\"cri-o://6e9942f8e8cdf6ffd04d46b29be5a53fcc8c4cb5d79f50d7747e2fe6eb744ffd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dtxdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e91ea6957087e19d43626438d68bf92a19a6f35325de570738354edd61da5bb0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dtxdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4f56366e2355663eb896c09a6710166b89f794a7f17cb317f6c4299e8b317782\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dtxdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://403e86b4b99e8bc85be77996288705f6ea8ff5da8e9b7dfb09749f4341065b55\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.i
o/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dtxdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://67c452725d82ca16f6b13f31f18d78e040d878900953e2b1c2d43afa9b4bbdc0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://67c452725d82ca16f6b13f31f18d78e040d878900953e2b1c2d43afa9b4bbdc0\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-28T06:53:03Z\\\",\\\"message\\\":\\\"e (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1128 06:53:03.775357 6198 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI1128 06:53:03.775409 6198 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI1128 06:53:03.775460 6198 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI1128 06:53:03.775477 6198 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI1128 06:53:03.775513 6198 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI1128 06:53:03.775617 6198 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI1128 06:53:03.775639 6198 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI1128 06:53:03.775644 6198 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI1128 06:53:03.775663 6198 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI1128 06:53:03.775679 6198 handler.go:208] Removed *v1.Node event handler 2\\\\nI1128 06:53:03.775682 6198 factory.go:656] Stopping watch factory\\\\nI1128 06:53:03.775691 6198 handler.go:208] Removed *v1.Node event handler 7\\\\nI1128 06:53:03.775699 6198 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI1128 06:53:03.775703 6198 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI1128 06:53:03.775714 6198 handler.go:208] Removed *v1.Namespace 
ev\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T06:53:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dtxdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d31c99f96b8af9c7b9c47c4ccd0b6485b23c9eacfe293667038042df8330d8a9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dtxdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3aaa3ffb5b99838c7969b3e241ad9d7a0bc4797fac3ec52280914abbb7ca1b24\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d20
99482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3aaa3ffb5b99838c7969b3e241ad9d7a0bc4797fac3ec52280914abbb7ca1b24\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T06:52:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dtxdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T06:52:54Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-7gdxt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:04Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:04 crc kubenswrapper[4922]: I1128 06:53:04.789456 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"78d1b075-6126-476f-8318-483aeaa7b542\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:35Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:35Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d8727c0c5c76c4071561c95b14554063c01a2d7d826bfef19624a346f7079096\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c63926690deadbe1a0b9bd4228c1be8c9d959ae52dc13540e1d7da0ef6ce2b44\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://12e55656cf2a2cadbef853f458f347aabe75bd6b644a48a8c320144a0bbb77f8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ff005273cedb4a3be19dafd07b2572733446d00c1aa3d89dec6dad79a9b37cb7\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://43f9db3464d315f5b596f15327b9e7cfc2935bb8f18492bcf4aa1b2d8e2e8951\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-28T06:52:53Z\\\",\\\"message\\\":\\\" to sync for 
client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file\\\\nI1128 06:52:53.805479 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\nI1128 06:52:53.805349 1 requestheader_controller.go:172] Starting RequestHeaderAuthRequestController\\\\nI1128 06:52:53.807455 1 shared_informer.go:313] Waiting for caches to sync for RequestHeaderAuthRequestController\\\\nI1128 06:52:53.805665 1 envvar.go:172] \\\\\\\"Feature gate default state\\\\\\\" feature=\\\\\\\"WatchListClient\\\\\\\" enabled=false\\\\nI1128 06:52:53.807562 1 envvar.go:172] \\\\\\\"Feature gate default state\\\\\\\" feature=\\\\\\\"InformerResourceVersion\\\\\\\" enabled=false\\\\nI1128 06:52:53.805922 1 tlsconfig.go:203] \\\\\\\"Loaded serving cert\\\\\\\" certName=\\\\\\\"serving-cert::/tmp/serving-cert-870734072/tls.crt::/tmp/serving-cert-870734072/tls.key\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"localhost\\\\\\\\\\\\\\\" [serving] validServingFor=[localhost] issuer=\\\\\\\\\\\\\\\"check-endpoints-signer@1764312757\\\\\\\\\\\\\\\" (2025-11-28 06:52:36 +0000 UTC to 2025-12-28 06:52:37 +0000 UTC (now=2025-11-28 06:52:53.805871396 +0000 UTC))\\\\\\\"\\\\nI1128 06:52:53.808356 1 named_certificates.go:53] \\\\\\\"Loaded SNI cert\\\\\\\" index=0 certName=\\\\\\\"self-signed loopback\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"apiserver-loopback-client@1764312768\\\\\\\\\\\\\\\" [serving] validServingFor=[apiserver-loopback-client] issuer=\\\\\\\\\\\\\\\"apiserver-loopback-client-ca@1764312768\\\\\\\\\\\\\\\" (2025-11-28 05:52:47 +0000 UTC to 2026-11-28 05:52:47 +0000 UTC (now=2025-11-28 06:52:53.808326937 +0000 UTC))\\\\\\\"\\\\nI1128 06:52:53.808434 1 secure_serving.go:213] Serving securely on [::]:17697\\\\nI1128 06:52:53.808491 1 genericapiserver.go:683] [graceful-termination] waiting for shutdown to be initiated\\\\nI1128 06:52:53.805962 1 dynamic_serving_content.go:135] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-870734072/tls.crt::/tmp/serving-cert-870734072/tls.key\\\\\\\"\\\\nI1128 06:52:53.808872 1 tlsconfig.go:243] \\\\\\\"Starting DynamicServingCertificateController\\\\\\\"\\\\nF1128 06:52:53.810397 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T06:52:37Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cd17aa15325fbe3a40b81cea5bfec137ef02a8dd2d5f8393be4d206a8360ba89\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:37Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9a1a9e0fb2169a4caf2986906a87573648458725e2571ec377704856d3c16b47\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9a1a9e0fb2169a4caf2986906a87573648458725e2571ec377704856d3c16b47\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T06:52:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T06:52:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T06:52:35Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:04Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:04 crc kubenswrapper[4922]: I1128 06:53:04.831005 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:57Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:57Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4fb859dbed8c4c25965a739a2bd823f90f867a3bb629206994858cd62ba597ff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:04Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:04 crc kubenswrapper[4922]: I1128 06:53:04.850461 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:04 crc kubenswrapper[4922]: I1128 06:53:04.850559 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:04 crc kubenswrapper[4922]: I1128 06:53:04.850577 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:04 crc kubenswrapper[4922]: I1128 06:53:04.850602 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:04 crc kubenswrapper[4922]: I1128 06:53:04.850627 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:04Z","lastTransitionTime":"2025-11-28T06:53:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 06:53:04 crc kubenswrapper[4922]: I1128 06:53:04.855860 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:04Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:04 crc kubenswrapper[4922]: I1128 06:53:04.870406 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-xm948" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"28b50482-ffec-406c-9ff9-9604bce5d5d5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8329d62934c99c8561a76ffc873ea44b49549e6be23697ea4003dcd42798f914\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:53:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-76zhj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ea8d65e8ee7eb92aa290261e8f51b41b46819bc513aaca5c3bbdf3aa90a4acf1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ea8d65e8ee7eb92aa290261e8f51b41b46819bc513aaca5c3bbdf3aa90a4acf1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T06:52:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-76zhj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b52be6274c308e05417b43b1a405e2156837ac5b1e751cf1bf07f1820e5072c1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b52be6274c308e05417b43b1a405e2156837ac5b1e751cf1bf07f1820e5072c1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T06:52:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T06:52:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-76zhj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2e49c9118ef8a51223ca7a5ed0e966458c25e01d5fe81bffeecf85e3c958177a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2e49c9118ef8a51223ca7a5ed0e966458c25e01d5fe81bffeecf85e3c958177a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T06:52:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T06:52:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-76zhj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://10808a5202288b64a0583c474df43f7ad4846b876c071e3797e9403416dbd4e5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://10808a5202288b64a0583c474df43f7ad4846b876c071e3797e9403416dbd4e5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T06:52:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T06:52:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-76zhj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f17f7a3f2eae1a08db3ab5d237156d3555583ed11bdf121c4138312913bc2ad1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f17f7a3f2eae1a08db3ab5d237156d3555583ed11bdf121c4138312913bc2ad1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T06:53:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T06:52:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-76zhj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8c558e4eb231b58b703d70d6da8454e7128ac7fc7138897bedcbdeb9e801a08b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8c558e4eb231b58b703d70d6da8454e7128ac7fc7138897bedcbdeb9e801a08b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T06:53:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T06:53:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-76zhj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T06:52:54Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-xm948\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:04Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:04 crc kubenswrapper[4922]: I1128 06:53:04.882772 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-z5d9x" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"21211ec8-3baf-4230-9cd8-c641f6bdc0e1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d42d2096b746c6f0a9210409101237a8b283b9baa914633aefc36971712ec634\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g4hdz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T06:52:57Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-z5d9x\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:04Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:04 crc kubenswrapper[4922]: I1128 06:53:04.898654 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f54a7fc3-dff2-4919-91a9-7defe17b717e\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2b39d53d3c00be07ad6ed44fb37961669118efd3f0a953a39e05c90c9fc30319\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1c84fe4f54f9b9fc76f23f13aaee875e9d127be3dec3e3952893436fb9d94936\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bf8a543b9af64068e2164c1fb7fcf8117e36a3f1a6c6e40df2a63da8fea86612\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7ef4b355167e11c82fb8067373d0d1b4ec5551157e683043c98f9249082795d2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T06:52:35Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:04Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:04 crc kubenswrapper[4922]: I1128 06:53:04.915720 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:04Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:04 crc kubenswrapper[4922]: I1128 06:53:04.929934 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-w9zxj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e1f29751-83a6-4469-b733-50e654026f8c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://679d64be6eca63ee652a4cb6cc5432c6ce549d1de243bdfbde115af634e770e3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ghdft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T06:52:54Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-w9zxj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 
2025-11-28T06:53:04Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:04 crc kubenswrapper[4922]: I1128 06:53:04.945892 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-jgzjd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b05f16bb-1729-4fd8-883a-4fb960bf4cff\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://099001160eba2d5545fdd42a6fac2b9842549a64b5d3fe093c274e073a156e5a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fwd5b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":
\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T06:52:54Z\\\"}}\" for pod \"openshift-multus\"/\"multus-jgzjd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:04Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:04 crc kubenswrapper[4922]: I1128 06:53:04.954384 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:04 crc kubenswrapper[4922]: I1128 06:53:04.954405 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:04 crc kubenswrapper[4922]: I1128 06:53:04.954418 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:04 crc kubenswrapper[4922]: I1128 06:53:04.954431 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:04 crc kubenswrapper[4922]: I1128 06:53:04.954440 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:04Z","lastTransitionTime":"2025-11-28T06:53:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:53:04 crc kubenswrapper[4922]: I1128 06:53:04.964240 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://60275d4709ecec957c37c52e762628422258288394fb23bb6190e98253977288\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d7be069e52fb9de73b7d4297a34eaab68e25c764ed60172e87146259d0d0b6ed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:04Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:04 crc kubenswrapper[4922]: I1128 06:53:04.976826 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8de96806bba4cb76f53ccf39f1188732d96955671049b550589a836ed21e0616\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:04Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:05 crc kubenswrapper[4922]: I1128 06:53:05.057680 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:05 crc kubenswrapper[4922]: I1128 06:53:05.057724 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:05 crc kubenswrapper[4922]: I1128 06:53:05.057735 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:05 crc kubenswrapper[4922]: I1128 06:53:05.057753 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:05 crc kubenswrapper[4922]: I1128 06:53:05.057766 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:05Z","lastTransitionTime":"2025-11-28T06:53:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 06:53:05 crc kubenswrapper[4922]: I1128 06:53:05.160973 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:05 crc kubenswrapper[4922]: I1128 06:53:05.161019 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:05 crc kubenswrapper[4922]: I1128 06:53:05.161028 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:05 crc kubenswrapper[4922]: I1128 06:53:05.161043 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:05 crc kubenswrapper[4922]: I1128 06:53:05.161052 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:05Z","lastTransitionTime":"2025-11-28T06:53:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:53:05 crc kubenswrapper[4922]: I1128 06:53:05.263401 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:05 crc kubenswrapper[4922]: I1128 06:53:05.263692 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:05 crc kubenswrapper[4922]: I1128 06:53:05.263705 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:05 crc kubenswrapper[4922]: I1128 06:53:05.263723 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:05 crc kubenswrapper[4922]: I1128 06:53:05.263739 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:05Z","lastTransitionTime":"2025-11-28T06:53:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:53:05 crc kubenswrapper[4922]: I1128 06:53:05.366753 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:05 crc kubenswrapper[4922]: I1128 06:53:05.366799 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:05 crc kubenswrapper[4922]: I1128 06:53:05.366811 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:05 crc kubenswrapper[4922]: I1128 06:53:05.366833 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:05 crc kubenswrapper[4922]: I1128 06:53:05.366843 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:05Z","lastTransitionTime":"2025-11-28T06:53:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 06:53:05 crc kubenswrapper[4922]: I1128 06:53:05.412710 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:05Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:05 crc kubenswrapper[4922]: I1128 06:53:05.425353 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-h8wk6" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0498340a-5e95-42bf-a0a6-8ac89a6b8858\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://19e8a18b97500977a51210206bba89633ff9939e989bfe1a6e471f5a23c83520\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zf7vq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b903fc60349ec7d5004f23ec933dce13d646a2c343819495564005dfb4813314\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zf7vq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T06:52:54Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-h8wk6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:05Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:05 crc kubenswrapper[4922]: I1128 06:53:05.446077 4922 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"78d1b075-6126-476f-8318-483aeaa7b542\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:35Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:35Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d8727c0c5c76c4071561c95b14554063c01a2d7d826bfef19624a346f7079096\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c63926690deadbe1a0b9bd4228c1be8c9d959ae52dc13540e1d7da0ef6ce2b44\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://12e55656cf2a2cadbef853f458f347aabe75bd6b644a48a8c320144a0bbb77f8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:36Z\\\"}},\\\"volumeMounts\\\":[{\\\
"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ff005273cedb4a3be19dafd07b2572733446d00c1aa3d89dec6dad79a9b37cb7\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://43f9db3464d315f5b596f15327b9e7cfc2935bb8f18492bcf4aa1b2d8e2e8951\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-28T06:52:53Z\\\",\\\"message\\\":\\\" to sync for client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file\\\\nI1128 06:52:53.805479 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\nI1128 06:52:53.805349 1 requestheader_controller.go:172] Starting RequestHeaderAuthRequestController\\\\nI1128 06:52:53.807455 1 shared_informer.go:313] Waiting for caches to sync for RequestHeaderAuthRequestController\\\\nI1128 06:52:53.805665 1 envvar.go:172] \\\\\\\"Feature gate default state\\\\\\\" feature=\\\\\\\"WatchListClient\\\\\\\" enabled=false\\\\nI1128 06:52:53.807562 1 envvar.go:172] \\\\\\\"Feature gate default state\\\\\\\" feature=\\\\\\\"InformerResourceVersion\\\\\\\" enabled=false\\\\nI1128 06:52:53.805922 1 tlsconfig.go:203] \\\\\\\"Loaded serving cert\\\\\\\" certName=\\\\\\\"serving-cert::/tmp/serving-cert-870734072/tls.crt::/tmp/serving-cert-870734072/tls.key\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"localhost\\\\\\\\\\\\\\\" [serving] validServingFor=[localhost] issuer=\\\\\\\\\\\\\\\"check-endpoints-signer@1764312757\\\\\\\\\\\\\\\" (2025-11-28 06:52:36 +0000 UTC to 2025-12-28 06:52:37 +0000 UTC (now=2025-11-28 06:52:53.805871396 +0000 UTC))\\\\\\\"\\\\nI1128 06:52:53.808356 1 named_certificates.go:53] \\\\\\\"Loaded SNI cert\\\\\\\" index=0 certName=\\\\\\\"self-signed loopback\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"apiserver-loopback-client@1764312768\\\\\\\\\\\\\\\" [serving] validServingFor=[apiserver-loopback-client] issuer=\\\\\\\\\\\\\\\"apiserver-loopback-client-ca@1764312768\\\\\\\\\\\\\\\" (2025-11-28 05:52:47 +0000 UTC to 2026-11-28 05:52:47 +0000 UTC (now=2025-11-28 06:52:53.808326937 +0000 UTC))\\\\\\\"\\\\nI1128 06:52:53.808434 1 secure_serving.go:213] Serving securely on [::]:17697\\\\nI1128 06:52:53.808491 1 genericapiserver.go:683] [graceful-termination] waiting for shutdown to be initiated\\\\nI1128 06:52:53.805962 1 dynamic_serving_content.go:135] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-870734072/tls.crt::/tmp/serving-cert-870734072/tls.key\\\\\\\"\\\\nI1128 06:52:53.808872 1 tlsconfig.go:243] \\\\\\\"Starting DynamicServingCertificateController\\\\\\\"\\\\nF1128 06:52:53.810397 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T06:52:37Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cd17aa15325fbe3a40b81cea5bfec137ef02a8dd2d5f8393be4d206a8360ba89\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:37Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9a1a9e0fb2169a4caf2986906a87573648458725e2571ec377704856d3c16b47\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9a1a9e0fb2169a4caf2986906a87573648458725e2571ec377704856d3c16b47\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T06:52:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T06:52:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T06:52:35Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:05Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:05 crc kubenswrapper[4922]: I1128 06:53:05.459296 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:57Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:57Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4fb859dbed8c4c25965a739a2bd823f90f867a3bb629206994858cd62ba597ff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:05Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:05 crc kubenswrapper[4922]: I1128 06:53:05.469148 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:05 crc kubenswrapper[4922]: I1128 06:53:05.469206 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:05 crc kubenswrapper[4922]: I1128 06:53:05.469266 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:05 crc kubenswrapper[4922]: I1128 06:53:05.469295 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:05 crc kubenswrapper[4922]: I1128 06:53:05.469312 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:05Z","lastTransitionTime":"2025-11-28T06:53:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 06:53:05 crc kubenswrapper[4922]: I1128 06:53:05.477482 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:05Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:05 crc kubenswrapper[4922]: I1128 06:53:05.495404 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-7gdxt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ac5c6b67-2037-400e-8e03-845b47d8ca67\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"message\\\":\\\"containers with unready 
status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d0f038ffe6c8ec5ec836379b827ecb71967119efca3f2cd4ce5d3006fa96a153\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dtxdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2c1a545fef7aec25b21dd112c54eeb7b94306cafaf4732a50de9ff10a409c443\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dtxdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6e9942f8e8cdf6ffd04d46b29be5a53fcc8c4cb5d79f50d7747e2fe6eb744ffd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",
\\\"name\\\":\\\"kube-api-access-dtxdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e91ea6957087e19d43626438d68bf92a19a6f35325de570738354edd61da5bb0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dtxdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4f56366e2355663eb896c09a6710166b89f794a7f17cb317f6c4299e8b317782\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dtxdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://403e86b4b99e8bc85be77996288705f6ea8ff5da8e9b7dfb09749f4341065b55\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-s
ocket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dtxdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://67c452725d82ca16f6b13f31f18d78e040d878900953e2b1c2d43afa9b4bbdc0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://67c452725d82ca16f6b13f31f18d78e040d878900953e2b1c2d43afa9b4bbdc0\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-28T06:53:03Z\\\",\\\"message\\\":\\\"e (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1128 06:53:03.775357 6198 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI1128 06:53:03.775409 6198 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI1128 06:53:03.775460 6198 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI1128 06:53:03.775477 6198 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI1128 06:53:03.775513 6198 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI1128 06:53:03.775617 6198 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI1128 06:53:03.775639 6198 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI1128 06:53:03.775644 6198 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI1128 06:53:03.775663 6198 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI1128 06:53:03.775679 6198 handler.go:208] Removed *v1.Node event handler 2\\\\nI1128 06:53:03.775682 6198 factory.go:656] Stopping watch factory\\\\nI1128 06:53:03.775691 6198 handler.go:208] Removed *v1.Node event handler 7\\\\nI1128 06:53:03.775699 6198 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI1128 06:53:03.775703 6198 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI1128 06:53:03.775714 6198 handler.go:208] Removed *v1.Namespace 
ev\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T06:53:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dtxdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d31c99f96b8af9c7b9c47c4ccd0b6485b23c9eacfe293667038042df8330d8a9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dtxdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3aaa3ffb5b99838c7969b3e241ad9d7a0bc4797fac3ec52280914abbb7ca1b24\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d20
99482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3aaa3ffb5b99838c7969b3e241ad9d7a0bc4797fac3ec52280914abbb7ca1b24\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T06:52:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dtxdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T06:52:54Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-7gdxt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:05Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:05 crc kubenswrapper[4922]: I1128 06:53:05.512859 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f54a7fc3-dff2-4919-91a9-7defe17b717e\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2b39d53d3c00be07ad6ed44fb37961669118efd3f0a953a39e05c90c9fc30319\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1c84fe4f54f9b9fc76f23f13aaee875e9d127be3dec3e3952893436fb9d94936\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4b
a8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bf8a543b9af64068e2164c1fb7fcf8117e36a3f1a6c6e40df2a63da8fea86612\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7ef4b355167e11c82fb8067373d0d1b4ec5551157e683043c98f9249082795d2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T06:52:35Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:05Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:05 crc kubenswrapper[4922]: I1128 06:53:05.526251 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:05Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:05 crc kubenswrapper[4922]: I1128 06:53:05.540252 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-w9zxj" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"e1f29751-83a6-4469-b733-50e654026f8c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://679d64be6eca63ee652a4cb6cc5432c6ce549d1de243bdfbde115af634e770e3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ghdft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T06:52:54Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-w9zxj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:05Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:05 crc kubenswrapper[4922]: I1128 06:53:05.558118 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-jgzjd" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b05f16bb-1729-4fd8-883a-4fb960bf4cff\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://099001160eba2d5545fdd42a6fac2b9842549a64b5d3fe093c274e073a156e5a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fwd5b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T06:52:54Z\\\"}}\" for pod \"openshift-multus\"/\"multus-jgzjd\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:05Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:05 crc kubenswrapper[4922]: I1128 06:53:05.572264 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:05 crc kubenswrapper[4922]: I1128 06:53:05.572333 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:05 crc kubenswrapper[4922]: I1128 06:53:05.572351 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:05 crc kubenswrapper[4922]: I1128 06:53:05.572381 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:05 crc kubenswrapper[4922]: I1128 06:53:05.572399 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:05Z","lastTransitionTime":"2025-11-28T06:53:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:53:05 crc kubenswrapper[4922]: I1128 06:53:05.573368 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-xm948" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"28b50482-ffec-406c-9ff9-9604bce5d5d5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8329d62934c99c8561a76ffc873ea44b49549e6be23697ea4003dcd42798f914\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:53:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-76zhj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerSta
tuses\\\":[{\\\"containerID\\\":\\\"cri-o://ea8d65e8ee7eb92aa290261e8f51b41b46819bc513aaca5c3bbdf3aa90a4acf1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ea8d65e8ee7eb92aa290261e8f51b41b46819bc513aaca5c3bbdf3aa90a4acf1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T06:52:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-76zhj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b52be6274c308e05417b43b1a405e2156837ac5b1e751cf1bf07f1820e5072c1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b52be6274c308e05417b43b1a405e2156837ac5b1e751cf1bf07f1820e5072c1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T06:52:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T06:52:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-76zhj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2e49c9118ef8a51223ca7a5ed0e966458c25e01d5fe81bffeecf85e3c958177a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2e49c9118ef8a51223ca7a5ed0e966458c25e01d5fe81bffeecf85e3c958177a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T06:52:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T06:
52:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-76zhj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://10808a5202288b64a0583c474df43f7ad4846b876c071e3797e9403416dbd4e5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://10808a5202288b64a0583c474df43f7ad4846b876c071e3797e9403416dbd4e5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T06:52:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T06:52:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-76zhj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f17f7a3f2eae1a08db3ab5d237156d3555583ed11bdf121c4138312913bc2ad1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f17f7a3f2eae1a08db3ab5d237156d3555583ed11bdf121c4138312913bc2ad1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T06:53:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T06:52:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-76zhj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8c558e4eb231b58b703d70d6da8454e7128ac7fc7138897bedcbdeb9e801a08b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-c
ni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8c558e4eb231b58b703d70d6da8454e7128ac7fc7138897bedcbdeb9e801a08b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T06:53:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T06:53:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-76zhj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T06:52:54Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-xm948\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:05Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:05 crc kubenswrapper[4922]: I1128 06:53:05.590255 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-z5d9x" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"21211ec8-3baf-4230-9cd8-c641f6bdc0e1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d42d2096b746c6f0a9210409101237a8b283b9baa914633aefc36971712ec634\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g4hdz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\
"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T06:52:57Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-z5d9x\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:05Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:05 crc kubenswrapper[4922]: I1128 06:53:05.605169 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://60275d4709ecec957c37c52e762628422258288394fb23bb6190e98253977288\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d7be069e52fb9de73b7d4297a34eaab68e25c764ed60172e87146259d0d0b6ed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:05Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:05 crc kubenswrapper[4922]: I1128 06:53:05.619552 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8de96806bba4cb76f53ccf39f1188732d96955671049b550589a836ed21e0616\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:05Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:05 crc kubenswrapper[4922]: I1128 06:53:05.675570 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:05 crc kubenswrapper[4922]: I1128 06:53:05.675616 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:05 crc kubenswrapper[4922]: I1128 06:53:05.675630 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:05 crc kubenswrapper[4922]: I1128 06:53:05.675651 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:05 crc kubenswrapper[4922]: I1128 06:53:05.675666 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:05Z","lastTransitionTime":"2025-11-28T06:53:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false 
reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:53:05 crc kubenswrapper[4922]: I1128 06:53:05.697751 4922 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-7gdxt_ac5c6b67-2037-400e-8e03-845b47d8ca67/ovnkube-controller/0.log" Nov 28 06:53:05 crc kubenswrapper[4922]: I1128 06:53:05.703826 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-7gdxt" event={"ID":"ac5c6b67-2037-400e-8e03-845b47d8ca67","Type":"ContainerStarted","Data":"3ead32a1ed979f2f568f830c0c57ee284644824f41055dda621deb734bd7dd1b"} Nov 28 06:53:05 crc kubenswrapper[4922]: I1128 06:53:05.704002 4922 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Nov 28 06:53:05 crc kubenswrapper[4922]: I1128 06:53:05.722954 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-h8wk6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0498340a-5e95-42bf-a0a6-8ac89a6b8858\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://19e8a18b97500977a51210206bba89633ff9939e989bfe1a6e471f5a23c83520\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zf7vq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b903fc60349ec7d5004f23ec933dce13d646a2c343819495564005dfb4813314\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"s
tartedAt\\\":\\\"2025-11-28T06:52:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zf7vq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T06:52:54Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-h8wk6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:05Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:05 crc kubenswrapper[4922]: I1128 06:53:05.743958 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:05Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:05 crc kubenswrapper[4922]: I1128 06:53:05.764083 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:57Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:57Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4fb859dbed8c4c25965a739a2bd823f90f867a3bb629206994858cd62ba597ff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:05Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:05 crc kubenswrapper[4922]: I1128 06:53:05.780712 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:05 crc kubenswrapper[4922]: I1128 06:53:05.780751 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 28 06:53:05 crc kubenswrapper[4922]: I1128 06:53:05.782406 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:05 crc kubenswrapper[4922]: I1128 06:53:05.782451 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:05 crc kubenswrapper[4922]: I1128 06:53:05.782466 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:05Z","lastTransitionTime":"2025-11-28T06:53:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:53:05 crc kubenswrapper[4922]: I1128 06:53:05.785645 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:05Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:05 crc kubenswrapper[4922]: I1128 06:53:05.816688 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-7gdxt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ac5c6b67-2037-400e-8e03-845b47d8ca67\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d0f038ffe6c8ec5ec836379b827ecb71967119efca3f2cd4ce5d3006fa96a153\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dtxdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2c1a545fef7aec25b21dd112c54eeb7b94306cafaf4732a50de9ff10a409c443\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dtxdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6e9942f8e8cdf6ffd04d46b29be5a53fcc8c4cb5d79f50d7747e2fe6eb744ffd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dtxdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e91ea6957087e19d43626438d68bf92a19a6f35325de570738354edd61da5bb0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dtxdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4f56366e2355663eb896c09a6710166b89f794a7f17cb317f6c4299e8b317782\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dtxdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://403e86b4b99e8bc85be77996288705f6ea8ff5da8e9b7dfb09749f4341065b55\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dtxdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3ead32a1ed979f2f568f830c0c57ee284644824f
41055dda621deb734bd7dd1b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://67c452725d82ca16f6b13f31f18d78e040d878900953e2b1c2d43afa9b4bbdc0\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-28T06:53:03Z\\\",\\\"message\\\":\\\"e (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1128 06:53:03.775357 6198 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI1128 06:53:03.775409 6198 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI1128 06:53:03.775460 6198 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI1128 06:53:03.775477 6198 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI1128 06:53:03.775513 6198 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI1128 06:53:03.775617 6198 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI1128 06:53:03.775639 6198 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI1128 06:53:03.775644 6198 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI1128 06:53:03.775663 6198 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI1128 06:53:03.775679 6198 handler.go:208] Removed *v1.Node event handler 2\\\\nI1128 06:53:03.775682 6198 factory.go:656] Stopping watch factory\\\\nI1128 06:53:03.775691 6198 handler.go:208] Removed *v1.Node event handler 7\\\\nI1128 06:53:03.775699 6198 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI1128 06:53:03.775703 6198 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI1128 06:53:03.775714 6198 handler.go:208] Removed *v1.Namespace 
ev\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T06:53:01Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:53:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dtxdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d31c99f96b8af9c7b9c47c4ccd0b6485b23c9eacfe293667038042df8330d8a9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dtxdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"con
tainerID\\\":\\\"cri-o://3aaa3ffb5b99838c7969b3e241ad9d7a0bc4797fac3ec52280914abbb7ca1b24\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3aaa3ffb5b99838c7969b3e241ad9d7a0bc4797fac3ec52280914abbb7ca1b24\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T06:52:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dtxdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T06:52:54Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-7gdxt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:05Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:05 crc kubenswrapper[4922]: I1128 06:53:05.832937 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"78d1b075-6126-476f-8318-483aeaa7b542\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:35Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:35Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d8727c0c5c76c4071561c95b14554063c01a2d7d826bfef19624a346f7079096\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c63926690deadbe1a0b9bd4228c1be8c9d959ae52dc13540e1d7da0ef6ce2b44\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://12e55656cf2a2cadbef853f458f347aabe75bd6b644a48a8c320144a0bbb77f8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ff005273cedb4a3be19dafd07b2572733446d00c1aa3d89dec6dad79a9b37cb7\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://43f9db3464d315f5b596f15327b9e7cfc2935bb8f18492bcf4aa1b2d8e2e8951\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-28T06:52:53Z\\\",\\\"message\\\":\\\" to sync for 
client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file\\\\nI1128 06:52:53.805479 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\nI1128 06:52:53.805349 1 requestheader_controller.go:172] Starting RequestHeaderAuthRequestController\\\\nI1128 06:52:53.807455 1 shared_informer.go:313] Waiting for caches to sync for RequestHeaderAuthRequestController\\\\nI1128 06:52:53.805665 1 envvar.go:172] \\\\\\\"Feature gate default state\\\\\\\" feature=\\\\\\\"WatchListClient\\\\\\\" enabled=false\\\\nI1128 06:52:53.807562 1 envvar.go:172] \\\\\\\"Feature gate default state\\\\\\\" feature=\\\\\\\"InformerResourceVersion\\\\\\\" enabled=false\\\\nI1128 06:52:53.805922 1 tlsconfig.go:203] \\\\\\\"Loaded serving cert\\\\\\\" certName=\\\\\\\"serving-cert::/tmp/serving-cert-870734072/tls.crt::/tmp/serving-cert-870734072/tls.key\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"localhost\\\\\\\\\\\\\\\" [serving] validServingFor=[localhost] issuer=\\\\\\\\\\\\\\\"check-endpoints-signer@1764312757\\\\\\\\\\\\\\\" (2025-11-28 06:52:36 +0000 UTC to 2025-12-28 06:52:37 +0000 UTC (now=2025-11-28 06:52:53.805871396 +0000 UTC))\\\\\\\"\\\\nI1128 06:52:53.808356 1 named_certificates.go:53] \\\\\\\"Loaded SNI cert\\\\\\\" index=0 certName=\\\\\\\"self-signed loopback\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"apiserver-loopback-client@1764312768\\\\\\\\\\\\\\\" [serving] validServingFor=[apiserver-loopback-client] issuer=\\\\\\\\\\\\\\\"apiserver-loopback-client-ca@1764312768\\\\\\\\\\\\\\\" (2025-11-28 05:52:47 +0000 UTC to 2026-11-28 05:52:47 +0000 UTC (now=2025-11-28 06:52:53.808326937 +0000 UTC))\\\\\\\"\\\\nI1128 06:52:53.808434 1 secure_serving.go:213] Serving securely on [::]:17697\\\\nI1128 06:52:53.808491 1 genericapiserver.go:683] [graceful-termination] waiting for shutdown to be initiated\\\\nI1128 06:52:53.805962 1 dynamic_serving_content.go:135] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-870734072/tls.crt::/tmp/serving-cert-870734072/tls.key\\\\\\\"\\\\nI1128 06:52:53.808872 1 tlsconfig.go:243] \\\\\\\"Starting DynamicServingCertificateController\\\\\\\"\\\\nF1128 06:52:53.810397 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T06:52:37Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cd17aa15325fbe3a40b81cea5bfec137ef02a8dd2d5f8393be4d206a8360ba89\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:37Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9a1a9e0fb2169a4caf2986906a87573648458725e2571ec377704856d3c16b47\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9a1a9e0fb2169a4caf2986906a87573648458725e2571ec377704856d3c16b47\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T06:52:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T06:52:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T06:52:35Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:05Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:05 crc kubenswrapper[4922]: I1128 06:53:05.846435 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-w9zxj" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"e1f29751-83a6-4469-b733-50e654026f8c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://679d64be6eca63ee652a4cb6cc5432c6ce549d1de243bdfbde115af634e770e3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ghdft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T06:52:54Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-w9zxj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:05Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:05 crc kubenswrapper[4922]: I1128 06:53:05.863795 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-jgzjd" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b05f16bb-1729-4fd8-883a-4fb960bf4cff\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://099001160eba2d5545fdd42a6fac2b9842549a64b5d3fe093c274e073a156e5a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fwd5b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T06:52:54Z\\\"}}\" for pod \"openshift-multus\"/\"multus-jgzjd\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:05Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:05 crc kubenswrapper[4922]: I1128 06:53:05.884684 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-xm948" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"28b50482-ffec-406c-9ff9-9604bce5d5d5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8329d62934c99c8561a76ffc873ea44b49549e6be23697ea4003dcd42798f914\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:53:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-76zhj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ea8d65e8ee7eb92aa290261e8f51b41b46819bc513aaca5c3bbdf3aa90a4acf1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ea8d65e8ee7eb92aa290261e8f51b41b46819bc513aaca5c3bbdf3aa90a4acf1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T06:52:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.
io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-76zhj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b52be6274c308e05417b43b1a405e2156837ac5b1e751cf1bf07f1820e5072c1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b52be6274c308e05417b43b1a405e2156837ac5b1e751cf1bf07f1820e5072c1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T06:52:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T06:52:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-76zhj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2e49c9118ef8a51223ca7a5ed0e966458c25e01d5fe81bffeecf85e3c958177a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2e49c9118ef8a51223ca7a5ed0e966458c25e01d5fe81bffeecf85e3c958177a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T06:52:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T06:52:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-76zhj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://10808a5202288b64a0583c474df43f7ad4846b876c071e3797e9403416dbd4e5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://10808a5202288b64a0583c474df43f7ad4846b876c071e3797e9403416dbd4e5\\\",\\\"exitCode\\\":0,\\\
"finishedAt\\\":\\\"2025-11-28T06:52:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T06:52:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-76zhj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f17f7a3f2eae1a08db3ab5d237156d3555583ed11bdf121c4138312913bc2ad1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f17f7a3f2eae1a08db3ab5d237156d3555583ed11bdf121c4138312913bc2ad1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T06:53:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T06:52:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-76zhj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8c558e4eb231b58b703d70d6da8454e7128ac7fc7138897bedcbdeb9e801a08b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8c558e4eb231b58b703d70d6da8454e7128ac7fc7138897bedcbdeb9e801a08b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T06:53:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T06:53:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-76zhj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T06:52:54Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-xm948\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has 
expired or is not yet valid: current time 2025-11-28T06:53:05Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:05 crc kubenswrapper[4922]: I1128 06:53:05.886922 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:05 crc kubenswrapper[4922]: I1128 06:53:05.887040 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:05 crc kubenswrapper[4922]: I1128 06:53:05.887066 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:05 crc kubenswrapper[4922]: I1128 06:53:05.887096 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:05 crc kubenswrapper[4922]: I1128 06:53:05.887119 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:05Z","lastTransitionTime":"2025-11-28T06:53:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:53:05 crc kubenswrapper[4922]: I1128 06:53:05.900260 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-z5d9x" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"21211ec8-3baf-4230-9cd8-c641f6bdc0e1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d42d2096b746c6f0a9210409101237a8b283b9baa914633aefc36971712ec634\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g4hdz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\
"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T06:52:57Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-z5d9x\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:05Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:05 crc kubenswrapper[4922]: I1128 06:53:05.919056 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f54a7fc3-dff2-4919-91a9-7defe17b717e\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2b39d53d3c00be07ad6ed44fb37961669118efd3f0a953a39e05c90c9fc30319\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1c84fe4f54f9b9fc76f23f13aaee875e9d127be3dec3e3952893436fb9d94936\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bf8a543b9af64068e2164c1fb7fcf8117e36a3f1a6c6e40df2a63da8fea86612\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0
a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7ef4b355167e11c82fb8067373d0d1b4ec5551157e683043c98f9249082795d2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T06:52:35Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:05Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:05 crc kubenswrapper[4922]: I1128 06:53:05.934014 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:05Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:05 crc kubenswrapper[4922]: I1128 06:53:05.950148 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8de96806bba4cb76f53ccf39f1188732d96955671049b550589a836ed21e0616\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:05Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:05 crc kubenswrapper[4922]: I1128 06:53:05.968867 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://60275d4709ecec957c37c52e762628422258288394fb23bb6190e98253977288\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d7be069e52fb9de73b7d4297a34eaab68e25c764ed60172e87146259d0d0b6ed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:05Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:05 crc kubenswrapper[4922]: I1128 06:53:05.990584 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:05 crc kubenswrapper[4922]: I1128 06:53:05.990652 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:05 crc kubenswrapper[4922]: I1128 06:53:05.990671 4922 kubelet_node_status.go:724] "Recording event message for node" 
node="crc" event="NodeHasSufficientPID" Nov 28 06:53:05 crc kubenswrapper[4922]: I1128 06:53:05.990703 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:05 crc kubenswrapper[4922]: I1128 06:53:05.990724 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:05Z","lastTransitionTime":"2025-11-28T06:53:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:53:06 crc kubenswrapper[4922]: I1128 06:53:06.094197 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:06 crc kubenswrapper[4922]: I1128 06:53:06.094283 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:06 crc kubenswrapper[4922]: I1128 06:53:06.094300 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:06 crc kubenswrapper[4922]: I1128 06:53:06.094328 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:06 crc kubenswrapper[4922]: I1128 06:53:06.094345 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:06Z","lastTransitionTime":"2025-11-28T06:53:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:53:06 crc kubenswrapper[4922]: I1128 06:53:06.198064 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:06 crc kubenswrapper[4922]: I1128 06:53:06.198133 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:06 crc kubenswrapper[4922]: I1128 06:53:06.198157 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:06 crc kubenswrapper[4922]: I1128 06:53:06.198186 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:06 crc kubenswrapper[4922]: I1128 06:53:06.198205 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:06Z","lastTransitionTime":"2025-11-28T06:53:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 06:53:06 crc kubenswrapper[4922]: I1128 06:53:06.301474 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:06 crc kubenswrapper[4922]: I1128 06:53:06.301538 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:06 crc kubenswrapper[4922]: I1128 06:53:06.301556 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:06 crc kubenswrapper[4922]: I1128 06:53:06.301579 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:06 crc kubenswrapper[4922]: I1128 06:53:06.301597 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:06Z","lastTransitionTime":"2025-11-28T06:53:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:53:06 crc kubenswrapper[4922]: I1128 06:53:06.397660 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 06:53:06 crc kubenswrapper[4922]: I1128 06:53:06.397763 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 06:53:06 crc kubenswrapper[4922]: I1128 06:53:06.397885 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 06:53:06 crc kubenswrapper[4922]: E1128 06:53:06.398006 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 06:53:06 crc kubenswrapper[4922]: E1128 06:53:06.398154 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 06:53:06 crc kubenswrapper[4922]: E1128 06:53:06.398284 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 06:53:06 crc kubenswrapper[4922]: I1128 06:53:06.404825 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:06 crc kubenswrapper[4922]: I1128 06:53:06.404876 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:06 crc kubenswrapper[4922]: I1128 06:53:06.404896 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:06 crc kubenswrapper[4922]: I1128 06:53:06.404918 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:06 crc kubenswrapper[4922]: I1128 06:53:06.404934 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:06Z","lastTransitionTime":"2025-11-28T06:53:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:53:06 crc kubenswrapper[4922]: I1128 06:53:06.508199 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:06 crc kubenswrapper[4922]: I1128 06:53:06.508288 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:06 crc kubenswrapper[4922]: I1128 06:53:06.508308 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:06 crc kubenswrapper[4922]: I1128 06:53:06.508335 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:06 crc kubenswrapper[4922]: I1128 06:53:06.508354 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:06Z","lastTransitionTime":"2025-11-28T06:53:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 06:53:06 crc kubenswrapper[4922]: I1128 06:53:06.611160 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:06 crc kubenswrapper[4922]: I1128 06:53:06.611264 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:06 crc kubenswrapper[4922]: I1128 06:53:06.611290 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:06 crc kubenswrapper[4922]: I1128 06:53:06.611326 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:06 crc kubenswrapper[4922]: I1128 06:53:06.611348 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:06Z","lastTransitionTime":"2025-11-28T06:53:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:53:06 crc kubenswrapper[4922]: I1128 06:53:06.707514 4922 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Nov 28 06:53:06 crc kubenswrapper[4922]: I1128 06:53:06.714527 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:06 crc kubenswrapper[4922]: I1128 06:53:06.714612 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:06 crc kubenswrapper[4922]: I1128 06:53:06.714635 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:06 crc kubenswrapper[4922]: I1128 06:53:06.714667 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:06 crc kubenswrapper[4922]: I1128 06:53:06.714693 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:06Z","lastTransitionTime":"2025-11-28T06:53:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 06:53:06 crc kubenswrapper[4922]: I1128 06:53:06.817970 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:06 crc kubenswrapper[4922]: I1128 06:53:06.818042 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:06 crc kubenswrapper[4922]: I1128 06:53:06.818060 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:06 crc kubenswrapper[4922]: I1128 06:53:06.818087 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:06 crc kubenswrapper[4922]: I1128 06:53:06.818104 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:06Z","lastTransitionTime":"2025-11-28T06:53:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:53:06 crc kubenswrapper[4922]: I1128 06:53:06.920608 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:06 crc kubenswrapper[4922]: I1128 06:53:06.920686 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:06 crc kubenswrapper[4922]: I1128 06:53:06.920704 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:06 crc kubenswrapper[4922]: I1128 06:53:06.920730 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:06 crc kubenswrapper[4922]: I1128 06:53:06.920748 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:06Z","lastTransitionTime":"2025-11-28T06:53:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:53:07 crc kubenswrapper[4922]: I1128 06:53:07.023804 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:07 crc kubenswrapper[4922]: I1128 06:53:07.023872 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:07 crc kubenswrapper[4922]: I1128 06:53:07.023889 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:07 crc kubenswrapper[4922]: I1128 06:53:07.023914 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:07 crc kubenswrapper[4922]: I1128 06:53:07.023931 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:07Z","lastTransitionTime":"2025-11-28T06:53:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 06:53:07 crc kubenswrapper[4922]: I1128 06:53:07.127517 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:07 crc kubenswrapper[4922]: I1128 06:53:07.127580 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:07 crc kubenswrapper[4922]: I1128 06:53:07.127603 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:07 crc kubenswrapper[4922]: I1128 06:53:07.127629 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:07 crc kubenswrapper[4922]: I1128 06:53:07.127646 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:07Z","lastTransitionTime":"2025-11-28T06:53:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:53:07 crc kubenswrapper[4922]: I1128 06:53:07.230758 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:07 crc kubenswrapper[4922]: I1128 06:53:07.230822 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:07 crc kubenswrapper[4922]: I1128 06:53:07.230840 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:07 crc kubenswrapper[4922]: I1128 06:53:07.230865 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:07 crc kubenswrapper[4922]: I1128 06:53:07.230883 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:07Z","lastTransitionTime":"2025-11-28T06:53:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:53:07 crc kubenswrapper[4922]: I1128 06:53:07.299878 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:07 crc kubenswrapper[4922]: I1128 06:53:07.300021 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:07 crc kubenswrapper[4922]: I1128 06:53:07.300045 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:07 crc kubenswrapper[4922]: I1128 06:53:07.300068 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:07 crc kubenswrapper[4922]: I1128 06:53:07.300085 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:07Z","lastTransitionTime":"2025-11-28T06:53:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 06:53:07 crc kubenswrapper[4922]: E1128 06:53:07.324935 4922 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T06:53:07Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:07Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T06:53:07Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:07Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T06:53:07Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:07Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T06:53:07Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:07Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"3ea1e6bb-61b7-453d-b9cb-290e4e3cdf54\\\",\\\"systemUUID\\\":\\\"a83ea3b2-2af6-4e19-83ef-b63bfe4faed4\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:07Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:07 crc kubenswrapper[4922]: I1128 06:53:07.330726 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:07 crc kubenswrapper[4922]: I1128 06:53:07.330779 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 28 06:53:07 crc kubenswrapper[4922]: I1128 06:53:07.330798 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:07 crc kubenswrapper[4922]: I1128 06:53:07.330825 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:07 crc kubenswrapper[4922]: I1128 06:53:07.330842 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:07Z","lastTransitionTime":"2025-11-28T06:53:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:53:07 crc kubenswrapper[4922]: E1128 06:53:07.374998 4922 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T06:53:07Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:07Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T06:53:07Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:07Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T06:53:07Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:07Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T06:53:07Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:07Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"3ea1e6bb-61b7-453d-b9cb-290e4e3cdf54\\\",\\\"systemUUID\\\":\\\"a83ea3b2-2af6-4e19-83ef-b63bfe4faed4\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:07Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:07 crc kubenswrapper[4922]: I1128 06:53:07.380726 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:07 crc kubenswrapper[4922]: I1128 06:53:07.381010 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 28 06:53:07 crc kubenswrapper[4922]: I1128 06:53:07.381202 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:07 crc kubenswrapper[4922]: I1128 06:53:07.381450 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:07 crc kubenswrapper[4922]: I1128 06:53:07.381664 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:07Z","lastTransitionTime":"2025-11-28T06:53:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:53:07 crc kubenswrapper[4922]: E1128 06:53:07.404776 4922 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T06:53:07Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:07Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T06:53:07Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:07Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T06:53:07Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:07Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T06:53:07Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:07Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"3ea1e6bb-61b7-453d-b9cb-290e4e3cdf54\\\",\\\"systemUUID\\\":\\\"a83ea3b2-2af6-4e19-83ef-b63bfe4faed4\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:07Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:07 crc kubenswrapper[4922]: I1128 06:53:07.409859 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:07 crc kubenswrapper[4922]: I1128 06:53:07.410105 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 28 06:53:07 crc kubenswrapper[4922]: I1128 06:53:07.410371 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:07 crc kubenswrapper[4922]: I1128 06:53:07.410583 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:07 crc kubenswrapper[4922]: I1128 06:53:07.410798 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:07Z","lastTransitionTime":"2025-11-28T06:53:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:53:07 crc kubenswrapper[4922]: E1128 06:53:07.431002 4922 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T06:53:07Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:07Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T06:53:07Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:07Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T06:53:07Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:07Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T06:53:07Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:07Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"3ea1e6bb-61b7-453d-b9cb-290e4e3cdf54\\\",\\\"systemUUID\\\":\\\"a83ea3b2-2af6-4e19-83ef-b63bfe4faed4\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:07Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:07 crc kubenswrapper[4922]: I1128 06:53:07.436847 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:07 crc kubenswrapper[4922]: I1128 06:53:07.436921 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 28 06:53:07 crc kubenswrapper[4922]: I1128 06:53:07.436942 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:07 crc kubenswrapper[4922]: I1128 06:53:07.436972 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:07 crc kubenswrapper[4922]: I1128 06:53:07.436994 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:07Z","lastTransitionTime":"2025-11-28T06:53:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:53:07 crc kubenswrapper[4922]: E1128 06:53:07.458042 4922 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T06:53:07Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:07Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T06:53:07Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:07Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T06:53:07Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:07Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T06:53:07Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:07Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"3ea1e6bb-61b7-453d-b9cb-290e4e3cdf54\\\",\\\"systemUUID\\\":\\\"a83ea3b2-2af6-4e19-83ef-b63bfe4faed4\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:07Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:07 crc kubenswrapper[4922]: E1128 06:53:07.458295 4922 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Nov 28 06:53:07 crc kubenswrapper[4922]: I1128 06:53:07.460470 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Nov 28 06:53:07 crc kubenswrapper[4922]: I1128 06:53:07.460674 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:07 crc kubenswrapper[4922]: I1128 06:53:07.460852 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:07 crc kubenswrapper[4922]: I1128 06:53:07.461019 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:07 crc kubenswrapper[4922]: I1128 06:53:07.461183 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:07Z","lastTransitionTime":"2025-11-28T06:53:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:53:07 crc kubenswrapper[4922]: I1128 06:53:07.503073 4922 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-n9b52"] Nov 28 06:53:07 crc kubenswrapper[4922]: I1128 06:53:07.503745 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-n9b52" Nov 28 06:53:07 crc kubenswrapper[4922]: I1128 06:53:07.507874 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-kubernetes-control-plane-dockercfg-gs7dd" Nov 28 06:53:07 crc kubenswrapper[4922]: I1128 06:53:07.508148 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-control-plane-metrics-cert" Nov 28 06:53:07 crc kubenswrapper[4922]: I1128 06:53:07.525047 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-h8wk6" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0498340a-5e95-42bf-a0a6-8ac89a6b8858\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://19e8a18b97500977a51210206bba89633ff9939e989bfe1a6e471f5a23c83520\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zf7vq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b903fc60349ec7d5004f23ec933dce13d646a2c343819495564005dfb4813314\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zf7vq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T06:52:54Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-h8wk6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:07Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:07 crc kubenswrapper[4922]: I1128 06:53:07.551703 4922 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:07Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:07 crc kubenswrapper[4922]: I1128 06:53:07.564283 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:07 crc kubenswrapper[4922]: I1128 06:53:07.564371 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:07 crc kubenswrapper[4922]: I1128 06:53:07.564392 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:07 crc kubenswrapper[4922]: I1128 06:53:07.564416 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:07 crc kubenswrapper[4922]: I1128 06:53:07.564474 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:07Z","lastTransitionTime":"2025-11-28T06:53:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 06:53:07 crc kubenswrapper[4922]: I1128 06:53:07.570518 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:57Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:57Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4fb859dbed8c4c25965a739a2bd823f90f867a3bb629206994858cd62ba597ff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:07Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:07 crc kubenswrapper[4922]: I1128 06:53:07.589568 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:07Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:07 crc kubenswrapper[4922]: I1128 06:53:07.623168 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-7gdxt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ac5c6b67-2037-400e-8e03-845b47d8ca67\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d0f038ffe6c8ec5ec836379b827ecb71967119efca3f2cd4ce5d3006fa96a153\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dtxdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2c1a545fef7aec25b21dd112c54eeb7b94306cafaf4732a50de9ff10a409c443\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dtxdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6e9942f8e8cdf6ffd04d46b29be5a53fcc8c4cb5d79f50d7747e2fe6eb744ffd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dtxdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e91ea6957087e19d43626438d68bf92a19a6f35325de570738354edd61da5bb0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dtxdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4f56366e2355663eb896c09a6710166b89f794a7f17cb317f6c4299e8b317782\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dtxdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://403e86b4b99e8bc85be77996288705f6ea8ff5da8e9b7dfb09749f4341065b55\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dtxdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3ead32a1ed979f2f568f830c0c57ee284644824f
41055dda621deb734bd7dd1b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://67c452725d82ca16f6b13f31f18d78e040d878900953e2b1c2d43afa9b4bbdc0\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-28T06:53:03Z\\\",\\\"message\\\":\\\"e (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1128 06:53:03.775357 6198 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI1128 06:53:03.775409 6198 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI1128 06:53:03.775460 6198 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI1128 06:53:03.775477 6198 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI1128 06:53:03.775513 6198 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI1128 06:53:03.775617 6198 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI1128 06:53:03.775639 6198 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI1128 06:53:03.775644 6198 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI1128 06:53:03.775663 6198 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI1128 06:53:03.775679 6198 handler.go:208] Removed *v1.Node event handler 2\\\\nI1128 06:53:03.775682 6198 factory.go:656] Stopping watch factory\\\\nI1128 06:53:03.775691 6198 handler.go:208] Removed *v1.Node event handler 7\\\\nI1128 06:53:03.775699 6198 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI1128 06:53:03.775703 6198 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI1128 06:53:03.775714 6198 handler.go:208] Removed *v1.Namespace 
ev\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T06:53:01Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:53:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dtxdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d31c99f96b8af9c7b9c47c4ccd0b6485b23c9eacfe293667038042df8330d8a9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dtxdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"con
tainerID\\\":\\\"cri-o://3aaa3ffb5b99838c7969b3e241ad9d7a0bc4797fac3ec52280914abbb7ca1b24\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3aaa3ffb5b99838c7969b3e241ad9d7a0bc4797fac3ec52280914abbb7ca1b24\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T06:52:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dtxdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T06:52:54Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-7gdxt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:07Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:07 crc kubenswrapper[4922]: I1128 06:53:07.642896 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-n9b52" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"31348e3e-fe58-4426-98b7-bd9dd404283b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:07Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:07Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:07Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy 
ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vkhgr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vkhgr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T06:53:07Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-n9b52\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:07Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:07 crc kubenswrapper[4922]: I1128 06:53:07.667912 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:07 crc kubenswrapper[4922]: I1128 06:53:07.667997 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:07 crc kubenswrapper[4922]: I1128 06:53:07.668016 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:07 crc kubenswrapper[4922]: I1128 06:53:07.668045 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:07 crc kubenswrapper[4922]: I1128 06:53:07.668065 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:07Z","lastTransitionTime":"2025-11-28T06:53:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 06:53:07 crc kubenswrapper[4922]: I1128 06:53:07.670701 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/31348e3e-fe58-4426-98b7-bd9dd404283b-env-overrides\") pod \"ovnkube-control-plane-749d76644c-n9b52\" (UID: \"31348e3e-fe58-4426-98b7-bd9dd404283b\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-n9b52" Nov 28 06:53:07 crc kubenswrapper[4922]: I1128 06:53:07.670782 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/31348e3e-fe58-4426-98b7-bd9dd404283b-ovnkube-config\") pod \"ovnkube-control-plane-749d76644c-n9b52\" (UID: \"31348e3e-fe58-4426-98b7-bd9dd404283b\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-n9b52" Nov 28 06:53:07 crc kubenswrapper[4922]: I1128 06:53:07.670894 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/31348e3e-fe58-4426-98b7-bd9dd404283b-ovn-control-plane-metrics-cert\") pod \"ovnkube-control-plane-749d76644c-n9b52\" (UID: \"31348e3e-fe58-4426-98b7-bd9dd404283b\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-n9b52" Nov 28 06:53:07 crc kubenswrapper[4922]: I1128 06:53:07.670942 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vkhgr\" (UniqueName: \"kubernetes.io/projected/31348e3e-fe58-4426-98b7-bd9dd404283b-kube-api-access-vkhgr\") pod \"ovnkube-control-plane-749d76644c-n9b52\" (UID: \"31348e3e-fe58-4426-98b7-bd9dd404283b\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-n9b52" Nov 28 06:53:07 crc kubenswrapper[4922]: I1128 06:53:07.674519 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"78d1b075-6126-476f-8318-483aeaa7b542\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:35Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:35Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d8727c0c5c76c4071561c95b14554063c01a2d7d826bfef19624a346f7079096\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c63926690deadbe1a0b9bd4228c1be8c9d959ae52dc13540e1d7da0ef6ce2b44\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://12e55656cf2a2cadbef853f458f347aabe75bd6b644a48a8c320144a0bbb77f8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ff005273cedb4a3be19dafd07b2572733446d00c1aa3d89dec6dad79a9b37cb7\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://43f9db3464d315f5b596f15327b9e7cfc2935bb8f18492bcf4aa1b2d8e2e8951\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-28T06:52:53Z\\\",\\\"message\\\":\\\" to sync for 
client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file\\\\nI1128 06:52:53.805479 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\nI1128 06:52:53.805349 1 requestheader_controller.go:172] Starting RequestHeaderAuthRequestController\\\\nI1128 06:52:53.807455 1 shared_informer.go:313] Waiting for caches to sync for RequestHeaderAuthRequestController\\\\nI1128 06:52:53.805665 1 envvar.go:172] \\\\\\\"Feature gate default state\\\\\\\" feature=\\\\\\\"WatchListClient\\\\\\\" enabled=false\\\\nI1128 06:52:53.807562 1 envvar.go:172] \\\\\\\"Feature gate default state\\\\\\\" feature=\\\\\\\"InformerResourceVersion\\\\\\\" enabled=false\\\\nI1128 06:52:53.805922 1 tlsconfig.go:203] \\\\\\\"Loaded serving cert\\\\\\\" certName=\\\\\\\"serving-cert::/tmp/serving-cert-870734072/tls.crt::/tmp/serving-cert-870734072/tls.key\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"localhost\\\\\\\\\\\\\\\" [serving] validServingFor=[localhost] issuer=\\\\\\\\\\\\\\\"check-endpoints-signer@1764312757\\\\\\\\\\\\\\\" (2025-11-28 06:52:36 +0000 UTC to 2025-12-28 06:52:37 +0000 UTC (now=2025-11-28 06:52:53.805871396 +0000 UTC))\\\\\\\"\\\\nI1128 06:52:53.808356 1 named_certificates.go:53] \\\\\\\"Loaded SNI cert\\\\\\\" index=0 certName=\\\\\\\"self-signed loopback\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"apiserver-loopback-client@1764312768\\\\\\\\\\\\\\\" [serving] validServingFor=[apiserver-loopback-client] issuer=\\\\\\\\\\\\\\\"apiserver-loopback-client-ca@1764312768\\\\\\\\\\\\\\\" (2025-11-28 05:52:47 +0000 UTC to 2026-11-28 05:52:47 +0000 UTC (now=2025-11-28 06:52:53.808326937 +0000 UTC))\\\\\\\"\\\\nI1128 06:52:53.808434 1 secure_serving.go:213] Serving securely on [::]:17697\\\\nI1128 06:52:53.808491 1 genericapiserver.go:683] [graceful-termination] waiting for shutdown to be initiated\\\\nI1128 06:52:53.805962 1 dynamic_serving_content.go:135] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-870734072/tls.crt::/tmp/serving-cert-870734072/tls.key\\\\\\\"\\\\nI1128 06:52:53.808872 1 tlsconfig.go:243] \\\\\\\"Starting DynamicServingCertificateController\\\\\\\"\\\\nF1128 06:52:53.810397 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T06:52:37Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cd17aa15325fbe3a40b81cea5bfec137ef02a8dd2d5f8393be4d206a8360ba89\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:37Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9a1a9e0fb2169a4caf2986906a87573648458725e2571ec377704856d3c16b47\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9a1a9e0fb2169a4caf2986906a87573648458725e2571ec377704856d3c16b47\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T06:52:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T06:52:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T06:52:35Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:07Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:07 crc kubenswrapper[4922]: I1128 06:53:07.695513 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-w9zxj" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"e1f29751-83a6-4469-b733-50e654026f8c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://679d64be6eca63ee652a4cb6cc5432c6ce549d1de243bdfbde115af634e770e3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ghdft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T06:52:54Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-w9zxj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:07Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:07 crc kubenswrapper[4922]: I1128 06:53:07.720125 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-jgzjd" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b05f16bb-1729-4fd8-883a-4fb960bf4cff\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://099001160eba2d5545fdd42a6fac2b9842549a64b5d3fe093c274e073a156e5a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fwd5b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T06:52:54Z\\\"}}\" for pod \"openshift-multus\"/\"multus-jgzjd\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:07Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:07 crc kubenswrapper[4922]: I1128 06:53:07.744389 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-xm948" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"28b50482-ffec-406c-9ff9-9604bce5d5d5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8329d62934c99c8561a76ffc873ea44b49549e6be23697ea4003dcd42798f914\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:53:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-76zhj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ea8d65e8ee7eb92aa290261e8f51b41b46819bc513aaca5c3bbdf3aa90a4acf1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ea8d65e8ee7eb92aa290261e8f51b41b46819bc513aaca5c3bbdf3aa90a4acf1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T06:52:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.
io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-76zhj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b52be6274c308e05417b43b1a405e2156837ac5b1e751cf1bf07f1820e5072c1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b52be6274c308e05417b43b1a405e2156837ac5b1e751cf1bf07f1820e5072c1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T06:52:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T06:52:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-76zhj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2e49c9118ef8a51223ca7a5ed0e966458c25e01d5fe81bffeecf85e3c958177a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2e49c9118ef8a51223ca7a5ed0e966458c25e01d5fe81bffeecf85e3c958177a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T06:52:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T06:52:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-76zhj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://10808a5202288b64a0583c474df43f7ad4846b876c071e3797e9403416dbd4e5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://10808a5202288b64a0583c474df43f7ad4846b876c071e3797e9403416dbd4e5\\\",\\\"exitCode\\\":0,\\\
"finishedAt\\\":\\\"2025-11-28T06:52:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T06:52:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-76zhj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f17f7a3f2eae1a08db3ab5d237156d3555583ed11bdf121c4138312913bc2ad1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f17f7a3f2eae1a08db3ab5d237156d3555583ed11bdf121c4138312913bc2ad1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T06:53:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T06:52:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-76zhj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8c558e4eb231b58b703d70d6da8454e7128ac7fc7138897bedcbdeb9e801a08b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8c558e4eb231b58b703d70d6da8454e7128ac7fc7138897bedcbdeb9e801a08b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T06:53:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T06:53:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-76zhj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T06:52:54Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-xm948\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has 
expired or is not yet valid: current time 2025-11-28T06:53:07Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:07 crc kubenswrapper[4922]: I1128 06:53:07.756767 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-z5d9x" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"21211ec8-3baf-4230-9cd8-c641f6bdc0e1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d42d2096b746c6f0a9210409101237a8b283b9baa914633aefc36971712ec634\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g4hdz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T06:52:57Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-z5d9x\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:07Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:07 crc kubenswrapper[4922]: I1128 06:53:07.770996 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:07 crc kubenswrapper[4922]: I1128 06:53:07.771057 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:07 crc kubenswrapper[4922]: I1128 06:53:07.771074 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:07 crc kubenswrapper[4922]: I1128 06:53:07.771099 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:07 crc kubenswrapper[4922]: I1128 
06:53:07.771117 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:07Z","lastTransitionTime":"2025-11-28T06:53:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:53:07 crc kubenswrapper[4922]: I1128 06:53:07.771596 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/31348e3e-fe58-4426-98b7-bd9dd404283b-ovn-control-plane-metrics-cert\") pod \"ovnkube-control-plane-749d76644c-n9b52\" (UID: \"31348e3e-fe58-4426-98b7-bd9dd404283b\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-n9b52" Nov 28 06:53:07 crc kubenswrapper[4922]: I1128 06:53:07.771681 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vkhgr\" (UniqueName: \"kubernetes.io/projected/31348e3e-fe58-4426-98b7-bd9dd404283b-kube-api-access-vkhgr\") pod \"ovnkube-control-plane-749d76644c-n9b52\" (UID: \"31348e3e-fe58-4426-98b7-bd9dd404283b\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-n9b52" Nov 28 06:53:07 crc kubenswrapper[4922]: I1128 06:53:07.771759 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/31348e3e-fe58-4426-98b7-bd9dd404283b-ovnkube-config\") pod \"ovnkube-control-plane-749d76644c-n9b52\" (UID: \"31348e3e-fe58-4426-98b7-bd9dd404283b\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-n9b52" Nov 28 06:53:07 crc kubenswrapper[4922]: I1128 06:53:07.771806 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/31348e3e-fe58-4426-98b7-bd9dd404283b-env-overrides\") pod \"ovnkube-control-plane-749d76644c-n9b52\" (UID: \"31348e3e-fe58-4426-98b7-bd9dd404283b\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-n9b52" Nov 28 06:53:07 crc kubenswrapper[4922]: I1128 06:53:07.773693 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/31348e3e-fe58-4426-98b7-bd9dd404283b-env-overrides\") pod \"ovnkube-control-plane-749d76644c-n9b52\" (UID: \"31348e3e-fe58-4426-98b7-bd9dd404283b\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-n9b52" Nov 28 06:53:07 crc kubenswrapper[4922]: I1128 06:53:07.773716 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f54a7fc3-dff2-4919-91a9-7defe17b717e\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2b39d53d3c00be07ad6ed44fb37961669118efd3f0a953a39e05c90c9fc30319\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1c84fe4f54f9b9fc76f23f13aaee875e9d127be3dec3e3952893436fb9d94936\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bf8a543b9af64068e2164c1fb7fcf8117e36a3f1a6c6e40df2a63da8fea86612\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7ef4b355167e11c82fb8067373d0d1b4ec5551157e683043c98f9249082795d2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T06:52:35Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:07Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:07 crc kubenswrapper[4922]: I1128 06:53:07.775515 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/31348e3e-fe58-4426-98b7-bd9dd404283b-ovnkube-config\") pod \"ovnkube-control-plane-749d76644c-n9b52\" (UID: \"31348e3e-fe58-4426-98b7-bd9dd404283b\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-n9b52" Nov 28 06:53:07 crc kubenswrapper[4922]: I1128 06:53:07.784350 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/31348e3e-fe58-4426-98b7-bd9dd404283b-ovn-control-plane-metrics-cert\") pod \"ovnkube-control-plane-749d76644c-n9b52\" (UID: \"31348e3e-fe58-4426-98b7-bd9dd404283b\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-n9b52" Nov 28 06:53:07 crc kubenswrapper[4922]: I1128 06:53:07.795017 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:07Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:07 crc kubenswrapper[4922]: I1128 06:53:07.805472 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vkhgr\" (UniqueName: \"kubernetes.io/projected/31348e3e-fe58-4426-98b7-bd9dd404283b-kube-api-access-vkhgr\") pod \"ovnkube-control-plane-749d76644c-n9b52\" (UID: \"31348e3e-fe58-4426-98b7-bd9dd404283b\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-n9b52" Nov 28 06:53:07 crc kubenswrapper[4922]: I1128 06:53:07.815682 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8de96806bba4cb76f53ccf39f1188732d96955671049b550589a836ed21e0616\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:07Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:07 crc kubenswrapper[4922]: I1128 06:53:07.825672 4922 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-n9b52" Nov 28 06:53:07 crc kubenswrapper[4922]: I1128 06:53:07.834925 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://60275d4709ecec957c37c52e762628422258288394fb23bb6190e98253977288\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d7be069e52fb9de73b7d4297a34eaab68e25c764ed60172e87146259d0d0b6ed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:07Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:07 crc kubenswrapper[4922]: W1128 06:53:07.851833 4922 manager.go:1169] Failed to process watch event {EventType:0 
Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod31348e3e_fe58_4426_98b7_bd9dd404283b.slice/crio-9e4b665de4223189ed9faa9c49fd6df5daf80542cf04850e6e66dfa6817e10ba WatchSource:0}: Error finding container 9e4b665de4223189ed9faa9c49fd6df5daf80542cf04850e6e66dfa6817e10ba: Status 404 returned error can't find the container with id 9e4b665de4223189ed9faa9c49fd6df5daf80542cf04850e6e66dfa6817e10ba Nov 28 06:53:07 crc kubenswrapper[4922]: I1128 06:53:07.874683 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:07 crc kubenswrapper[4922]: I1128 06:53:07.874769 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:07 crc kubenswrapper[4922]: I1128 06:53:07.874785 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:07 crc kubenswrapper[4922]: I1128 06:53:07.874807 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:07 crc kubenswrapper[4922]: I1128 06:53:07.874822 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:07Z","lastTransitionTime":"2025-11-28T06:53:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:53:07 crc kubenswrapper[4922]: I1128 06:53:07.978369 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:07 crc kubenswrapper[4922]: I1128 06:53:07.978442 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:07 crc kubenswrapper[4922]: I1128 06:53:07.978461 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:07 crc kubenswrapper[4922]: I1128 06:53:07.978488 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:07 crc kubenswrapper[4922]: I1128 06:53:07.978510 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:07Z","lastTransitionTime":"2025-11-28T06:53:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 06:53:08 crc kubenswrapper[4922]: I1128 06:53:08.081486 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:08 crc kubenswrapper[4922]: I1128 06:53:08.081558 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:08 crc kubenswrapper[4922]: I1128 06:53:08.081589 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:08 crc kubenswrapper[4922]: I1128 06:53:08.081619 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:08 crc kubenswrapper[4922]: I1128 06:53:08.081640 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:08Z","lastTransitionTime":"2025-11-28T06:53:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:53:08 crc kubenswrapper[4922]: I1128 06:53:08.184918 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:08 crc kubenswrapper[4922]: I1128 06:53:08.184989 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:08 crc kubenswrapper[4922]: I1128 06:53:08.185007 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:08 crc kubenswrapper[4922]: I1128 06:53:08.185031 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:08 crc kubenswrapper[4922]: I1128 06:53:08.185051 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:08Z","lastTransitionTime":"2025-11-28T06:53:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:53:08 crc kubenswrapper[4922]: I1128 06:53:08.289148 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:08 crc kubenswrapper[4922]: I1128 06:53:08.289417 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:08 crc kubenswrapper[4922]: I1128 06:53:08.289567 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:08 crc kubenswrapper[4922]: I1128 06:53:08.289761 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:08 crc kubenswrapper[4922]: I1128 06:53:08.289852 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:08Z","lastTransitionTime":"2025-11-28T06:53:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 06:53:08 crc kubenswrapper[4922]: I1128 06:53:08.394032 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:08 crc kubenswrapper[4922]: I1128 06:53:08.394747 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:08 crc kubenswrapper[4922]: I1128 06:53:08.394847 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:08 crc kubenswrapper[4922]: I1128 06:53:08.395000 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:08 crc kubenswrapper[4922]: I1128 06:53:08.395089 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:08Z","lastTransitionTime":"2025-11-28T06:53:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:53:08 crc kubenswrapper[4922]: I1128 06:53:08.398265 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 06:53:08 crc kubenswrapper[4922]: E1128 06:53:08.398456 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 06:53:08 crc kubenswrapper[4922]: I1128 06:53:08.399654 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 06:53:08 crc kubenswrapper[4922]: E1128 06:53:08.399896 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 06:53:08 crc kubenswrapper[4922]: I1128 06:53:08.400162 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 06:53:08 crc kubenswrapper[4922]: E1128 06:53:08.400295 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 06:53:08 crc kubenswrapper[4922]: I1128 06:53:08.499140 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:08 crc kubenswrapper[4922]: I1128 06:53:08.499263 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:08 crc kubenswrapper[4922]: I1128 06:53:08.499292 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:08 crc kubenswrapper[4922]: I1128 06:53:08.499324 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:08 crc kubenswrapper[4922]: I1128 06:53:08.499347 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:08Z","lastTransitionTime":"2025-11-28T06:53:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:53:08 crc kubenswrapper[4922]: I1128 06:53:08.601234 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:08 crc kubenswrapper[4922]: I1128 06:53:08.601520 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:08 crc kubenswrapper[4922]: I1128 06:53:08.601530 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:08 crc kubenswrapper[4922]: I1128 06:53:08.601542 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:08 crc kubenswrapper[4922]: I1128 06:53:08.601551 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:08Z","lastTransitionTime":"2025-11-28T06:53:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 06:53:08 crc kubenswrapper[4922]: I1128 06:53:08.703384 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:08 crc kubenswrapper[4922]: I1128 06:53:08.703419 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:08 crc kubenswrapper[4922]: I1128 06:53:08.703427 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:08 crc kubenswrapper[4922]: I1128 06:53:08.703439 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:08 crc kubenswrapper[4922]: I1128 06:53:08.703448 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:08Z","lastTransitionTime":"2025-11-28T06:53:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:53:08 crc kubenswrapper[4922]: I1128 06:53:08.717866 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-n9b52" event={"ID":"31348e3e-fe58-4426-98b7-bd9dd404283b","Type":"ContainerStarted","Data":"03d0e97f4ab12f9cccfefde0d5665e45e14db63343fd1273279d41a8e56b060f"} Nov 28 06:53:08 crc kubenswrapper[4922]: I1128 06:53:08.717908 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-n9b52" event={"ID":"31348e3e-fe58-4426-98b7-bd9dd404283b","Type":"ContainerStarted","Data":"07e060fc8cd65cfc5a3e2ba86edb408b32996c25dac44f6f70d2f91b837e9da1"} Nov 28 06:53:08 crc kubenswrapper[4922]: I1128 06:53:08.717920 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-n9b52" event={"ID":"31348e3e-fe58-4426-98b7-bd9dd404283b","Type":"ContainerStarted","Data":"9e4b665de4223189ed9faa9c49fd6df5daf80542cf04850e6e66dfa6817e10ba"} Nov 28 06:53:08 crc kubenswrapper[4922]: I1128 06:53:08.736630 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:57Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:57Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4fb859dbed8c4c25965a739a2bd823f90f867a3bb629206994858cd62ba597ff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:08Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:08 crc kubenswrapper[4922]: I1128 06:53:08.757099 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:08Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:08 crc kubenswrapper[4922]: I1128 06:53:08.782732 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-7gdxt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ac5c6b67-2037-400e-8e03-845b47d8ca67\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d0f038ffe6c8ec5ec836379b827ecb71967119efca3f2cd4ce5d3006fa96a153\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dtxdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2c1a545fef7aec25b21dd112c54eeb7b94306cafaf4732a50de9ff10a409c443\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dtxdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6e9942f8e8cdf6ffd04d46b29be5a53fcc8c4cb5d79f50d7747e2fe6eb744ffd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dtxdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e91ea6957087e19d43626438d68bf92a19a6f35325de570738354edd61da5bb0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dtxdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4f56366e2355663eb896c09a6710166b89f794a7f17cb317f6c4299e8b317782\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dtxdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://403e86b4b99e8bc85be77996288705f6ea8ff5da8e9b7dfb09749f4341065b55\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dtxdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3ead32a1ed979f2f568f830c0c57ee284644824f
41055dda621deb734bd7dd1b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://67c452725d82ca16f6b13f31f18d78e040d878900953e2b1c2d43afa9b4bbdc0\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-28T06:53:03Z\\\",\\\"message\\\":\\\"e (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1128 06:53:03.775357 6198 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI1128 06:53:03.775409 6198 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI1128 06:53:03.775460 6198 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI1128 06:53:03.775477 6198 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI1128 06:53:03.775513 6198 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI1128 06:53:03.775617 6198 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI1128 06:53:03.775639 6198 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI1128 06:53:03.775644 6198 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI1128 06:53:03.775663 6198 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI1128 06:53:03.775679 6198 handler.go:208] Removed *v1.Node event handler 2\\\\nI1128 06:53:03.775682 6198 factory.go:656] Stopping watch factory\\\\nI1128 06:53:03.775691 6198 handler.go:208] Removed *v1.Node event handler 7\\\\nI1128 06:53:03.775699 6198 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI1128 06:53:03.775703 6198 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI1128 06:53:03.775714 6198 handler.go:208] Removed *v1.Namespace 
ev\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T06:53:01Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:53:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dtxdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d31c99f96b8af9c7b9c47c4ccd0b6485b23c9eacfe293667038042df8330d8a9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dtxdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"con
tainerID\\\":\\\"cri-o://3aaa3ffb5b99838c7969b3e241ad9d7a0bc4797fac3ec52280914abbb7ca1b24\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3aaa3ffb5b99838c7969b3e241ad9d7a0bc4797fac3ec52280914abbb7ca1b24\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T06:52:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dtxdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T06:52:54Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-7gdxt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:08Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:08 crc kubenswrapper[4922]: I1128 06:53:08.797111 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-n9b52" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"31348e3e-fe58-4426-98b7-bd9dd404283b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://07e060fc8cd65cfc5a3e2ba86edb408b32996c25dac44f6f70d2f91b837e9da1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:53:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vkhgr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://03d0e97f4ab12f9cccfefde0d5665e45e14db63343fd1273279d41a8e56b060f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:53:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vkhgr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T06:53:07Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-n9b52\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:08Z is after 2025-08-24T17:21:41Z" Nov 28 
06:53:08 crc kubenswrapper[4922]: I1128 06:53:08.805264 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:08 crc kubenswrapper[4922]: I1128 06:53:08.805296 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:08 crc kubenswrapper[4922]: I1128 06:53:08.805307 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:08 crc kubenswrapper[4922]: I1128 06:53:08.805323 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:08 crc kubenswrapper[4922]: I1128 06:53:08.805336 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:08Z","lastTransitionTime":"2025-11-28T06:53:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:53:08 crc kubenswrapper[4922]: I1128 06:53:08.811873 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"78d1b075-6126-476f-8318-483aeaa7b542\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:35Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:35Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d8727c0c5c76c4071561c95b14554063c01a2d7d826bfef19624a346f7079096\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c63926690deadbe1a0b9bd4228c1be8c9d959ae52dc13540e1d7da0ef6ce2b44\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://12e55656cf2a2cadbef853f458f347aabe75bd6b644a48a8c320144a0bbb77f8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ff005273cedb4a3be19dafd07b2572733446d00c1aa3d89dec6dad79a9b37cb7\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://43f9db3464d315f5b596f15327b9e7cfc2935bb8f18492bcf4aa1b2d8e2e8951\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-28T06:52:53Z\\\",\\\"message\\\":\\\" to sync for 
client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file\\\\nI1128 06:52:53.805479 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\nI1128 06:52:53.805349 1 requestheader_controller.go:172] Starting RequestHeaderAuthRequestController\\\\nI1128 06:52:53.807455 1 shared_informer.go:313] Waiting for caches to sync for RequestHeaderAuthRequestController\\\\nI1128 06:52:53.805665 1 envvar.go:172] \\\\\\\"Feature gate default state\\\\\\\" feature=\\\\\\\"WatchListClient\\\\\\\" enabled=false\\\\nI1128 06:52:53.807562 1 envvar.go:172] \\\\\\\"Feature gate default state\\\\\\\" feature=\\\\\\\"InformerResourceVersion\\\\\\\" enabled=false\\\\nI1128 06:52:53.805922 1 tlsconfig.go:203] \\\\\\\"Loaded serving cert\\\\\\\" certName=\\\\\\\"serving-cert::/tmp/serving-cert-870734072/tls.crt::/tmp/serving-cert-870734072/tls.key\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"localhost\\\\\\\\\\\\\\\" [serving] validServingFor=[localhost] issuer=\\\\\\\\\\\\\\\"check-endpoints-signer@1764312757\\\\\\\\\\\\\\\" (2025-11-28 06:52:36 +0000 UTC to 2025-12-28 06:52:37 +0000 UTC (now=2025-11-28 06:52:53.805871396 +0000 UTC))\\\\\\\"\\\\nI1128 06:52:53.808356 1 named_certificates.go:53] \\\\\\\"Loaded SNI cert\\\\\\\" index=0 certName=\\\\\\\"self-signed loopback\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"apiserver-loopback-client@1764312768\\\\\\\\\\\\\\\" [serving] validServingFor=[apiserver-loopback-client] issuer=\\\\\\\\\\\\\\\"apiserver-loopback-client-ca@1764312768\\\\\\\\\\\\\\\" (2025-11-28 05:52:47 +0000 UTC to 2026-11-28 05:52:47 +0000 UTC (now=2025-11-28 06:52:53.808326937 +0000 UTC))\\\\\\\"\\\\nI1128 06:52:53.808434 1 secure_serving.go:213] Serving securely on [::]:17697\\\\nI1128 06:52:53.808491 1 genericapiserver.go:683] [graceful-termination] waiting for shutdown to be initiated\\\\nI1128 06:52:53.805962 1 dynamic_serving_content.go:135] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-870734072/tls.crt::/tmp/serving-cert-870734072/tls.key\\\\\\\"\\\\nI1128 06:52:53.808872 1 tlsconfig.go:243] \\\\\\\"Starting DynamicServingCertificateController\\\\\\\"\\\\nF1128 06:52:53.810397 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T06:52:37Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cd17aa15325fbe3a40b81cea5bfec137ef02a8dd2d5f8393be4d206a8360ba89\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:37Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9a1a9e0fb2169a4caf2986906a87573648458725e2571ec377704856d3c16b47\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9a1a9e0fb2169a4caf2986906a87573648458725e2571ec377704856d3c16b47\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T06:52:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T06:52:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T06:52:35Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:08Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:08 crc kubenswrapper[4922]: I1128 06:53:08.823265 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-w9zxj" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"e1f29751-83a6-4469-b733-50e654026f8c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://679d64be6eca63ee652a4cb6cc5432c6ce549d1de243bdfbde115af634e770e3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ghdft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T06:52:54Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-w9zxj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:08Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:08 crc kubenswrapper[4922]: I1128 06:53:08.836603 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-jgzjd" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b05f16bb-1729-4fd8-883a-4fb960bf4cff\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://099001160eba2d5545fdd42a6fac2b9842549a64b5d3fe093c274e073a156e5a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fwd5b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T06:52:54Z\\\"}}\" for pod \"openshift-multus\"/\"multus-jgzjd\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:08Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:08 crc kubenswrapper[4922]: I1128 06:53:08.859763 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-xm948" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"28b50482-ffec-406c-9ff9-9604bce5d5d5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8329d62934c99c8561a76ffc873ea44b49549e6be23697ea4003dcd42798f914\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:53:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-76zhj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ea8d65e8ee7eb92aa290261e8f51b41b46819bc513aaca5c3bbdf3aa90a4acf1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ea8d65e8ee7eb92aa290261e8f51b41b46819bc513aaca5c3bbdf3aa90a4acf1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T06:52:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.
io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-76zhj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b52be6274c308e05417b43b1a405e2156837ac5b1e751cf1bf07f1820e5072c1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b52be6274c308e05417b43b1a405e2156837ac5b1e751cf1bf07f1820e5072c1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T06:52:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T06:52:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-76zhj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2e49c9118ef8a51223ca7a5ed0e966458c25e01d5fe81bffeecf85e3c958177a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2e49c9118ef8a51223ca7a5ed0e966458c25e01d5fe81bffeecf85e3c958177a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T06:52:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T06:52:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-76zhj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://10808a5202288b64a0583c474df43f7ad4846b876c071e3797e9403416dbd4e5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://10808a5202288b64a0583c474df43f7ad4846b876c071e3797e9403416dbd4e5\\\",\\\"exitCode\\\":0,\\\
"finishedAt\\\":\\\"2025-11-28T06:52:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T06:52:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-76zhj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f17f7a3f2eae1a08db3ab5d237156d3555583ed11bdf121c4138312913bc2ad1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f17f7a3f2eae1a08db3ab5d237156d3555583ed11bdf121c4138312913bc2ad1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T06:53:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T06:52:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-76zhj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8c558e4eb231b58b703d70d6da8454e7128ac7fc7138897bedcbdeb9e801a08b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8c558e4eb231b58b703d70d6da8454e7128ac7fc7138897bedcbdeb9e801a08b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T06:53:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T06:53:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-76zhj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T06:52:54Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-xm948\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has 
expired or is not yet valid: current time 2025-11-28T06:53:08Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:08 crc kubenswrapper[4922]: I1128 06:53:08.875966 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-z5d9x" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"21211ec8-3baf-4230-9cd8-c641f6bdc0e1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d42d2096b746c6f0a9210409101237a8b283b9baa914633aefc36971712ec634\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g4hdz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T06:52:57Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-z5d9x\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:08Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:08 crc kubenswrapper[4922]: I1128 06:53:08.898778 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f54a7fc3-dff2-4919-91a9-7defe17b717e\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2b39d53d3c00be07ad6ed44fb37961669118efd3f0a953a39e05c90c9fc30319\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1c84fe4f54f9b9fc76f23f13aaee875e9d127be3dec3e3952893436fb9d94936\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bf8a543b9af64068e2164c1fb7fcf8117e36a3f1a6c6e40df2a63da8fea86612\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7ef4b355167e11c82fb8067373d0d1b4ec5551157e683043c98f9249082795d2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T06:52:35Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:08Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:08 crc kubenswrapper[4922]: I1128 06:53:08.910184 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:08 crc kubenswrapper[4922]: I1128 06:53:08.910244 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:08 crc kubenswrapper[4922]: I1128 06:53:08.910261 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:08 crc kubenswrapper[4922]: I1128 06:53:08.910311 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:08 crc kubenswrapper[4922]: I1128 06:53:08.910329 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:08Z","lastTransitionTime":"2025-11-28T06:53:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 06:53:08 crc kubenswrapper[4922]: I1128 06:53:08.914472 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:08Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:08 crc kubenswrapper[4922]: I1128 06:53:08.934632 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8de96806bba4cb76f53ccf39f1188732d96955671049b550589a836ed21e0616\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:08Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:08 crc kubenswrapper[4922]: I1128 06:53:08.954987 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://60275d4709ecec957c37c52e762628422258288394fb23bb6190e98253977288\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d7be069e52fb9de73b7d4297a34eaab68e25c764ed60172e87146259d0d0b6ed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:08Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:08 crc kubenswrapper[4922]: I1128 06:53:08.973273 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-h8wk6" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0498340a-5e95-42bf-a0a6-8ac89a6b8858\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://19e8a18b97500977a51210206bba89633ff9939e989bfe1a6e471f5a23c83520\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zf7vq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b903fc60349ec7d5004f23ec933dce13d646a2c343819495564005dfb4813314\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zf7vq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T06:52:54Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-h8wk6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:08Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:08 crc kubenswrapper[4922]: I1128 06:53:08.991523 4922 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:08Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:09 crc kubenswrapper[4922]: I1128 06:53:09.013542 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:09 crc kubenswrapper[4922]: I1128 06:53:09.013628 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:09 crc kubenswrapper[4922]: I1128 06:53:09.013658 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:09 crc kubenswrapper[4922]: I1128 06:53:09.013694 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:09 crc kubenswrapper[4922]: I1128 06:53:09.013733 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:09Z","lastTransitionTime":"2025-11-28T06:53:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 06:53:09 crc kubenswrapper[4922]: I1128 06:53:09.031908 4922 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-multus/network-metrics-daemon-9kfr9"] Nov 28 06:53:09 crc kubenswrapper[4922]: I1128 06:53:09.032814 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-9kfr9" Nov 28 06:53:09 crc kubenswrapper[4922]: E1128 06:53:09.032946 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-9kfr9" podUID="709beb43-ed88-4a0a-b384-0c463e469964" Nov 28 06:53:09 crc kubenswrapper[4922]: I1128 06:53:09.050427 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://60275d4709ecec957c37c52e762628422258288394fb23bb6190e98253977288\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d7be069e52fb9de73b7d4297a34eaab68e25c764ed60172e87146259d0d0b6ed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-ident
ity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:09Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:09 crc kubenswrapper[4922]: I1128 06:53:09.069761 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8de96806bba4cb76f53ccf39f1188732d96955671049b550589a836ed21e0616\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:09Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:09 crc kubenswrapper[4922]: I1128 06:53:09.085971 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5p26f\" (UniqueName: \"kubernetes.io/projected/709beb43-ed88-4a0a-b384-0c463e469964-kube-api-access-5p26f\") pod \"network-metrics-daemon-9kfr9\" (UID: \"709beb43-ed88-4a0a-b384-0c463e469964\") " pod="openshift-multus/network-metrics-daemon-9kfr9" Nov 28 06:53:09 crc kubenswrapper[4922]: I1128 06:53:09.086038 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: 
\"kubernetes.io/secret/709beb43-ed88-4a0a-b384-0c463e469964-metrics-certs\") pod \"network-metrics-daemon-9kfr9\" (UID: \"709beb43-ed88-4a0a-b384-0c463e469964\") " pod="openshift-multus/network-metrics-daemon-9kfr9" Nov 28 06:53:09 crc kubenswrapper[4922]: I1128 06:53:09.087728 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:09Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:09 crc kubenswrapper[4922]: I1128 06:53:09.101128 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-h8wk6" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0498340a-5e95-42bf-a0a6-8ac89a6b8858\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://19e8a18b97500977a51210206bba89633ff9939e989bfe1a6e471f5a23c83520\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zf7vq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b903fc60349ec7d5004f23ec933dce13d646a2c343819495564005dfb4813314\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zf7vq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T06:52:54Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-h8wk6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:09Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:09 crc kubenswrapper[4922]: I1128 06:53:09.117134 4922 kubelet_node_status.go:724] 
"Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:09 crc kubenswrapper[4922]: I1128 06:53:09.117211 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:09 crc kubenswrapper[4922]: I1128 06:53:09.117264 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:09 crc kubenswrapper[4922]: I1128 06:53:09.117291 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:09 crc kubenswrapper[4922]: I1128 06:53:09.117312 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:09Z","lastTransitionTime":"2025-11-28T06:53:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:53:09 crc kubenswrapper[4922]: I1128 06:53:09.118465 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-n9b52" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"31348e3e-fe58-4426-98b7-bd9dd404283b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://07e060fc8cd65cfc5a3e2ba86edb408b32996c25dac44f6f70d2f91b837e9da1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:53:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vkhgr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://03d0e97f4ab12f9cccfefde0d5665e45e14db63343fd1273279d41a8e56b060f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha2
56:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:53:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vkhgr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T06:53:07Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-n9b52\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:09Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:09 crc kubenswrapper[4922]: I1128 06:53:09.139961 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"78d1b075-6126-476f-8318-483aeaa7b542\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:35Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:35Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d8727c0c5c76c4071561c95b14554063c01a2d7d826bfef19624a346f7079096\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c63926690deadbe1a0b9bd4228c1be8c9d959ae52dc13540e1d7da0ef6ce2b44\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://12e55656cf2a2cadbef853f458f347aabe75bd6b644a48a8c320144a0bbb77f8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ff005273cedb4a3be19dafd07b2572733446d00c1aa3d89dec6dad79a9b37cb7\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://43f9db3464d315f5b596f15327b9e7cfc2935bb8f18492bcf4aa1b2d8e2e8951\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-28T06:52:53Z\\\",\\\"message\\\":\\\" to sync for 
client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file\\\\nI1128 06:52:53.805479 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\nI1128 06:52:53.805349 1 requestheader_controller.go:172] Starting RequestHeaderAuthRequestController\\\\nI1128 06:52:53.807455 1 shared_informer.go:313] Waiting for caches to sync for RequestHeaderAuthRequestController\\\\nI1128 06:52:53.805665 1 envvar.go:172] \\\\\\\"Feature gate default state\\\\\\\" feature=\\\\\\\"WatchListClient\\\\\\\" enabled=false\\\\nI1128 06:52:53.807562 1 envvar.go:172] \\\\\\\"Feature gate default state\\\\\\\" feature=\\\\\\\"InformerResourceVersion\\\\\\\" enabled=false\\\\nI1128 06:52:53.805922 1 tlsconfig.go:203] \\\\\\\"Loaded serving cert\\\\\\\" certName=\\\\\\\"serving-cert::/tmp/serving-cert-870734072/tls.crt::/tmp/serving-cert-870734072/tls.key\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"localhost\\\\\\\\\\\\\\\" [serving] validServingFor=[localhost] issuer=\\\\\\\\\\\\\\\"check-endpoints-signer@1764312757\\\\\\\\\\\\\\\" (2025-11-28 06:52:36 +0000 UTC to 2025-12-28 06:52:37 +0000 UTC (now=2025-11-28 06:52:53.805871396 +0000 UTC))\\\\\\\"\\\\nI1128 06:52:53.808356 1 named_certificates.go:53] \\\\\\\"Loaded SNI cert\\\\\\\" index=0 certName=\\\\\\\"self-signed loopback\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"apiserver-loopback-client@1764312768\\\\\\\\\\\\\\\" [serving] validServingFor=[apiserver-loopback-client] issuer=\\\\\\\\\\\\\\\"apiserver-loopback-client-ca@1764312768\\\\\\\\\\\\\\\" (2025-11-28 05:52:47 +0000 UTC to 2026-11-28 05:52:47 +0000 UTC (now=2025-11-28 06:52:53.808326937 +0000 UTC))\\\\\\\"\\\\nI1128 06:52:53.808434 1 secure_serving.go:213] Serving securely on [::]:17697\\\\nI1128 06:52:53.808491 1 genericapiserver.go:683] [graceful-termination] waiting for shutdown to be initiated\\\\nI1128 06:52:53.805962 1 dynamic_serving_content.go:135] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-870734072/tls.crt::/tmp/serving-cert-870734072/tls.key\\\\\\\"\\\\nI1128 06:52:53.808872 1 tlsconfig.go:243] \\\\\\\"Starting DynamicServingCertificateController\\\\\\\"\\\\nF1128 06:52:53.810397 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T06:52:37Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cd17aa15325fbe3a40b81cea5bfec137ef02a8dd2d5f8393be4d206a8360ba89\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:37Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9a1a9e0fb2169a4caf2986906a87573648458725e2571ec377704856d3c16b47\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9a1a9e0fb2169a4caf2986906a87573648458725e2571ec377704856d3c16b47\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T06:52:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T06:52:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T06:52:35Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:09Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:09 crc kubenswrapper[4922]: I1128 06:53:09.159031 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:57Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:57Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4fb859dbed8c4c25965a739a2bd823f90f867a3bb629206994858cd62ba597ff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:09Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:09 crc kubenswrapper[4922]: I1128 06:53:09.177702 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:09Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:09 crc kubenswrapper[4922]: I1128 06:53:09.187124 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5p26f\" (UniqueName: \"kubernetes.io/projected/709beb43-ed88-4a0a-b384-0c463e469964-kube-api-access-5p26f\") pod \"network-metrics-daemon-9kfr9\" (UID: \"709beb43-ed88-4a0a-b384-0c463e469964\") " pod="openshift-multus/network-metrics-daemon-9kfr9" Nov 28 06:53:09 crc kubenswrapper[4922]: I1128 06:53:09.187377 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/709beb43-ed88-4a0a-b384-0c463e469964-metrics-certs\") pod \"network-metrics-daemon-9kfr9\" (UID: \"709beb43-ed88-4a0a-b384-0c463e469964\") " pod="openshift-multus/network-metrics-daemon-9kfr9" Nov 28 06:53:09 crc kubenswrapper[4922]: E1128 06:53:09.187607 4922 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Nov 28 06:53:09 crc kubenswrapper[4922]: E1128 06:53:09.188016 4922 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/709beb43-ed88-4a0a-b384-0c463e469964-metrics-certs podName:709beb43-ed88-4a0a-b384-0c463e469964 nodeName:}" failed. No retries permitted until 2025-11-28 06:53:09.687991159 +0000 UTC m=+34.608386781 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/709beb43-ed88-4a0a-b384-0c463e469964-metrics-certs") pod "network-metrics-daemon-9kfr9" (UID: "709beb43-ed88-4a0a-b384-0c463e469964") : object "openshift-multus"/"metrics-daemon-secret" not registered Nov 28 06:53:09 crc kubenswrapper[4922]: I1128 06:53:09.209039 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-7gdxt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ac5c6b67-2037-400e-8e03-845b47d8ca67\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d0f038ffe6c8ec5ec836379b827ecb71967119efca3f2cd4ce5d3006fa96a153\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dtxdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2c1a545fef7aec25b21dd112c54eeb7b94306cafaf4732a50de9ff10a409c443\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnl
y\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dtxdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6e9942f8e8cdf6ffd04d46b29be5a53fcc8c4cb5d79f50d7747e2fe6eb744ffd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dtxdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e91ea6957087e19d43626438d68bf92a19a6f35325de570738354edd61da5bb0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dtxdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4f56366e2355663eb896c09a6710166b89f794a7f17cb317f6c4299e8b317782\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dtxdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"contai
nerID\\\":\\\"cri-o://403e86b4b99e8bc85be77996288705f6ea8ff5da8e9b7dfb09749f4341065b55\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dtxdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3ead32a1ed979f2f568f830c0c57ee284644824f41055dda621deb734bd7dd1b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://67c452725d82ca16f6b13f31f18d78e040d878900953e2b1c2d43afa9b4bbdc0\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-28T06:53:03Z\\\",\\\"message\\\":\\\"e (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1128 06:53:03.775357 6198 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI1128 06:53:03.775409 6198 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI1128 06:53:03.775460 6198 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI1128 06:53:03.775477 6198 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI1128 06:53:03.775513 6198 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI1128 06:53:03.775617 6198 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI1128 06:53:03.775639 6198 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI1128 06:53:03.775644 6198 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI1128 06:53:03.775663 6198 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI1128 06:53:03.775679 6198 handler.go:208] Removed *v1.Node event handler 2\\\\nI1128 06:53:03.775682 6198 factory.go:656] Stopping watch factory\\\\nI1128 06:53:03.775691 6198 handler.go:208] Removed *v1.Node event handler 7\\\\nI1128 06:53:03.775699 6198 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI1128 06:53:03.775703 6198 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI1128 06:53:03.775714 6198 handler.go:208] Removed *v1.Namespace 
ev\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T06:53:01Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:53:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dtxdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d31c99f96b8af9c7b9c47c4ccd0b6485b23c9eacfe293667038042df8330d8a9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dtxdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"con
tainerID\\\":\\\"cri-o://3aaa3ffb5b99838c7969b3e241ad9d7a0bc4797fac3ec52280914abbb7ca1b24\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3aaa3ffb5b99838c7969b3e241ad9d7a0bc4797fac3ec52280914abbb7ca1b24\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T06:52:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dtxdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T06:52:54Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-7gdxt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:09Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:09 crc kubenswrapper[4922]: I1128 06:53:09.220322 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:09 crc kubenswrapper[4922]: I1128 06:53:09.220410 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:09 crc kubenswrapper[4922]: I1128 06:53:09.220435 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:09 crc kubenswrapper[4922]: I1128 06:53:09.220466 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:09 crc kubenswrapper[4922]: I1128 06:53:09.220490 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:09Z","lastTransitionTime":"2025-11-28T06:53:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 06:53:09 crc kubenswrapper[4922]: I1128 06:53:09.222734 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5p26f\" (UniqueName: \"kubernetes.io/projected/709beb43-ed88-4a0a-b384-0c463e469964-kube-api-access-5p26f\") pod \"network-metrics-daemon-9kfr9\" (UID: \"709beb43-ed88-4a0a-b384-0c463e469964\") " pod="openshift-multus/network-metrics-daemon-9kfr9" Nov 28 06:53:09 crc kubenswrapper[4922]: I1128 06:53:09.231129 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-z5d9x" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"21211ec8-3baf-4230-9cd8-c641f6bdc0e1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d42d2096b746c6f0a9210409101237a8b283b9baa914633aefc36971712ec634\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g4hdz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T06:52:57Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-z5d9x\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:09Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:09 crc kubenswrapper[4922]: I1128 06:53:09.249285 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-9kfr9" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"709beb43-ed88-4a0a-b384-0c463e469964\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:09Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:09Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5p26f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5p26f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T06:53:09Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-9kfr9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:09Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:09 crc kubenswrapper[4922]: I1128 06:53:09.269895 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f54a7fc3-dff2-4919-91a9-7defe17b717e\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2b39d53d3c00be07ad6ed44fb37961669118efd3f0a953a39e05c90c9fc30319\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1c84fe4f54f9b9fc76f23f13aaee875e9d127be3dec3e3952893436fb9d94936\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bf8a543b9af64068e2164c1fb7fcf8117e36a3f1a6c6e40df2a63da8fea86612\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7ef4b355167e11c82fb8067373d0d1b4ec5551157e683043c98f9249082795d2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T06:52:35Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:09Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:09 crc kubenswrapper[4922]: I1128 06:53:09.290287 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:09Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:09 crc kubenswrapper[4922]: I1128 06:53:09.305562 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-w9zxj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e1f29751-83a6-4469-b733-50e654026f8c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://679d64be6eca63ee652a4cb6cc5432c6ce549d1de243bdfbde115af634e770e3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ghdft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T06:52:54Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-w9zxj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 
2025-11-28T06:53:09Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:09 crc kubenswrapper[4922]: I1128 06:53:09.324741 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:09 crc kubenswrapper[4922]: I1128 06:53:09.325038 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:09 crc kubenswrapper[4922]: I1128 06:53:09.325204 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:09 crc kubenswrapper[4922]: I1128 06:53:09.325415 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:09 crc kubenswrapper[4922]: I1128 06:53:09.325747 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:09Z","lastTransitionTime":"2025-11-28T06:53:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:53:09 crc kubenswrapper[4922]: I1128 06:53:09.325548 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-jgzjd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b05f16bb-1729-4fd8-883a-4fb960bf4cff\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://099001160eba2d5545fdd42a6fac2b9842549a64b5d3fe093c274e073a156e5a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":
\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fwd5b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T06:52:54Z\\\"}}\" for pod \"openshift-multus\"/\"multus-jgzjd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:09Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:09 crc kubenswrapper[4922]: I1128 06:53:09.349841 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-xm948" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"28b50482-ffec-406c-9ff9-9604bce5d5d5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8329d62934c99c8561a76ffc873ea44b49549e6be23697ea4003dcd42798f914\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:53:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-76zhj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ea8d65e8ee7eb92aa290261e8f51b41b46819bc513aaca5c3bbdf3aa90a4acf1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ea8d65e8ee7eb92aa290261e8f51b41b46819bc513aaca5c3bbdf3aa90a4acf1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T06:52:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-76zhj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b52be6274c308e05417b43b1a405e2156837ac5b1e751cf1bf07f1820e5072c1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b52be6274c308e05417b43b1a405e2156837ac5b1e751cf1bf07f1820e5072c1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T06:52:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T06:52:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-76zhj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2e49c9118ef8a51223ca7a5ed0e966458c25e01d5fe81bffeecf85e3c958177a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2e49c9118ef8a51223ca7a5ed0e966458c25e01d5fe81bffeecf85e3c958177a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T06:52:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T06:52:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-76zhj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://10808a5202288b64a0583c474df43f7ad4846b876c071e3797e9403416dbd4e5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://10808a5202288b64a0583c474df43f7ad4846b876c071e3797e9403416dbd4e5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T06:52:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T06:52:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-76zhj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f17f7a3f2eae1a08db3ab5d237156d3555583ed11bdf121c4138312913bc2ad1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f17f7a3f2eae1a08db3ab5d237156d3555583ed11bdf121c4138312913bc2ad1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T06:53:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T06:52:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-76zhj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8c558e4eb231b58b703d70d6da8454e7128ac7fc7138897bedcbdeb9e801a08b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8c558e4eb231b58b703d70d6da8454e7128ac7fc7138897bedcbdeb9e801a08b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T06:53:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T06:53:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-76zhj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T06:52:54Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-xm948\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:09Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:09 crc kubenswrapper[4922]: I1128 06:53:09.428818 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:09 crc kubenswrapper[4922]: I1128 06:53:09.428883 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:09 crc 
kubenswrapper[4922]: I1128 06:53:09.428902 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:09 crc kubenswrapper[4922]: I1128 06:53:09.428929 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:09 crc kubenswrapper[4922]: I1128 06:53:09.428949 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:09Z","lastTransitionTime":"2025-11-28T06:53:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:53:09 crc kubenswrapper[4922]: I1128 06:53:09.532591 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:09 crc kubenswrapper[4922]: I1128 06:53:09.532668 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:09 crc kubenswrapper[4922]: I1128 06:53:09.532688 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:09 crc kubenswrapper[4922]: I1128 06:53:09.532712 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:09 crc kubenswrapper[4922]: I1128 06:53:09.532729 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:09Z","lastTransitionTime":"2025-11-28T06:53:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:53:09 crc kubenswrapper[4922]: I1128 06:53:09.635612 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:09 crc kubenswrapper[4922]: I1128 06:53:09.635668 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:09 crc kubenswrapper[4922]: I1128 06:53:09.635687 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:09 crc kubenswrapper[4922]: I1128 06:53:09.635709 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:09 crc kubenswrapper[4922]: I1128 06:53:09.635728 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:09Z","lastTransitionTime":"2025-11-28T06:53:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 06:53:09 crc kubenswrapper[4922]: I1128 06:53:09.694002 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/709beb43-ed88-4a0a-b384-0c463e469964-metrics-certs\") pod \"network-metrics-daemon-9kfr9\" (UID: \"709beb43-ed88-4a0a-b384-0c463e469964\") " pod="openshift-multus/network-metrics-daemon-9kfr9" Nov 28 06:53:09 crc kubenswrapper[4922]: E1128 06:53:09.694214 4922 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Nov 28 06:53:09 crc kubenswrapper[4922]: E1128 06:53:09.694321 4922 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/709beb43-ed88-4a0a-b384-0c463e469964-metrics-certs podName:709beb43-ed88-4a0a-b384-0c463e469964 nodeName:}" failed. No retries permitted until 2025-11-28 06:53:10.694295606 +0000 UTC m=+35.614691218 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/709beb43-ed88-4a0a-b384-0c463e469964-metrics-certs") pod "network-metrics-daemon-9kfr9" (UID: "709beb43-ed88-4a0a-b384-0c463e469964") : object "openshift-multus"/"metrics-daemon-secret" not registered Nov 28 06:53:09 crc kubenswrapper[4922]: I1128 06:53:09.739131 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:09 crc kubenswrapper[4922]: I1128 06:53:09.739197 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:09 crc kubenswrapper[4922]: I1128 06:53:09.739269 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:09 crc kubenswrapper[4922]: I1128 06:53:09.739300 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:09 crc kubenswrapper[4922]: I1128 06:53:09.739321 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:09Z","lastTransitionTime":"2025-11-28T06:53:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 06:53:09 crc kubenswrapper[4922]: I1128 06:53:09.841967 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:09 crc kubenswrapper[4922]: I1128 06:53:09.842040 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:09 crc kubenswrapper[4922]: I1128 06:53:09.842064 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:09 crc kubenswrapper[4922]: I1128 06:53:09.842095 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:09 crc kubenswrapper[4922]: I1128 06:53:09.842113 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:09Z","lastTransitionTime":"2025-11-28T06:53:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:53:09 crc kubenswrapper[4922]: I1128 06:53:09.945593 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:09 crc kubenswrapper[4922]: I1128 06:53:09.945637 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:09 crc kubenswrapper[4922]: I1128 06:53:09.945654 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:09 crc kubenswrapper[4922]: I1128 06:53:09.945677 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:09 crc kubenswrapper[4922]: I1128 06:53:09.945694 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:09Z","lastTransitionTime":"2025-11-28T06:53:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:53:10 crc kubenswrapper[4922]: I1128 06:53:10.048997 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:10 crc kubenswrapper[4922]: I1128 06:53:10.049071 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:10 crc kubenswrapper[4922]: I1128 06:53:10.049088 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:10 crc kubenswrapper[4922]: I1128 06:53:10.049115 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:10 crc kubenswrapper[4922]: I1128 06:53:10.049133 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:10Z","lastTransitionTime":"2025-11-28T06:53:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 06:53:10 crc kubenswrapper[4922]: I1128 06:53:10.098661 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 06:53:10 crc kubenswrapper[4922]: I1128 06:53:10.098805 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 06:53:10 crc kubenswrapper[4922]: E1128 06:53:10.098918 4922 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 06:53:26.098885637 +0000 UTC m=+51.019281259 (durationBeforeRetry 16s). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 06:53:10 crc kubenswrapper[4922]: E1128 06:53:10.098956 4922 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 28 06:53:10 crc kubenswrapper[4922]: E1128 06:53:10.099038 4922 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-28 06:53:26.099015811 +0000 UTC m=+51.019411433 (durationBeforeRetry 16s). 
Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 28 06:53:10 crc kubenswrapper[4922]: I1128 06:53:10.099069 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 06:53:10 crc kubenswrapper[4922]: E1128 06:53:10.099167 4922 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Nov 28 06:53:10 crc kubenswrapper[4922]: E1128 06:53:10.099210 4922 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-28 06:53:26.099197606 +0000 UTC m=+51.019593218 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Nov 28 06:53:10 crc kubenswrapper[4922]: I1128 06:53:10.152186 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:10 crc kubenswrapper[4922]: I1128 06:53:10.152287 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:10 crc kubenswrapper[4922]: I1128 06:53:10.152306 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:10 crc kubenswrapper[4922]: I1128 06:53:10.152330 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:10 crc kubenswrapper[4922]: I1128 06:53:10.152346 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:10Z","lastTransitionTime":"2025-11-28T06:53:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 06:53:10 crc kubenswrapper[4922]: I1128 06:53:10.199785 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 06:53:10 crc kubenswrapper[4922]: E1128 06:53:10.199973 4922 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 28 06:53:10 crc kubenswrapper[4922]: E1128 06:53:10.200008 4922 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 28 06:53:10 crc kubenswrapper[4922]: E1128 06:53:10.200029 4922 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 28 06:53:10 crc kubenswrapper[4922]: E1128 06:53:10.200126 4922 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-11-28 06:53:26.200103639 +0000 UTC m=+51.120499261 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 28 06:53:10 crc kubenswrapper[4922]: I1128 06:53:10.255613 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:10 crc kubenswrapper[4922]: I1128 06:53:10.255669 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:10 crc kubenswrapper[4922]: I1128 06:53:10.255686 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:10 crc kubenswrapper[4922]: I1128 06:53:10.255710 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:10 crc kubenswrapper[4922]: I1128 06:53:10.255728 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:10Z","lastTransitionTime":"2025-11-28T06:53:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 06:53:10 crc kubenswrapper[4922]: I1128 06:53:10.300549 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 06:53:10 crc kubenswrapper[4922]: E1128 06:53:10.300813 4922 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 28 06:53:10 crc kubenswrapper[4922]: E1128 06:53:10.300849 4922 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 28 06:53:10 crc kubenswrapper[4922]: E1128 06:53:10.300871 4922 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 28 06:53:10 crc kubenswrapper[4922]: E1128 06:53:10.300939 4922 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-11-28 06:53:26.30091775 +0000 UTC m=+51.221313362 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 28 06:53:10 crc kubenswrapper[4922]: I1128 06:53:10.358272 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:10 crc kubenswrapper[4922]: I1128 06:53:10.358338 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:10 crc kubenswrapper[4922]: I1128 06:53:10.358365 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:10 crc kubenswrapper[4922]: I1128 06:53:10.358398 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:10 crc kubenswrapper[4922]: I1128 06:53:10.358435 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:10Z","lastTransitionTime":"2025-11-28T06:53:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:53:10 crc kubenswrapper[4922]: I1128 06:53:10.397609 4922 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 06:53:10 crc kubenswrapper[4922]: E1128 06:53:10.397906 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 06:53:10 crc kubenswrapper[4922]: I1128 06:53:10.398092 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 06:53:10 crc kubenswrapper[4922]: E1128 06:53:10.398322 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 06:53:10 crc kubenswrapper[4922]: I1128 06:53:10.398475 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 06:53:10 crc kubenswrapper[4922]: E1128 06:53:10.398863 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 06:53:10 crc kubenswrapper[4922]: I1128 06:53:10.398998 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-9kfr9" Nov 28 06:53:10 crc kubenswrapper[4922]: E1128 06:53:10.399150 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-9kfr9" podUID="709beb43-ed88-4a0a-b384-0c463e469964" Nov 28 06:53:10 crc kubenswrapper[4922]: I1128 06:53:10.462426 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:10 crc kubenswrapper[4922]: I1128 06:53:10.462502 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:10 crc kubenswrapper[4922]: I1128 06:53:10.462519 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:10 crc kubenswrapper[4922]: I1128 06:53:10.462545 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:10 crc kubenswrapper[4922]: I1128 06:53:10.462563 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:10Z","lastTransitionTime":"2025-11-28T06:53:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:53:10 crc kubenswrapper[4922]: I1128 06:53:10.565999 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:10 crc kubenswrapper[4922]: I1128 06:53:10.566090 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:10 crc kubenswrapper[4922]: I1128 06:53:10.566108 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:10 crc kubenswrapper[4922]: I1128 06:53:10.566134 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:10 crc kubenswrapper[4922]: I1128 06:53:10.566151 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:10Z","lastTransitionTime":"2025-11-28T06:53:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 06:53:10 crc kubenswrapper[4922]: I1128 06:53:10.669163 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:10 crc kubenswrapper[4922]: I1128 06:53:10.669281 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:10 crc kubenswrapper[4922]: I1128 06:53:10.669311 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:10 crc kubenswrapper[4922]: I1128 06:53:10.669344 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:10 crc kubenswrapper[4922]: I1128 06:53:10.669365 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:10Z","lastTransitionTime":"2025-11-28T06:53:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:53:10 crc kubenswrapper[4922]: I1128 06:53:10.705014 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/709beb43-ed88-4a0a-b384-0c463e469964-metrics-certs\") pod \"network-metrics-daemon-9kfr9\" (UID: \"709beb43-ed88-4a0a-b384-0c463e469964\") " pod="openshift-multus/network-metrics-daemon-9kfr9" Nov 28 06:53:10 crc kubenswrapper[4922]: E1128 06:53:10.705310 4922 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Nov 28 06:53:10 crc kubenswrapper[4922]: E1128 06:53:10.705501 4922 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/709beb43-ed88-4a0a-b384-0c463e469964-metrics-certs podName:709beb43-ed88-4a0a-b384-0c463e469964 nodeName:}" failed. No retries permitted until 2025-11-28 06:53:12.705470718 +0000 UTC m=+37.625866340 (durationBeforeRetry 2s). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/709beb43-ed88-4a0a-b384-0c463e469964-metrics-certs") pod "network-metrics-daemon-9kfr9" (UID: "709beb43-ed88-4a0a-b384-0c463e469964") : object "openshift-multus"/"metrics-daemon-secret" not registered Nov 28 06:53:10 crc kubenswrapper[4922]: I1128 06:53:10.773213 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:10 crc kubenswrapper[4922]: I1128 06:53:10.773290 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:10 crc kubenswrapper[4922]: I1128 06:53:10.773313 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:10 crc kubenswrapper[4922]: I1128 06:53:10.773342 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:10 crc kubenswrapper[4922]: I1128 06:53:10.773363 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:10Z","lastTransitionTime":"2025-11-28T06:53:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:53:10 crc kubenswrapper[4922]: I1128 06:53:10.777684 4922 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 28 06:53:10 crc kubenswrapper[4922]: I1128 06:53:10.799350 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:10Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:10 crc kubenswrapper[4922]: I1128 06:53:10.817416 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-h8wk6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0498340a-5e95-42bf-a0a6-8ac89a6b8858\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://19e8a18b97500977a51210206bba89633ff9939e989bfe1a6e471f5a23c83520\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zf7vq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b903fc60349ec7d5004f23ec933dce13d646a2c343819495564005dfb4813314\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":tru
e,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zf7vq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T06:52:54Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-h8wk6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:10Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:10 crc kubenswrapper[4922]: I1128 06:53:10.839913 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"78d1b075-6126-476f-8318-483aeaa7b542\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d8727c0c5c76c4071561c95b14554063c01a2d7d826bfef19624a346f7079096\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c63926690deadbe1a0b9bd4228c1be8c9d959ae52dc13540e1d7da0ef6ce2b44\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"res
tartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://12e55656cf2a2cadbef853f458f347aabe75bd6b644a48a8c320144a0bbb77f8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ff005273cedb4a3be19dafd07b2572733446d00c1aa3d89dec6dad79a9b37cb7\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://43f9db3464d315f5b596f15327b9e7cfc2935bb8f18492bcf4aa1b2d8e2e8951\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-28T06:52:53Z\\\",\\\"message\\\":\\\" to sync for client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file\\\\nI1128 06:52:53.805479 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\nI1128 06:52:53.805349 1 requestheader_controller.go:172] Starting RequestHeaderAuthRequestController\\\\nI1128 06:52:53.807455 1 shared_informer.go:313] Waiting for caches to sync for RequestHeaderAuthRequestController\\\\nI1128 06:52:53.805665 1 envvar.go:172] \\\\\\\"Feature gate default state\\\\\\\" feature=\\\\\\\"WatchListClient\\\\\\\" enabled=false\\\\nI1128 06:52:53.807562 1 envvar.go:172] \\\\\\\"Feature gate default state\\\\\\\" feature=\\\\\\\"InformerResourceVersion\\\\\\\" enabled=false\\\\nI1128 06:52:53.805922 1 tlsconfig.go:203] \\\\\\\"Loaded serving cert\\\\\\\" certName=\\\\\\\"serving-cert::/tmp/serving-cert-870734072/tls.crt::/tmp/serving-cert-870734072/tls.key\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"localhost\\\\\\\\\\\\\\\" [serving] validServingFor=[localhost] issuer=\\\\\\\\\\\\\\\"check-endpoints-signer@1764312757\\\\\\\\\\\\\\\" (2025-11-28 06:52:36 +0000 UTC to 2025-12-28 06:52:37 +0000 UTC (now=2025-11-28 06:52:53.805871396 +0000 UTC))\\\\\\\"\\\\nI1128 06:52:53.808356 1 named_certificates.go:53] \\\\\\\"Loaded SNI cert\\\\\\\" index=0 certName=\\\\\\\"self-signed loopback\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"apiserver-loopback-client@1764312768\\\\\\\\\\\\\\\" [serving] validServingFor=[apiserver-loopback-client] issuer=\\\\\\\\\\\\\\\"apiserver-loopback-client-ca@1764312768\\\\\\\\\\\\\\\" (2025-11-28 05:52:47 +0000 UTC to 2026-11-28 05:52:47 +0000 UTC (now=2025-11-28 06:52:53.808326937 +0000 UTC))\\\\\\\"\\\\nI1128 06:52:53.808434 1 secure_serving.go:213] Serving securely on [::]:17697\\\\nI1128 
06:52:53.808491 1 genericapiserver.go:683] [graceful-termination] waiting for shutdown to be initiated\\\\nI1128 06:52:53.805962 1 dynamic_serving_content.go:135] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-870734072/tls.crt::/tmp/serving-cert-870734072/tls.key\\\\\\\"\\\\nI1128 06:52:53.808872 1 tlsconfig.go:243] \\\\\\\"Starting DynamicServingCertificateController\\\\\\\"\\\\nF1128 06:52:53.810397 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T06:52:37Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cd17aa15325fbe3a40b81cea5bfec137ef02a8dd2d5f8393be4d206a8360ba89\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:37Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9a1a9e0fb2169a4caf2986906a87573648458725e2571ec377704856d3c16b47\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9a1a9e0fb2169a4caf2986906a87573648458725e2571ec377704856d3c16b47\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T06:52:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T06:52:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T06:52:35Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:10Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:10 crc kubenswrapper[4922]: I1128 06:53:10.858924 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:57Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:57Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4fb859dbed8c4c25965a739a2bd823f90f867a3bb629206994858cd62ba597ff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:10Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:10 crc kubenswrapper[4922]: I1128 06:53:10.877344 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:10 crc kubenswrapper[4922]: I1128 06:53:10.877544 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:10 crc kubenswrapper[4922]: I1128 06:53:10.877572 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:10 crc kubenswrapper[4922]: I1128 06:53:10.877653 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:10 crc kubenswrapper[4922]: I1128 06:53:10.877683 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:10Z","lastTransitionTime":"2025-11-28T06:53:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 06:53:10 crc kubenswrapper[4922]: I1128 06:53:10.878819 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:10Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:10 crc kubenswrapper[4922]: I1128 06:53:10.911562 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-7gdxt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ac5c6b67-2037-400e-8e03-845b47d8ca67\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"message\\\":\\\"containers with unready 
status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d0f038ffe6c8ec5ec836379b827ecb71967119efca3f2cd4ce5d3006fa96a153\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dtxdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2c1a545fef7aec25b21dd112c54eeb7b94306cafaf4732a50de9ff10a409c443\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dtxdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6e9942f8e8cdf6ffd04d46b29be5a53fcc8c4cb5d79f50d7747e2fe6eb744ffd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",
\\\"name\\\":\\\"kube-api-access-dtxdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e91ea6957087e19d43626438d68bf92a19a6f35325de570738354edd61da5bb0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dtxdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4f56366e2355663eb896c09a6710166b89f794a7f17cb317f6c4299e8b317782\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dtxdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://403e86b4b99e8bc85be77996288705f6ea8ff5da8e9b7dfb09749f4341065b55\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-s
ocket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dtxdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3ead32a1ed979f2f568f830c0c57ee284644824f41055dda621deb734bd7dd1b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://67c452725d82ca16f6b13f31f18d78e040d878900953e2b1c2d43afa9b4bbdc0\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-28T06:53:03Z\\\",\\\"message\\\":\\\"e (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1128 06:53:03.775357 6198 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI1128 06:53:03.775409 6198 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI1128 06:53:03.775460 6198 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI1128 06:53:03.775477 6198 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI1128 06:53:03.775513 6198 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI1128 06:53:03.775617 6198 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI1128 06:53:03.775639 6198 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI1128 06:53:03.775644 6198 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI1128 06:53:03.775663 6198 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI1128 06:53:03.775679 6198 handler.go:208] Removed *v1.Node event handler 2\\\\nI1128 06:53:03.775682 6198 factory.go:656] Stopping watch factory\\\\nI1128 06:53:03.775691 6198 handler.go:208] Removed *v1.Node event handler 7\\\\nI1128 06:53:03.775699 6198 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI1128 06:53:03.775703 6198 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI1128 06:53:03.775714 6198 handler.go:208] Removed *v1.Namespace 
ev\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T06:53:01Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:53:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dtxdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d31c99f96b8af9c7b9c47c4ccd0b6485b23c9eacfe293667038042df8330d8a9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dtxdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"con
tainerID\\\":\\\"cri-o://3aaa3ffb5b99838c7969b3e241ad9d7a0bc4797fac3ec52280914abbb7ca1b24\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3aaa3ffb5b99838c7969b3e241ad9d7a0bc4797fac3ec52280914abbb7ca1b24\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T06:52:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dtxdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T06:52:54Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-7gdxt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:10Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:10 crc kubenswrapper[4922]: I1128 06:53:10.931514 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-n9b52" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"31348e3e-fe58-4426-98b7-bd9dd404283b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://07e060fc8cd65cfc5a3e2ba86edb408b32996c25dac44f6f70d2f91b837e9da1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:53:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vkhgr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://03d0e97f4ab12f9cccfefde0d5665e45e14db63343fd1273279d41a8e56b060f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:53:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vkhgr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T06:53:07Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-n9b52\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:10Z is after 2025-08-24T17:21:41Z" Nov 28 
06:53:10 crc kubenswrapper[4922]: I1128 06:53:10.950503 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f54a7fc3-dff2-4919-91a9-7defe17b717e\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2b39d53d3c00be07ad6ed44fb37961669118efd3f0a953a39e05c90c9fc30319\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1c84fe4f54f9b9fc76f23f13aaee875e9d127be3dec3e3952893436fb9d94936\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bf8a543b9af64068e2164c1fb7fcf8117e36a3f1a6c6e40df2a63da8fea86612\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\
",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7ef4b355167e11c82fb8067373d0d1b4ec5551157e683043c98f9249082795d2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T06:52:35Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:10Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:10 crc kubenswrapper[4922]: I1128 06:53:10.969317 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:10Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:10 crc kubenswrapper[4922]: I1128 06:53:10.981885 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:10 crc kubenswrapper[4922]: I1128 06:53:10.981965 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:10 crc kubenswrapper[4922]: I1128 06:53:10.981992 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:10 crc kubenswrapper[4922]: I1128 06:53:10.982022 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:10 crc kubenswrapper[4922]: I1128 06:53:10.982044 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:10Z","lastTransitionTime":"2025-11-28T06:53:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 06:53:10 crc kubenswrapper[4922]: I1128 06:53:10.993870 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-w9zxj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e1f29751-83a6-4469-b733-50e654026f8c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://679d64be6eca63ee652a4cb6cc5432c6ce549d1de243bdfbde115af634e770e3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ghdft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T06:52:54Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-w9zxj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:10Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:11 crc kubenswrapper[4922]: I1128 06:53:11.015107 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-jgzjd" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b05f16bb-1729-4fd8-883a-4fb960bf4cff\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://099001160eba2d5545fdd42a6fac2b9842549a64b5d3fe093c274e073a156e5a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fwd5b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T06:52:54Z\\\"}}\" for pod \"openshift-multus\"/\"multus-jgzjd\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:11Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:11 crc kubenswrapper[4922]: I1128 06:53:11.040189 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-xm948" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"28b50482-ffec-406c-9ff9-9604bce5d5d5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8329d62934c99c8561a76ffc873ea44b49549e6be23697ea4003dcd42798f914\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:53:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-76zhj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ea8d65e8ee7eb92aa290261e8f51b41b46819bc513aaca5c3bbdf3aa90a4acf1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ea8d65e8ee7eb92aa290261e8f51b41b46819bc513aaca5c3bbdf3aa90a4acf1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T06:52:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.
io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-76zhj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b52be6274c308e05417b43b1a405e2156837ac5b1e751cf1bf07f1820e5072c1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b52be6274c308e05417b43b1a405e2156837ac5b1e751cf1bf07f1820e5072c1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T06:52:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T06:52:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-76zhj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2e49c9118ef8a51223ca7a5ed0e966458c25e01d5fe81bffeecf85e3c958177a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2e49c9118ef8a51223ca7a5ed0e966458c25e01d5fe81bffeecf85e3c958177a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T06:52:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T06:52:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-76zhj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://10808a5202288b64a0583c474df43f7ad4846b876c071e3797e9403416dbd4e5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://10808a5202288b64a0583c474df43f7ad4846b876c071e3797e9403416dbd4e5\\\",\\\"exitCode\\\":0,\\\
"finishedAt\\\":\\\"2025-11-28T06:52:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T06:52:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-76zhj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f17f7a3f2eae1a08db3ab5d237156d3555583ed11bdf121c4138312913bc2ad1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f17f7a3f2eae1a08db3ab5d237156d3555583ed11bdf121c4138312913bc2ad1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T06:53:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T06:52:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-76zhj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8c558e4eb231b58b703d70d6da8454e7128ac7fc7138897bedcbdeb9e801a08b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8c558e4eb231b58b703d70d6da8454e7128ac7fc7138897bedcbdeb9e801a08b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T06:53:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T06:53:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-76zhj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T06:52:54Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-xm948\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has 
expired or is not yet valid: current time 2025-11-28T06:53:11Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:11 crc kubenswrapper[4922]: I1128 06:53:11.063722 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-z5d9x" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"21211ec8-3baf-4230-9cd8-c641f6bdc0e1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d42d2096b746c6f0a9210409101237a8b283b9baa914633aefc36971712ec634\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g4hdz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T06:52:57Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-z5d9x\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:11Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:11 crc kubenswrapper[4922]: I1128 06:53:11.085280 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:11 crc kubenswrapper[4922]: I1128 06:53:11.085327 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:11 crc kubenswrapper[4922]: I1128 06:53:11.085339 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:11 crc kubenswrapper[4922]: I1128 06:53:11.085358 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:11 crc kubenswrapper[4922]: I1128 
06:53:11.085373 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:11Z","lastTransitionTime":"2025-11-28T06:53:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:53:11 crc kubenswrapper[4922]: I1128 06:53:11.095021 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-9kfr9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"709beb43-ed88-4a0a-b384-0c463e469964\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:09Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:09Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5p26f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5p26f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T06:53:09Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-9kfr9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:11Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:11 crc kubenswrapper[4922]: I1128 06:53:11.119289 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://60275d4709ecec957c37c52e762628422258288394fb23bb6190e98253977288\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d7be069e52fb9de73b7d4297a34eaab68e25c764ed60172e87146259d0d0b6ed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:11Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:11 crc kubenswrapper[4922]: I1128 06:53:11.141355 
4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8de96806bba4cb76f53ccf39f1188732d96955671049b550589a836ed21e0616\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:11Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:11 crc kubenswrapper[4922]: I1128 06:53:11.188807 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:11 crc kubenswrapper[4922]: I1128 06:53:11.188881 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:11 crc kubenswrapper[4922]: I1128 06:53:11.188899 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:11 crc kubenswrapper[4922]: I1128 06:53:11.188922 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:11 crc kubenswrapper[4922]: I1128 06:53:11.188940 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:11Z","lastTransitionTime":"2025-11-28T06:53:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 06:53:11 crc kubenswrapper[4922]: I1128 06:53:11.292172 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:11 crc kubenswrapper[4922]: I1128 06:53:11.292290 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:11 crc kubenswrapper[4922]: I1128 06:53:11.292310 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:11 crc kubenswrapper[4922]: I1128 06:53:11.292336 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:11 crc kubenswrapper[4922]: I1128 06:53:11.292354 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:11Z","lastTransitionTime":"2025-11-28T06:53:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:53:11 crc kubenswrapper[4922]: I1128 06:53:11.395574 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:11 crc kubenswrapper[4922]: I1128 06:53:11.395647 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:11 crc kubenswrapper[4922]: I1128 06:53:11.395670 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:11 crc kubenswrapper[4922]: I1128 06:53:11.395700 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:11 crc kubenswrapper[4922]: I1128 06:53:11.395723 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:11Z","lastTransitionTime":"2025-11-28T06:53:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:53:11 crc kubenswrapper[4922]: I1128 06:53:11.499301 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:11 crc kubenswrapper[4922]: I1128 06:53:11.499368 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:11 crc kubenswrapper[4922]: I1128 06:53:11.499386 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:11 crc kubenswrapper[4922]: I1128 06:53:11.499411 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:11 crc kubenswrapper[4922]: I1128 06:53:11.499428 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:11Z","lastTransitionTime":"2025-11-28T06:53:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 06:53:11 crc kubenswrapper[4922]: I1128 06:53:11.602560 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:11 crc kubenswrapper[4922]: I1128 06:53:11.602607 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:11 crc kubenswrapper[4922]: I1128 06:53:11.602623 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:11 crc kubenswrapper[4922]: I1128 06:53:11.602645 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:11 crc kubenswrapper[4922]: I1128 06:53:11.602664 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:11Z","lastTransitionTime":"2025-11-28T06:53:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:53:11 crc kubenswrapper[4922]: I1128 06:53:11.705621 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:11 crc kubenswrapper[4922]: I1128 06:53:11.705700 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:11 crc kubenswrapper[4922]: I1128 06:53:11.705734 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:11 crc kubenswrapper[4922]: I1128 06:53:11.705763 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:11 crc kubenswrapper[4922]: I1128 06:53:11.705785 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:11Z","lastTransitionTime":"2025-11-28T06:53:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:53:11 crc kubenswrapper[4922]: I1128 06:53:11.810078 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:11 crc kubenswrapper[4922]: I1128 06:53:11.810160 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:11 crc kubenswrapper[4922]: I1128 06:53:11.810188 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:11 crc kubenswrapper[4922]: I1128 06:53:11.810267 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:11 crc kubenswrapper[4922]: I1128 06:53:11.810294 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:11Z","lastTransitionTime":"2025-11-28T06:53:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 06:53:11 crc kubenswrapper[4922]: I1128 06:53:11.913367 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:11 crc kubenswrapper[4922]: I1128 06:53:11.913421 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:11 crc kubenswrapper[4922]: I1128 06:53:11.913438 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:11 crc kubenswrapper[4922]: I1128 06:53:11.913460 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:11 crc kubenswrapper[4922]: I1128 06:53:11.913476 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:11Z","lastTransitionTime":"2025-11-28T06:53:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:53:12 crc kubenswrapper[4922]: I1128 06:53:12.016307 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:12 crc kubenswrapper[4922]: I1128 06:53:12.016373 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:12 crc kubenswrapper[4922]: I1128 06:53:12.016396 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:12 crc kubenswrapper[4922]: I1128 06:53:12.016424 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:12 crc kubenswrapper[4922]: I1128 06:53:12.016447 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:12Z","lastTransitionTime":"2025-11-28T06:53:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:53:12 crc kubenswrapper[4922]: I1128 06:53:12.120442 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:12 crc kubenswrapper[4922]: I1128 06:53:12.120510 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:12 crc kubenswrapper[4922]: I1128 06:53:12.120548 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:12 crc kubenswrapper[4922]: I1128 06:53:12.120585 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:12 crc kubenswrapper[4922]: I1128 06:53:12.120609 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:12Z","lastTransitionTime":"2025-11-28T06:53:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 06:53:12 crc kubenswrapper[4922]: I1128 06:53:12.223705 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:12 crc kubenswrapper[4922]: I1128 06:53:12.223740 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:12 crc kubenswrapper[4922]: I1128 06:53:12.223750 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:12 crc kubenswrapper[4922]: I1128 06:53:12.223765 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:12 crc kubenswrapper[4922]: I1128 06:53:12.223774 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:12Z","lastTransitionTime":"2025-11-28T06:53:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:53:12 crc kubenswrapper[4922]: I1128 06:53:12.326342 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:12 crc kubenswrapper[4922]: I1128 06:53:12.326398 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:12 crc kubenswrapper[4922]: I1128 06:53:12.326414 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:12 crc kubenswrapper[4922]: I1128 06:53:12.326441 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:12 crc kubenswrapper[4922]: I1128 06:53:12.326463 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:12Z","lastTransitionTime":"2025-11-28T06:53:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:53:12 crc kubenswrapper[4922]: I1128 06:53:12.403478 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 06:53:12 crc kubenswrapper[4922]: I1128 06:53:12.403520 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 06:53:12 crc kubenswrapper[4922]: E1128 06:53:12.403681 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 06:53:12 crc kubenswrapper[4922]: I1128 06:53:12.403716 4922 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/network-metrics-daemon-9kfr9" Nov 28 06:53:12 crc kubenswrapper[4922]: I1128 06:53:12.403906 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 06:53:12 crc kubenswrapper[4922]: E1128 06:53:12.404202 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-9kfr9" podUID="709beb43-ed88-4a0a-b384-0c463e469964" Nov 28 06:53:12 crc kubenswrapper[4922]: E1128 06:53:12.404356 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 06:53:12 crc kubenswrapper[4922]: E1128 06:53:12.404621 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 06:53:12 crc kubenswrapper[4922]: I1128 06:53:12.429496 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:12 crc kubenswrapper[4922]: I1128 06:53:12.429556 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:12 crc kubenswrapper[4922]: I1128 06:53:12.429574 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:12 crc kubenswrapper[4922]: I1128 06:53:12.429604 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:12 crc kubenswrapper[4922]: I1128 06:53:12.429624 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:12Z","lastTransitionTime":"2025-11-28T06:53:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 06:53:12 crc kubenswrapper[4922]: I1128 06:53:12.532397 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:12 crc kubenswrapper[4922]: I1128 06:53:12.532497 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:12 crc kubenswrapper[4922]: I1128 06:53:12.532523 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:12 crc kubenswrapper[4922]: I1128 06:53:12.532552 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:12 crc kubenswrapper[4922]: I1128 06:53:12.532569 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:12Z","lastTransitionTime":"2025-11-28T06:53:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:53:12 crc kubenswrapper[4922]: I1128 06:53:12.635736 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:12 crc kubenswrapper[4922]: I1128 06:53:12.635807 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:12 crc kubenswrapper[4922]: I1128 06:53:12.635824 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:12 crc kubenswrapper[4922]: I1128 06:53:12.635849 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:12 crc kubenswrapper[4922]: I1128 06:53:12.635866 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:12Z","lastTransitionTime":"2025-11-28T06:53:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:53:12 crc kubenswrapper[4922]: I1128 06:53:12.731605 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/709beb43-ed88-4a0a-b384-0c463e469964-metrics-certs\") pod \"network-metrics-daemon-9kfr9\" (UID: \"709beb43-ed88-4a0a-b384-0c463e469964\") " pod="openshift-multus/network-metrics-daemon-9kfr9" Nov 28 06:53:12 crc kubenswrapper[4922]: E1128 06:53:12.731905 4922 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Nov 28 06:53:12 crc kubenswrapper[4922]: E1128 06:53:12.732030 4922 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/709beb43-ed88-4a0a-b384-0c463e469964-metrics-certs podName:709beb43-ed88-4a0a-b384-0c463e469964 nodeName:}" failed. No retries permitted until 2025-11-28 06:53:16.731995111 +0000 UTC m=+41.652390733 (durationBeforeRetry 4s). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/709beb43-ed88-4a0a-b384-0c463e469964-metrics-certs") pod "network-metrics-daemon-9kfr9" (UID: "709beb43-ed88-4a0a-b384-0c463e469964") : object "openshift-multus"/"metrics-daemon-secret" not registered Nov 28 06:53:12 crc kubenswrapper[4922]: I1128 06:53:12.739510 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:12 crc kubenswrapper[4922]: I1128 06:53:12.739564 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:12 crc kubenswrapper[4922]: I1128 06:53:12.739583 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:12 crc kubenswrapper[4922]: I1128 06:53:12.739606 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:12 crc kubenswrapper[4922]: I1128 06:53:12.739623 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:12Z","lastTransitionTime":"2025-11-28T06:53:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:53:12 crc kubenswrapper[4922]: I1128 06:53:12.843303 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:12 crc kubenswrapper[4922]: I1128 06:53:12.843376 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:12 crc kubenswrapper[4922]: I1128 06:53:12.843395 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:12 crc kubenswrapper[4922]: I1128 06:53:12.843420 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:12 crc kubenswrapper[4922]: I1128 06:53:12.843438 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:12Z","lastTransitionTime":"2025-11-28T06:53:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 06:53:12 crc kubenswrapper[4922]: I1128 06:53:12.946594 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:12 crc kubenswrapper[4922]: I1128 06:53:12.946654 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:12 crc kubenswrapper[4922]: I1128 06:53:12.946672 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:12 crc kubenswrapper[4922]: I1128 06:53:12.946694 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:12 crc kubenswrapper[4922]: I1128 06:53:12.946713 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:12Z","lastTransitionTime":"2025-11-28T06:53:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:53:13 crc kubenswrapper[4922]: I1128 06:53:13.049706 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:13 crc kubenswrapper[4922]: I1128 06:53:13.049755 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:13 crc kubenswrapper[4922]: I1128 06:53:13.049796 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:13 crc kubenswrapper[4922]: I1128 06:53:13.049820 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:13 crc kubenswrapper[4922]: I1128 06:53:13.049839 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:13Z","lastTransitionTime":"2025-11-28T06:53:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:53:13 crc kubenswrapper[4922]: I1128 06:53:13.153270 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:13 crc kubenswrapper[4922]: I1128 06:53:13.153355 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:13 crc kubenswrapper[4922]: I1128 06:53:13.153394 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:13 crc kubenswrapper[4922]: I1128 06:53:13.153426 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:13 crc kubenswrapper[4922]: I1128 06:53:13.153448 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:13Z","lastTransitionTime":"2025-11-28T06:53:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Nov 28 06:53:13 crc kubenswrapper[4922]: I1128 06:53:13.257139 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 06:53:13 crc kubenswrapper[4922]: I1128 06:53:13.257214 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 06:53:13 crc kubenswrapper[4922]: I1128 06:53:13.257276 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 06:53:13 crc kubenswrapper[4922]: I1128 06:53:13.257301 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 06:53:13 crc kubenswrapper[4922]: I1128 06:53:13.257327 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:13Z","lastTransitionTime":"2025-11-28T06:53:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
[The same five-entry block (four "Recording event message for node" entries followed by one setters.go:603 "Node became not ready" entry with an identical KubeletNotReady/CNI message) repeats at roughly 100 ms intervals at 06:53:13.360, 06:53:13.463, 06:53:13.565, 06:53:13.668, 06:53:13.771, 06:53:13.875, 06:53:13.979, 06:53:14.082, 06:53:14.187, 06:53:14.291 and 06:53:14.396; only the timestamps change.]
Nov 28 06:53:14 crc kubenswrapper[4922]: I1128 06:53:14.397513 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 28 06:53:14 crc kubenswrapper[4922]: E1128 06:53:14.397650 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 28 06:53:14 crc kubenswrapper[4922]: I1128 06:53:14.398166 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 28 06:53:14 crc kubenswrapper[4922]: E1128 06:53:14.398322 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 28 06:53:14 crc kubenswrapper[4922]: I1128 06:53:14.398417 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 28 06:53:14 crc kubenswrapper[4922]: E1128 06:53:14.398516 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 28 06:53:14 crc kubenswrapper[4922]: I1128 06:53:14.398593 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-9kfr9"
Nov 28 06:53:14 crc kubenswrapper[4922]: E1128 06:53:14.398690 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-9kfr9" podUID="709beb43-ed88-4a0a-b384-0c463e469964"
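All four pod sync failures above stem from the same root condition: kubelet found no CNI configuration file in /etc/kubernetes/cni/net.d/. The following is a minimal Go sketch of the directory check that error message implies; the extension list (.conf, .conflist, .json) follows common libcni convention and is an assumption here, not something this log states.

// cnicheck.go: minimal sketch of the check behind "no CNI configuration file
// in /etc/kubernetes/cni/net.d/". Lists the directory kubelet names in the
// error and reports whether any plausible CNI config files are present.
package main

import (
	"fmt"
	"os"
	"path/filepath"
)

func main() {
	dir := "/etc/kubernetes/cni/net.d" // directory named in the kubelet error above
	entries, err := os.ReadDir(dir)
	if err != nil {
		fmt.Println("cannot read", dir+":", err)
		return
	}
	found := 0
	for _, e := range entries {
		// Assumed extension set, following libcni convention.
		switch filepath.Ext(e.Name()) {
		case ".conf", ".conflist", ".json":
			fmt.Println("CNI config found:", filepath.Join(dir, e.Name()))
			found++
		}
	}
	if found == 0 {
		fmt.Println("no CNI configuration files; node stays NotReady until the network provider writes one")
	}
}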
pod="openshift-multus/network-metrics-daemon-9kfr9" podUID="709beb43-ed88-4a0a-b384-0c463e469964" Nov 28 06:53:14 crc kubenswrapper[4922]: I1128 06:53:14.498896 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:14 crc kubenswrapper[4922]: I1128 06:53:14.498943 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:14 crc kubenswrapper[4922]: I1128 06:53:14.498959 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:14 crc kubenswrapper[4922]: I1128 06:53:14.498982 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:14 crc kubenswrapper[4922]: I1128 06:53:14.498998 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:14Z","lastTransitionTime":"2025-11-28T06:53:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:53:14 crc kubenswrapper[4922]: I1128 06:53:14.602779 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:14 crc kubenswrapper[4922]: I1128 06:53:14.602843 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:14 crc kubenswrapper[4922]: I1128 06:53:14.602861 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:14 crc kubenswrapper[4922]: I1128 06:53:14.602888 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:14 crc kubenswrapper[4922]: I1128 06:53:14.602910 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:14Z","lastTransitionTime":"2025-11-28T06:53:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 06:53:14 crc kubenswrapper[4922]: I1128 06:53:14.706662 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:14 crc kubenswrapper[4922]: I1128 06:53:14.706752 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:14 crc kubenswrapper[4922]: I1128 06:53:14.706777 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:14 crc kubenswrapper[4922]: I1128 06:53:14.706812 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:14 crc kubenswrapper[4922]: I1128 06:53:14.706833 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:14Z","lastTransitionTime":"2025-11-28T06:53:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:53:14 crc kubenswrapper[4922]: I1128 06:53:14.810747 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:14 crc kubenswrapper[4922]: I1128 06:53:14.810812 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:14 crc kubenswrapper[4922]: I1128 06:53:14.810828 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:14 crc kubenswrapper[4922]: I1128 06:53:14.810856 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:14 crc kubenswrapper[4922]: I1128 06:53:14.810874 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:14Z","lastTransitionTime":"2025-11-28T06:53:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:53:14 crc kubenswrapper[4922]: I1128 06:53:14.914268 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:14 crc kubenswrapper[4922]: I1128 06:53:14.914329 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:14 crc kubenswrapper[4922]: I1128 06:53:14.914364 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:14 crc kubenswrapper[4922]: I1128 06:53:14.914406 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:14 crc kubenswrapper[4922]: I1128 06:53:14.914427 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:14Z","lastTransitionTime":"2025-11-28T06:53:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 06:53:15 crc kubenswrapper[4922]: I1128 06:53:15.017175 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:15 crc kubenswrapper[4922]: I1128 06:53:15.017293 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:15 crc kubenswrapper[4922]: I1128 06:53:15.017317 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:15 crc kubenswrapper[4922]: I1128 06:53:15.017347 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:15 crc kubenswrapper[4922]: I1128 06:53:15.017368 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:15Z","lastTransitionTime":"2025-11-28T06:53:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:53:15 crc kubenswrapper[4922]: I1128 06:53:15.120932 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:15 crc kubenswrapper[4922]: I1128 06:53:15.121002 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:15 crc kubenswrapper[4922]: I1128 06:53:15.121021 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:15 crc kubenswrapper[4922]: I1128 06:53:15.121047 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:15 crc kubenswrapper[4922]: I1128 06:53:15.121067 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:15Z","lastTransitionTime":"2025-11-28T06:53:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:53:15 crc kubenswrapper[4922]: I1128 06:53:15.224448 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:15 crc kubenswrapper[4922]: I1128 06:53:15.224510 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:15 crc kubenswrapper[4922]: I1128 06:53:15.224532 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:15 crc kubenswrapper[4922]: I1128 06:53:15.224563 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:15 crc kubenswrapper[4922]: I1128 06:53:15.224586 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:15Z","lastTransitionTime":"2025-11-28T06:53:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 06:53:15 crc kubenswrapper[4922]: I1128 06:53:15.328172 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:15 crc kubenswrapper[4922]: I1128 06:53:15.328307 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:15 crc kubenswrapper[4922]: I1128 06:53:15.328334 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:15 crc kubenswrapper[4922]: I1128 06:53:15.328366 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:15 crc kubenswrapper[4922]: I1128 06:53:15.328392 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:15Z","lastTransitionTime":"2025-11-28T06:53:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:53:15 crc kubenswrapper[4922]: I1128 06:53:15.418411 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:15Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:15 crc kubenswrapper[4922]: I1128 06:53:15.431326 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:15 crc kubenswrapper[4922]: I1128 06:53:15.431393 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:15 crc kubenswrapper[4922]: I1128 06:53:15.431411 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:15 crc kubenswrapper[4922]: I1128 06:53:15.431435 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:15 crc kubenswrapper[4922]: I1128 06:53:15.431454 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:15Z","lastTransitionTime":"2025-11-28T06:53:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
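The setters.go:603 entries above embed the node's Ready condition as inline JSON. When mining a capture like this one, a small standalone Go sketch such as the following can decode that payload; the struct mirrors only the fields visible in the log line, not the full k8s.io/api/core/v1 NodeCondition type.

// parsecond.go: decode the Ready condition payload that setters.go logs above.
package main

import (
	"encoding/json"
	"fmt"
)

type nodeCondition struct {
	Type               string `json:"type"`
	Status             string `json:"status"`
	LastHeartbeatTime  string `json:"lastHeartbeatTime"`
	LastTransitionTime string `json:"lastTransitionTime"`
	Reason             string `json:"reason"`
	Message            string `json:"message"`
}

func main() {
	// Payload copied verbatim from the "Node became not ready" entry above.
	raw := `{"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:15Z","lastTransitionTime":"2025-11-28T06:53:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}`
	var c nodeCondition
	if err := json.Unmarshal([]byte(raw), &c); err != nil {
		panic(err)
	}
	fmt.Printf("%s=%s (%s): %s\n", c.Type, c.Status, c.Reason, c.Message)
}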
Nov 28 06:53:15 crc kubenswrapper[4922]: I1128 06:53:15.452937 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-7gdxt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ac5c6b67-2037-400e-8e03-845b47d8ca67\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d0f038ffe6c8ec5ec836379b827ecb71967119efca3f2cd4ce5d3006fa96a153\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dtxdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2c1a545fef7aec25b21dd112c54eeb7b94306cafaf4732a50de9ff10a409c443\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dtxdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\
":\\\"cri-o://6e9942f8e8cdf6ffd04d46b29be5a53fcc8c4cb5d79f50d7747e2fe6eb744ffd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dtxdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e91ea6957087e19d43626438d68bf92a19a6f35325de570738354edd61da5bb0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dtxdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4f56366e2355663eb896c09a6710166b89f794a7f17cb317f6c4299e8b317782\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dtxdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://403e86b4b99e8bc85be77996288705f6ea8ff5da8e9b7dfb09749f4341065b55\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.i
o/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dtxdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3ead32a1ed979f2f568f830c0c57ee284644824f41055dda621deb734bd7dd1b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://67c452725d82ca16f6b13f31f18d78e040d878900953e2b1c2d43afa9b4bbdc0\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-28T06:53:03Z\\\",\\\"message\\\":\\\"e (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1128 06:53:03.775357 6198 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI1128 06:53:03.775409 6198 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI1128 06:53:03.775460 6198 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI1128 06:53:03.775477 6198 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI1128 06:53:03.775513 6198 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI1128 06:53:03.775617 6198 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI1128 06:53:03.775639 6198 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI1128 06:53:03.775644 6198 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI1128 06:53:03.775663 6198 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI1128 06:53:03.775679 6198 handler.go:208] Removed *v1.Node event handler 2\\\\nI1128 06:53:03.775682 6198 factory.go:656] Stopping watch factory\\\\nI1128 06:53:03.775691 6198 handler.go:208] Removed *v1.Node event handler 7\\\\nI1128 06:53:03.775699 6198 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI1128 06:53:03.775703 6198 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI1128 06:53:03.775714 6198 handler.go:208] Removed *v1.Namespace 
ev\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T06:53:01Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:53:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dtxdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d31c99f96b8af9c7b9c47c4ccd0b6485b23c9eacfe293667038042df8330d8a9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dtxdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"con
tainerID\\\":\\\"cri-o://3aaa3ffb5b99838c7969b3e241ad9d7a0bc4797fac3ec52280914abbb7ca1b24\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3aaa3ffb5b99838c7969b3e241ad9d7a0bc4797fac3ec52280914abbb7ca1b24\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T06:52:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dtxdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T06:52:54Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-7gdxt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:15Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:15 crc kubenswrapper[4922]: I1128 06:53:15.472488 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-n9b52" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"31348e3e-fe58-4426-98b7-bd9dd404283b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://07e060fc8cd65cfc5a3e2ba86edb408b32996c25dac44f6f70d2f91b837e9da1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:53:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vkhgr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://03d0e97f4ab12f9cccfefde0d5665e45e14db63343fd1273279d41a8e56b060f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:53:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vkhgr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T06:53:07Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-n9b52\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:15Z is after 2025-08-24T17:21:41Z" Nov 28 
06:53:15 crc kubenswrapper[4922]: I1128 06:53:15.501624 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"78d1b075-6126-476f-8318-483aeaa7b542\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d8727c0c5c76c4071561c95b14554063c01a2d7d826bfef19624a346f7079096\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c63926690deadbe1a0b9bd4228c1be8c9d959ae52dc13540e1d7da0ef6ce2b44\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://12e55656cf2a2cadbef853f458f347aabe75bd6b644a48a8c320144a0bbb77f8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\
\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ff005273cedb4a3be19dafd07b2572733446d00c1aa3d89dec6dad79a9b37cb7\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://43f9db3464d315f5b596f15327b9e7cfc2935bb8f18492bcf4aa1b2d8e2e8951\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-28T06:52:53Z\\\",\\\"message\\\":\\\" to sync for client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file\\\\nI1128 06:52:53.805479 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\nI1128 06:52:53.805349 1 requestheader_controller.go:172] Starting RequestHeaderAuthRequestController\\\\nI1128 06:52:53.807455 1 shared_informer.go:313] Waiting for caches to sync for RequestHeaderAuthRequestController\\\\nI1128 06:52:53.805665 1 envvar.go:172] \\\\\\\"Feature gate default state\\\\\\\" feature=\\\\\\\"WatchListClient\\\\\\\" enabled=false\\\\nI1128 06:52:53.807562 1 envvar.go:172] \\\\\\\"Feature gate default state\\\\\\\" feature=\\\\\\\"InformerResourceVersion\\\\\\\" enabled=false\\\\nI1128 06:52:53.805922 1 tlsconfig.go:203] \\\\\\\"Loaded serving cert\\\\\\\" certName=\\\\\\\"serving-cert::/tmp/serving-cert-870734072/tls.crt::/tmp/serving-cert-870734072/tls.key\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"localhost\\\\\\\\\\\\\\\" [serving] validServingFor=[localhost] issuer=\\\\\\\\\\\\\\\"check-endpoints-signer@1764312757\\\\\\\\\\\\\\\" (2025-11-28 06:52:36 +0000 UTC to 2025-12-28 06:52:37 +0000 UTC (now=2025-11-28 06:52:53.805871396 +0000 UTC))\\\\\\\"\\\\nI1128 06:52:53.808356 1 named_certificates.go:53] \\\\\\\"Loaded SNI cert\\\\\\\" index=0 certName=\\\\\\\"self-signed loopback\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"apiserver-loopback-client@1764312768\\\\\\\\\\\\\\\" [serving] validServingFor=[apiserver-loopback-client] issuer=\\\\\\\\\\\\\\\"apiserver-loopback-client-ca@1764312768\\\\\\\\\\\\\\\" (2025-11-28 05:52:47 +0000 UTC to 2026-11-28 05:52:47 +0000 UTC (now=2025-11-28 06:52:53.808326937 +0000 UTC))\\\\\\\"\\\\nI1128 06:52:53.808434 1 secure_serving.go:213] Serving securely on [::]:17697\\\\nI1128 06:52:53.808491 1 genericapiserver.go:683] [graceful-termination] waiting for shutdown to be initiated\\\\nI1128 06:52:53.805962 1 dynamic_serving_content.go:135] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-870734072/tls.crt::/tmp/serving-cert-870734072/tls.key\\\\\\\"\\\\nI1128 06:52:53.808872 1 tlsconfig.go:243] \\\\\\\"Starting DynamicServingCertificateController\\\\\\\"\\\\nF1128 06:52:53.810397 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T06:52:37Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cd17aa15325fbe3a40b81cea5bfec137ef02a8dd2d5f8393be4d206a8360ba89\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:37Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9a1a9e0fb2169a4caf2986906a87573648458725e2571ec377704856d3c16b47\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9a1a9e0fb2169a4caf2986906a87573648458725e2571ec377704856d3c16b47\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T06:52:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T06:52:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T06:52:35Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:15Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:15 crc kubenswrapper[4922]: I1128 06:53:15.523117 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:57Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:57Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4fb859dbed8c4c25965a739a2bd823f90f867a3bb629206994858cd62ba597ff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:15Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:15 crc kubenswrapper[4922]: I1128 06:53:15.535768 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:15 crc kubenswrapper[4922]: I1128 06:53:15.535827 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:15 crc kubenswrapper[4922]: I1128 06:53:15.535846 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:15 crc kubenswrapper[4922]: I1128 06:53:15.535871 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:15 crc kubenswrapper[4922]: I1128 06:53:15.535890 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:15Z","lastTransitionTime":"2025-11-28T06:53:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Nov 28 06:53:15 crc kubenswrapper[4922]: I1128 06:53:15.548089 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-jgzjd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b05f16bb-1729-4fd8-883a-4fb960bf4cff\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://099001160eba2d5545fdd42a6fac2b9842549a64b5d3fe093c274e073a156e5a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fwd5b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126
.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T06:52:54Z\\\"}}\" for pod \"openshift-multus\"/\"multus-jgzjd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:15Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:15 crc kubenswrapper[4922]: I1128 06:53:15.575993 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-xm948" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"28b50482-ffec-406c-9ff9-9604bce5d5d5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8329d62934c99c8561a76ffc873ea44b49549e6be23697ea4003dcd42798f914\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:53:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-76zhj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ea8d65e8ee7eb92aa290261e8f51b41b46819bc513aaca5c3bbdf3aa90a4acf1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ea8d65e8ee7eb92aa290261e8f51b41b46819bc513aaca5c3bbdf3aa90a4acf1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T06:52:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cn
ibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-76zhj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b52be6274c308e05417b43b1a405e2156837ac5b1e751cf1bf07f1820e5072c1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b52be6274c308e05417b43b1a405e2156837ac5b1e751cf1bf07f1820e5072c1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T06:52:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T06:52:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-76zhj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2e49c9118ef8a51223ca7a5ed0e966458c25e01d5fe81bffeecf85e3c958177a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2e49c9118ef8a51223ca7a5ed0e966458c25e01d5fe81bffeecf85e3c958177a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T06:52:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T06:52:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-76zhj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://10808a5202288b64a0583c474df43f7ad4846b876c071e3797e9403416dbd4e5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":t
rue,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://10808a5202288b64a0583c474df43f7ad4846b876c071e3797e9403416dbd4e5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T06:52:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T06:52:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-76zhj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f17f7a3f2eae1a08db3ab5d237156d3555583ed11bdf121c4138312913bc2ad1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f17f7a3f2eae1a08db3ab5d237156d3555583ed11bdf121c4138312913bc2ad1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T06:53:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T06:52:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-76zhj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8c558e4eb231b58b703d70d6da8454e7128ac7fc7138897bedcbdeb9e801a08b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8c558e4eb231b58b703d70d6da8454e7128ac7fc7138897bedcbdeb9e801a08b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T06:53:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T06:53:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-76zhj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T06:52:54Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-xm948\": Internal error 
occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:15Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:15 crc kubenswrapper[4922]: I1128 06:53:15.592693 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-z5d9x" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"21211ec8-3baf-4230-9cd8-c641f6bdc0e1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d42d2096b746c6f0a9210409101237a8b283b9baa914633aefc36971712ec634\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g4hdz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T06:52:57Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-z5d9x\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:15Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:15 crc kubenswrapper[4922]: I1128 06:53:15.612616 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-9kfr9" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"709beb43-ed88-4a0a-b384-0c463e469964\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:09Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:09Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5p26f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5p26f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T06:53:09Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-9kfr9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:15Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:15 crc kubenswrapper[4922]: I1128 06:53:15.638329 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f54a7fc3-dff2-4919-91a9-7defe17b717e\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2b39d53d3c00be07ad6ed44fb37961669118efd3f0a953a39e05c90c9fc30319\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1c84fe4f54f9b9fc76f23f13aaee875e9d127be3dec3e3952893436fb9d94936\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bf8a543b9af64068e2164c1fb7fcf8117e36a3f1a6c6e40df2a63da8fea86612\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7ef4b355167e11c82fb8067373d0d1b4ec5551157e683043c98f9249082795d2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T06:52:35Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:15Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:15 crc kubenswrapper[4922]: I1128 06:53:15.640096 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:15 crc kubenswrapper[4922]: I1128 06:53:15.640194 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:15 crc kubenswrapper[4922]: I1128 06:53:15.640268 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:15 crc kubenswrapper[4922]: I1128 06:53:15.640303 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:15 crc kubenswrapper[4922]: I1128 06:53:15.640321 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:15Z","lastTransitionTime":"2025-11-28T06:53:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 06:53:15 crc kubenswrapper[4922]: I1128 06:53:15.661280 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:15Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:15 crc kubenswrapper[4922]: I1128 06:53:15.678197 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-w9zxj" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"e1f29751-83a6-4469-b733-50e654026f8c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://679d64be6eca63ee652a4cb6cc5432c6ce549d1de243bdfbde115af634e770e3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ghdft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T06:52:54Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-w9zxj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:15Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:15 crc kubenswrapper[4922]: I1128 06:53:15.699068 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://60275d4709ecec957c37c52e762628422258288394fb23bb6190e98253977288\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d7be069e52fb9de73b7d4297a34eaab68e25c764ed60172e87146259d0d0b6ed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:15Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:15 crc kubenswrapper[4922]: I1128 06:53:15.721434 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8de96806bba4cb76f53ccf39f1188732d96955671049b550589a836ed21e0616\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:15Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:15 crc kubenswrapper[4922]: I1128 06:53:15.744818 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located 
when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:15Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:15 crc kubenswrapper[4922]: I1128 06:53:15.745209 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:15 crc kubenswrapper[4922]: I1128 06:53:15.745300 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:15 crc kubenswrapper[4922]: I1128 06:53:15.745321 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:15 crc kubenswrapper[4922]: I1128 06:53:15.745349 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:15 crc kubenswrapper[4922]: I1128 06:53:15.745369 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:15Z","lastTransitionTime":"2025-11-28T06:53:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 06:53:15 crc kubenswrapper[4922]: I1128 06:53:15.766537 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-h8wk6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0498340a-5e95-42bf-a0a6-8ac89a6b8858\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://19e8a18b97500977a51210206bba89633ff9939e989bfe1a6e471f5a23c83520\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zf7vq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b903fc60349ec7d5004f23ec933dce13d646a2c343819495564005dfb4813314\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zf7vq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T06:52:54Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-h8wk6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:15Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:15 crc kubenswrapper[4922]: I1128 06:53:15.849015 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:15 crc kubenswrapper[4922]: I1128 06:53:15.849092 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:15 crc kubenswrapper[4922]: I1128 06:53:15.849110 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:15 crc kubenswrapper[4922]: I1128 06:53:15.849138 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:15 crc kubenswrapper[4922]: I1128 06:53:15.849155 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:15Z","lastTransitionTime":"2025-11-28T06:53:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:53:15 crc kubenswrapper[4922]: I1128 06:53:15.952194 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:15 crc kubenswrapper[4922]: I1128 06:53:15.952295 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:15 crc kubenswrapper[4922]: I1128 06:53:15.952314 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:15 crc kubenswrapper[4922]: I1128 06:53:15.952339 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:15 crc kubenswrapper[4922]: I1128 06:53:15.952357 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:15Z","lastTransitionTime":"2025-11-28T06:53:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 06:53:16 crc kubenswrapper[4922]: I1128 06:53:16.055759 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:16 crc kubenswrapper[4922]: I1128 06:53:16.055823 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:16 crc kubenswrapper[4922]: I1128 06:53:16.055843 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:16 crc kubenswrapper[4922]: I1128 06:53:16.055871 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:16 crc kubenswrapper[4922]: I1128 06:53:16.055890 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:16Z","lastTransitionTime":"2025-11-28T06:53:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:53:16 crc kubenswrapper[4922]: I1128 06:53:16.159687 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:16 crc kubenswrapper[4922]: I1128 06:53:16.159773 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:16 crc kubenswrapper[4922]: I1128 06:53:16.159791 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:16 crc kubenswrapper[4922]: I1128 06:53:16.159817 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:16 crc kubenswrapper[4922]: I1128 06:53:16.159838 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:16Z","lastTransitionTime":"2025-11-28T06:53:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:53:16 crc kubenswrapper[4922]: I1128 06:53:16.262556 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:16 crc kubenswrapper[4922]: I1128 06:53:16.262621 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:16 crc kubenswrapper[4922]: I1128 06:53:16.262640 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:16 crc kubenswrapper[4922]: I1128 06:53:16.262668 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:16 crc kubenswrapper[4922]: I1128 06:53:16.262687 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:16Z","lastTransitionTime":"2025-11-28T06:53:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 06:53:16 crc kubenswrapper[4922]: I1128 06:53:16.366159 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:16 crc kubenswrapper[4922]: I1128 06:53:16.366272 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:16 crc kubenswrapper[4922]: I1128 06:53:16.366301 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:16 crc kubenswrapper[4922]: I1128 06:53:16.366335 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:16 crc kubenswrapper[4922]: I1128 06:53:16.366359 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:16Z","lastTransitionTime":"2025-11-28T06:53:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:53:16 crc kubenswrapper[4922]: I1128 06:53:16.398408 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 06:53:16 crc kubenswrapper[4922]: E1128 06:53:16.398675 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 06:53:16 crc kubenswrapper[4922]: I1128 06:53:16.398800 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-9kfr9" Nov 28 06:53:16 crc kubenswrapper[4922]: I1128 06:53:16.398838 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 06:53:16 crc kubenswrapper[4922]: I1128 06:53:16.398867 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 06:53:16 crc kubenswrapper[4922]: E1128 06:53:16.399039 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-9kfr9" podUID="709beb43-ed88-4a0a-b384-0c463e469964" Nov 28 06:53:16 crc kubenswrapper[4922]: E1128 06:53:16.399331 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 06:53:16 crc kubenswrapper[4922]: E1128 06:53:16.399505 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 06:53:16 crc kubenswrapper[4922]: I1128 06:53:16.469022 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:16 crc kubenswrapper[4922]: I1128 06:53:16.469093 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:16 crc kubenswrapper[4922]: I1128 06:53:16.469118 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:16 crc kubenswrapper[4922]: I1128 06:53:16.469147 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:16 crc kubenswrapper[4922]: I1128 06:53:16.469168 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:16Z","lastTransitionTime":"2025-11-28T06:53:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:53:16 crc kubenswrapper[4922]: I1128 06:53:16.571861 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:16 crc kubenswrapper[4922]: I1128 06:53:16.571934 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:16 crc kubenswrapper[4922]: I1128 06:53:16.571957 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:16 crc kubenswrapper[4922]: I1128 06:53:16.571983 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:16 crc kubenswrapper[4922]: I1128 06:53:16.572002 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:16Z","lastTransitionTime":"2025-11-28T06:53:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 06:53:16 crc kubenswrapper[4922]: I1128 06:53:16.675282 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:16 crc kubenswrapper[4922]: I1128 06:53:16.675351 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:16 crc kubenswrapper[4922]: I1128 06:53:16.675375 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:16 crc kubenswrapper[4922]: I1128 06:53:16.675400 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:16 crc kubenswrapper[4922]: I1128 06:53:16.675418 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:16Z","lastTransitionTime":"2025-11-28T06:53:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:53:16 crc kubenswrapper[4922]: I1128 06:53:16.778507 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:16 crc kubenswrapper[4922]: I1128 06:53:16.778571 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:16 crc kubenswrapper[4922]: I1128 06:53:16.778599 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:16 crc kubenswrapper[4922]: I1128 06:53:16.778631 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:16 crc kubenswrapper[4922]: I1128 06:53:16.778652 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:16Z","lastTransitionTime":"2025-11-28T06:53:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:53:16 crc kubenswrapper[4922]: I1128 06:53:16.781792 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/709beb43-ed88-4a0a-b384-0c463e469964-metrics-certs\") pod \"network-metrics-daemon-9kfr9\" (UID: \"709beb43-ed88-4a0a-b384-0c463e469964\") " pod="openshift-multus/network-metrics-daemon-9kfr9" Nov 28 06:53:16 crc kubenswrapper[4922]: E1128 06:53:16.781989 4922 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Nov 28 06:53:16 crc kubenswrapper[4922]: E1128 06:53:16.782076 4922 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/709beb43-ed88-4a0a-b384-0c463e469964-metrics-certs podName:709beb43-ed88-4a0a-b384-0c463e469964 nodeName:}" failed. No retries permitted until 2025-11-28 06:53:24.782049864 +0000 UTC m=+49.702445476 (durationBeforeRetry 8s). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/709beb43-ed88-4a0a-b384-0c463e469964-metrics-certs") pod "network-metrics-daemon-9kfr9" (UID: "709beb43-ed88-4a0a-b384-0c463e469964") : object "openshift-multus"/"metrics-daemon-secret" not registered Nov 28 06:53:16 crc kubenswrapper[4922]: I1128 06:53:16.881525 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:16 crc kubenswrapper[4922]: I1128 06:53:16.881601 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:16 crc kubenswrapper[4922]: I1128 06:53:16.881620 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:16 crc kubenswrapper[4922]: I1128 06:53:16.881645 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:16 crc kubenswrapper[4922]: I1128 06:53:16.881663 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:16Z","lastTransitionTime":"2025-11-28T06:53:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:53:16 crc kubenswrapper[4922]: I1128 06:53:16.985375 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:16 crc kubenswrapper[4922]: I1128 06:53:16.985428 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:16 crc kubenswrapper[4922]: I1128 06:53:16.985441 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:16 crc kubenswrapper[4922]: I1128 06:53:16.985460 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:16 crc kubenswrapper[4922]: I1128 06:53:16.985472 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:16Z","lastTransitionTime":"2025-11-28T06:53:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 06:53:17 crc kubenswrapper[4922]: I1128 06:53:17.088105 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:17 crc kubenswrapper[4922]: I1128 06:53:17.088183 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:17 crc kubenswrapper[4922]: I1128 06:53:17.088206 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:17 crc kubenswrapper[4922]: I1128 06:53:17.088285 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:17 crc kubenswrapper[4922]: I1128 06:53:17.088322 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:17Z","lastTransitionTime":"2025-11-28T06:53:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:53:17 crc kubenswrapper[4922]: I1128 06:53:17.190839 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:17 crc kubenswrapper[4922]: I1128 06:53:17.190896 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:17 crc kubenswrapper[4922]: I1128 06:53:17.190916 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:17 crc kubenswrapper[4922]: I1128 06:53:17.190942 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:17 crc kubenswrapper[4922]: I1128 06:53:17.190958 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:17Z","lastTransitionTime":"2025-11-28T06:53:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:53:17 crc kubenswrapper[4922]: I1128 06:53:17.294179 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:17 crc kubenswrapper[4922]: I1128 06:53:17.294286 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:17 crc kubenswrapper[4922]: I1128 06:53:17.294309 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:17 crc kubenswrapper[4922]: I1128 06:53:17.294343 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:17 crc kubenswrapper[4922]: I1128 06:53:17.294366 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:17Z","lastTransitionTime":"2025-11-28T06:53:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 06:53:17 crc kubenswrapper[4922]: I1128 06:53:17.398920 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:17 crc kubenswrapper[4922]: I1128 06:53:17.398993 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:17 crc kubenswrapper[4922]: I1128 06:53:17.399013 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:17 crc kubenswrapper[4922]: I1128 06:53:17.399041 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:17 crc kubenswrapper[4922]: I1128 06:53:17.399057 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:17Z","lastTransitionTime":"2025-11-28T06:53:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:53:17 crc kubenswrapper[4922]: I1128 06:53:17.501938 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:17 crc kubenswrapper[4922]: I1128 06:53:17.502011 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:17 crc kubenswrapper[4922]: I1128 06:53:17.502028 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:17 crc kubenswrapper[4922]: I1128 06:53:17.502054 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:17 crc kubenswrapper[4922]: I1128 06:53:17.502072 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:17Z","lastTransitionTime":"2025-11-28T06:53:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:53:17 crc kubenswrapper[4922]: I1128 06:53:17.605412 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:17 crc kubenswrapper[4922]: I1128 06:53:17.605501 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:17 crc kubenswrapper[4922]: I1128 06:53:17.605527 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:17 crc kubenswrapper[4922]: I1128 06:53:17.605560 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:17 crc kubenswrapper[4922]: I1128 06:53:17.605583 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:17Z","lastTransitionTime":"2025-11-28T06:53:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 06:53:17 crc kubenswrapper[4922]: I1128 06:53:17.708693 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:17 crc kubenswrapper[4922]: I1128 06:53:17.708804 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:17 crc kubenswrapper[4922]: I1128 06:53:17.708828 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:17 crc kubenswrapper[4922]: I1128 06:53:17.708859 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:17 crc kubenswrapper[4922]: I1128 06:53:17.708882 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:17Z","lastTransitionTime":"2025-11-28T06:53:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:53:17 crc kubenswrapper[4922]: I1128 06:53:17.734533 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:17 crc kubenswrapper[4922]: I1128 06:53:17.734592 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:17 crc kubenswrapper[4922]: I1128 06:53:17.734607 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:17 crc kubenswrapper[4922]: I1128 06:53:17.734629 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:17 crc kubenswrapper[4922]: I1128 06:53:17.734645 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:17Z","lastTransitionTime":"2025-11-28T06:53:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 06:53:17 crc kubenswrapper[4922]: E1128 06:53:17.751848 4922 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T06:53:17Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:17Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T06:53:17Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:17Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T06:53:17Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:17Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T06:53:17Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:17Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"3ea1e6bb-61b7-453d-b9cb-290e4e3cdf54\\\",\\\"systemUUID\\\":\\\"a83ea3b2-2af6-4e19-83ef-b63bfe4faed4\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:17Z is after 
2025-08-24T17:21:41Z" Nov 28 06:53:17 crc kubenswrapper[4922]: I1128 06:53:17.757183 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:17 crc kubenswrapper[4922]: I1128 06:53:17.757247 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:17 crc kubenswrapper[4922]: I1128 06:53:17.757260 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:17 crc kubenswrapper[4922]: I1128 06:53:17.757277 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:17 crc kubenswrapper[4922]: I1128 06:53:17.757290 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:17Z","lastTransitionTime":"2025-11-28T06:53:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:53:17 crc kubenswrapper[4922]: E1128 06:53:17.778590 4922 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T06:53:17Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:17Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T06:53:17Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:17Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T06:53:17Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:17Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T06:53:17Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:17Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"3ea1e6bb-61b7-453d-b9cb-290e4e3cdf54\\\",\\\"systemUUID\\\":\\\"a83ea3b2-2af6-4e19-83ef-b63bfe4faed4\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:17Z is after 
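The status-patch failure above has nothing to do with the patch body: the apiserver must consult the node.network-node-identity.openshift.io admission webhook at https://127.0.0.1:9743, and that endpoint serves a certificate that expired on 2025-08-24T17:21:41Z, months before the log's current time of 2025-11-28. A minimal sketch for reading that certificate's validity window from the node, assuming Python with the cryptography package installed:

    # Hypothetical diagnostic sketch; fetches the webhook's serving cert
    # without verification so an already-expired cert can still be inspected.
    import ssl
    from cryptography import x509

    pem = ssl.get_server_certificate(("127.0.0.1", 9743))
    cert = x509.load_pem_x509_certificate(pem.encode())
    print("notBefore:", cert.not_valid_before)
    print("notAfter: ", cert.not_valid_after)  # log says 2025-08-24T17:21:41Z

Until that serving certificate is rotated, every node-status patch attempt fails identically, which is why the same error recurs around the heartbeat events that follow.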
2025-08-24T17:21:41Z" Nov 28 06:53:17 crc kubenswrapper[4922]: I1128 06:53:17.783386 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:17 crc kubenswrapper[4922]: I1128 06:53:17.783426 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:17 crc kubenswrapper[4922]: I1128 06:53:17.783438 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:17 crc kubenswrapper[4922]: I1128 06:53:17.783455 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:17 crc kubenswrapper[4922]: I1128 06:53:17.783466 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:17Z","lastTransitionTime":"2025-11-28T06:53:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:53:17 crc kubenswrapper[4922]: E1128 06:53:17.805115 4922 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T06:53:17Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:17Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T06:53:17Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:17Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T06:53:17Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:17Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T06:53:17Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:17Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"3ea1e6bb-61b7-453d-b9cb-290e4e3cdf54\\\",\\\"systemUUID\\\":\\\"a83ea3b2-2af6-4e19-83ef-b63bfe4faed4\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:17Z is after 
2025-08-24T17:21:41Z" Nov 28 06:53:17 crc kubenswrapper[4922]: I1128 06:53:17.809836 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:17 crc kubenswrapper[4922]: I1128 06:53:17.809869 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:17 crc kubenswrapper[4922]: I1128 06:53:17.809877 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:17 crc kubenswrapper[4922]: I1128 06:53:17.809893 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:17 crc kubenswrapper[4922]: I1128 06:53:17.809903 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:17Z","lastTransitionTime":"2025-11-28T06:53:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:53:17 crc kubenswrapper[4922]: E1128 06:53:17.828568 4922 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T06:53:17Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:17Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T06:53:17Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:17Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T06:53:17Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:17Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T06:53:17Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:17Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"3ea1e6bb-61b7-453d-b9cb-290e4e3cdf54\\\",\\\"systemUUID\\\":\\\"a83ea3b2-2af6-4e19-83ef-b63bfe4faed4\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:17Z is after 
2025-08-24T17:21:41Z" Nov 28 06:53:17 crc kubenswrapper[4922]: I1128 06:53:17.833607 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:17 crc kubenswrapper[4922]: I1128 06:53:17.833683 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:17 crc kubenswrapper[4922]: I1128 06:53:17.833708 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:17 crc kubenswrapper[4922]: I1128 06:53:17.833738 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:17 crc kubenswrapper[4922]: I1128 06:53:17.833760 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:17Z","lastTransitionTime":"2025-11-28T06:53:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:53:17 crc kubenswrapper[4922]: E1128 06:53:17.855042 4922 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T06:53:17Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:17Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T06:53:17Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:17Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T06:53:17Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:17Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T06:53:17Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:17Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"3ea1e6bb-61b7-453d-b9cb-290e4e3cdf54\\\",\\\"systemUUID\\\":\\\"a83ea3b2-2af6-4e19-83ef-b63bfe4faed4\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:17Z is after 
2025-08-24T17:21:41Z" Nov 28 06:53:17 crc kubenswrapper[4922]: E1128 06:53:17.855297 4922 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Nov 28 06:53:17 crc kubenswrapper[4922]: I1128 06:53:17.857445 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:17 crc kubenswrapper[4922]: I1128 06:53:17.857500 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:17 crc kubenswrapper[4922]: I1128 06:53:17.857520 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:17 crc kubenswrapper[4922]: I1128 06:53:17.857545 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:17 crc kubenswrapper[4922]: I1128 06:53:17.857561 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:17Z","lastTransitionTime":"2025-11-28T06:53:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:53:17 crc kubenswrapper[4922]: I1128 06:53:17.960960 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:17 crc kubenswrapper[4922]: I1128 06:53:17.961023 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:17 crc kubenswrapper[4922]: I1128 06:53:17.961045 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:17 crc kubenswrapper[4922]: I1128 06:53:17.961074 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:17 crc kubenswrapper[4922]: I1128 06:53:17.961098 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:17Z","lastTransitionTime":"2025-11-28T06:53:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 06:53:18 crc kubenswrapper[4922]: I1128 06:53:18.064817 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:18 crc kubenswrapper[4922]: I1128 06:53:18.064896 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:18 crc kubenswrapper[4922]: I1128 06:53:18.064918 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:18 crc kubenswrapper[4922]: I1128 06:53:18.064951 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:18 crc kubenswrapper[4922]: I1128 06:53:18.064973 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:18Z","lastTransitionTime":"2025-11-28T06:53:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:53:18 crc kubenswrapper[4922]: I1128 06:53:18.167884 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:18 crc kubenswrapper[4922]: I1128 06:53:18.167954 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:18 crc kubenswrapper[4922]: I1128 06:53:18.167977 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:18 crc kubenswrapper[4922]: I1128 06:53:18.168005 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:18 crc kubenswrapper[4922]: I1128 06:53:18.168028 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:18Z","lastTransitionTime":"2025-11-28T06:53:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:53:18 crc kubenswrapper[4922]: I1128 06:53:18.273807 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:18 crc kubenswrapper[4922]: I1128 06:53:18.273871 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:18 crc kubenswrapper[4922]: I1128 06:53:18.273901 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:18 crc kubenswrapper[4922]: I1128 06:53:18.273926 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:18 crc kubenswrapper[4922]: I1128 06:53:18.273944 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:18Z","lastTransitionTime":"2025-11-28T06:53:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 06:53:18 crc kubenswrapper[4922]: I1128 06:53:18.376655 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:18 crc kubenswrapper[4922]: I1128 06:53:18.376719 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:18 crc kubenswrapper[4922]: I1128 06:53:18.376744 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:18 crc kubenswrapper[4922]: I1128 06:53:18.376775 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:18 crc kubenswrapper[4922]: I1128 06:53:18.376798 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:18Z","lastTransitionTime":"2025-11-28T06:53:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:53:18 crc kubenswrapper[4922]: I1128 06:53:18.398426 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 06:53:18 crc kubenswrapper[4922]: I1128 06:53:18.398499 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 06:53:18 crc kubenswrapper[4922]: I1128 06:53:18.398539 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 06:53:18 crc kubenswrapper[4922]: E1128 06:53:18.398642 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 06:53:18 crc kubenswrapper[4922]: I1128 06:53:18.398821 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-9kfr9" Nov 28 06:53:18 crc kubenswrapper[4922]: E1128 06:53:18.398969 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 06:53:18 crc kubenswrapper[4922]: E1128 06:53:18.399092 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-9kfr9" podUID="709beb43-ed88-4a0a-b384-0c463e469964" Nov 28 06:53:18 crc kubenswrapper[4922]: E1128 06:53:18.399188 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 06:53:18 crc kubenswrapper[4922]: I1128 06:53:18.479648 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:18 crc kubenswrapper[4922]: I1128 06:53:18.479714 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:18 crc kubenswrapper[4922]: I1128 06:53:18.479733 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:18 crc kubenswrapper[4922]: I1128 06:53:18.479758 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:18 crc kubenswrapper[4922]: I1128 06:53:18.479777 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:18Z","lastTransitionTime":"2025-11-28T06:53:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:53:18 crc kubenswrapper[4922]: I1128 06:53:18.582795 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:18 crc kubenswrapper[4922]: I1128 06:53:18.582859 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:18 crc kubenswrapper[4922]: I1128 06:53:18.582878 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:18 crc kubenswrapper[4922]: I1128 06:53:18.582901 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:18 crc kubenswrapper[4922]: I1128 06:53:18.582922 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:18Z","lastTransitionTime":"2025-11-28T06:53:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 06:53:18 crc kubenswrapper[4922]: I1128 06:53:18.692695 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:18 crc kubenswrapper[4922]: I1128 06:53:18.692830 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:18 crc kubenswrapper[4922]: I1128 06:53:18.692851 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:18 crc kubenswrapper[4922]: I1128 06:53:18.692878 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:18 crc kubenswrapper[4922]: I1128 06:53:18.692902 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:18Z","lastTransitionTime":"2025-11-28T06:53:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:53:18 crc kubenswrapper[4922]: I1128 06:53:18.795346 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:18 crc kubenswrapper[4922]: I1128 06:53:18.795412 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:18 crc kubenswrapper[4922]: I1128 06:53:18.795428 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:18 crc kubenswrapper[4922]: I1128 06:53:18.795456 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:18 crc kubenswrapper[4922]: I1128 06:53:18.795475 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:18Z","lastTransitionTime":"2025-11-28T06:53:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:53:18 crc kubenswrapper[4922]: I1128 06:53:18.898644 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:18 crc kubenswrapper[4922]: I1128 06:53:18.898713 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:18 crc kubenswrapper[4922]: I1128 06:53:18.898729 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:18 crc kubenswrapper[4922]: I1128 06:53:18.898756 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:18 crc kubenswrapper[4922]: I1128 06:53:18.898775 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:18Z","lastTransitionTime":"2025-11-28T06:53:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 06:53:19 crc kubenswrapper[4922]: I1128 06:53:19.002002 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:19 crc kubenswrapper[4922]: I1128 06:53:19.002057 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:19 crc kubenswrapper[4922]: I1128 06:53:19.002080 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:19 crc kubenswrapper[4922]: I1128 06:53:19.002106 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:19 crc kubenswrapper[4922]: I1128 06:53:19.002125 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:19Z","lastTransitionTime":"2025-11-28T06:53:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:53:19 crc kubenswrapper[4922]: I1128 06:53:19.105367 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:19 crc kubenswrapper[4922]: I1128 06:53:19.105423 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:19 crc kubenswrapper[4922]: I1128 06:53:19.105442 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:19 crc kubenswrapper[4922]: I1128 06:53:19.105470 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:19 crc kubenswrapper[4922]: I1128 06:53:19.105489 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:19Z","lastTransitionTime":"2025-11-28T06:53:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:53:19 crc kubenswrapper[4922]: I1128 06:53:19.208805 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:19 crc kubenswrapper[4922]: I1128 06:53:19.208870 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:19 crc kubenswrapper[4922]: I1128 06:53:19.208888 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:19 crc kubenswrapper[4922]: I1128 06:53:19.208911 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:19 crc kubenswrapper[4922]: I1128 06:53:19.208930 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:19Z","lastTransitionTime":"2025-11-28T06:53:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 06:53:19 crc kubenswrapper[4922]: I1128 06:53:19.312533 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:19 crc kubenswrapper[4922]: I1128 06:53:19.312621 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:19 crc kubenswrapper[4922]: I1128 06:53:19.312645 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:19 crc kubenswrapper[4922]: I1128 06:53:19.312681 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:19 crc kubenswrapper[4922]: I1128 06:53:19.312705 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:19Z","lastTransitionTime":"2025-11-28T06:53:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:53:19 crc kubenswrapper[4922]: I1128 06:53:19.415909 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:19 crc kubenswrapper[4922]: I1128 06:53:19.415975 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:19 crc kubenswrapper[4922]: I1128 06:53:19.415997 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:19 crc kubenswrapper[4922]: I1128 06:53:19.416022 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:19 crc kubenswrapper[4922]: I1128 06:53:19.416045 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:19Z","lastTransitionTime":"2025-11-28T06:53:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:53:19 crc kubenswrapper[4922]: I1128 06:53:19.518725 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:19 crc kubenswrapper[4922]: I1128 06:53:19.518791 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:19 crc kubenswrapper[4922]: I1128 06:53:19.518810 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:19 crc kubenswrapper[4922]: I1128 06:53:19.518840 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:19 crc kubenswrapper[4922]: I1128 06:53:19.518861 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:19Z","lastTransitionTime":"2025-11-28T06:53:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 06:53:19 crc kubenswrapper[4922]: I1128 06:53:19.621829 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:19 crc kubenswrapper[4922]: I1128 06:53:19.621879 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:19 crc kubenswrapper[4922]: I1128 06:53:19.621896 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:19 crc kubenswrapper[4922]: I1128 06:53:19.621918 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:19 crc kubenswrapper[4922]: I1128 06:53:19.621935 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:19Z","lastTransitionTime":"2025-11-28T06:53:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:53:19 crc kubenswrapper[4922]: I1128 06:53:19.724856 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:19 crc kubenswrapper[4922]: I1128 06:53:19.724914 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:19 crc kubenswrapper[4922]: I1128 06:53:19.724933 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:19 crc kubenswrapper[4922]: I1128 06:53:19.724959 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:19 crc kubenswrapper[4922]: I1128 06:53:19.724981 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:19Z","lastTransitionTime":"2025-11-28T06:53:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:53:19 crc kubenswrapper[4922]: I1128 06:53:19.828498 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:19 crc kubenswrapper[4922]: I1128 06:53:19.828574 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:19 crc kubenswrapper[4922]: I1128 06:53:19.828592 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:19 crc kubenswrapper[4922]: I1128 06:53:19.828617 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:19 crc kubenswrapper[4922]: I1128 06:53:19.828641 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:19Z","lastTransitionTime":"2025-11-28T06:53:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 06:53:19 crc kubenswrapper[4922]: I1128 06:53:19.931521 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:19 crc kubenswrapper[4922]: I1128 06:53:19.931572 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:19 crc kubenswrapper[4922]: I1128 06:53:19.931589 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:19 crc kubenswrapper[4922]: I1128 06:53:19.931612 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:19 crc kubenswrapper[4922]: I1128 06:53:19.931629 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:19Z","lastTransitionTime":"2025-11-28T06:53:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:53:20 crc kubenswrapper[4922]: I1128 06:53:20.034458 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:20 crc kubenswrapper[4922]: I1128 06:53:20.034512 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:20 crc kubenswrapper[4922]: I1128 06:53:20.034533 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:20 crc kubenswrapper[4922]: I1128 06:53:20.034556 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:20 crc kubenswrapper[4922]: I1128 06:53:20.034571 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:20Z","lastTransitionTime":"2025-11-28T06:53:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:53:20 crc kubenswrapper[4922]: I1128 06:53:20.138199 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:20 crc kubenswrapper[4922]: I1128 06:53:20.138326 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:20 crc kubenswrapper[4922]: I1128 06:53:20.138353 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:20 crc kubenswrapper[4922]: I1128 06:53:20.138385 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:20 crc kubenswrapper[4922]: I1128 06:53:20.138417 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:20Z","lastTransitionTime":"2025-11-28T06:53:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 06:53:20 crc kubenswrapper[4922]: I1128 06:53:20.241589 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:20 crc kubenswrapper[4922]: I1128 06:53:20.241650 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:20 crc kubenswrapper[4922]: I1128 06:53:20.241665 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:20 crc kubenswrapper[4922]: I1128 06:53:20.241687 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:20 crc kubenswrapper[4922]: I1128 06:53:20.241699 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:20Z","lastTransitionTime":"2025-11-28T06:53:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:53:20 crc kubenswrapper[4922]: I1128 06:53:20.344993 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:20 crc kubenswrapper[4922]: I1128 06:53:20.345044 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:20 crc kubenswrapper[4922]: I1128 06:53:20.345061 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:20 crc kubenswrapper[4922]: I1128 06:53:20.345084 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:20 crc kubenswrapper[4922]: I1128 06:53:20.345101 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:20Z","lastTransitionTime":"2025-11-28T06:53:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:53:20 crc kubenswrapper[4922]: I1128 06:53:20.398313 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-9kfr9" Nov 28 06:53:20 crc kubenswrapper[4922]: I1128 06:53:20.398385 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 06:53:20 crc kubenswrapper[4922]: I1128 06:53:20.398420 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 06:53:20 crc kubenswrapper[4922]: E1128 06:53:20.398602 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-9kfr9" podUID="709beb43-ed88-4a0a-b384-0c463e469964" Nov 28 06:53:20 crc kubenswrapper[4922]: E1128 06:53:20.398771 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 06:53:20 crc kubenswrapper[4922]: E1128 06:53:20.398895 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 06:53:20 crc kubenswrapper[4922]: I1128 06:53:20.399014 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 06:53:20 crc kubenswrapper[4922]: E1128 06:53:20.399298 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 06:53:20 crc kubenswrapper[4922]: I1128 06:53:20.448268 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:20 crc kubenswrapper[4922]: I1128 06:53:20.448330 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:20 crc kubenswrapper[4922]: I1128 06:53:20.448350 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:20 crc kubenswrapper[4922]: I1128 06:53:20.448375 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:20 crc kubenswrapper[4922]: I1128 06:53:20.448394 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:20Z","lastTransitionTime":"2025-11-28T06:53:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 06:53:20 crc kubenswrapper[4922]: I1128 06:53:20.550645 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:20 crc kubenswrapper[4922]: I1128 06:53:20.550687 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:20 crc kubenswrapper[4922]: I1128 06:53:20.550698 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:20 crc kubenswrapper[4922]: I1128 06:53:20.550717 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:20 crc kubenswrapper[4922]: I1128 06:53:20.550728 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:20Z","lastTransitionTime":"2025-11-28T06:53:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:53:20 crc kubenswrapper[4922]: I1128 06:53:20.653618 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:20 crc kubenswrapper[4922]: I1128 06:53:20.653705 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:20 crc kubenswrapper[4922]: I1128 06:53:20.653722 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:20 crc kubenswrapper[4922]: I1128 06:53:20.653748 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:20 crc kubenswrapper[4922]: I1128 06:53:20.653765 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:20Z","lastTransitionTime":"2025-11-28T06:53:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:53:20 crc kubenswrapper[4922]: I1128 06:53:20.757290 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:20 crc kubenswrapper[4922]: I1128 06:53:20.757350 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:20 crc kubenswrapper[4922]: I1128 06:53:20.757373 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:20 crc kubenswrapper[4922]: I1128 06:53:20.757401 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:20 crc kubenswrapper[4922]: I1128 06:53:20.757422 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:20Z","lastTransitionTime":"2025-11-28T06:53:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 06:53:20 crc kubenswrapper[4922]: I1128 06:53:20.860735 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:20 crc kubenswrapper[4922]: I1128 06:53:20.860799 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:20 crc kubenswrapper[4922]: I1128 06:53:20.860824 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:20 crc kubenswrapper[4922]: I1128 06:53:20.860852 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:20 crc kubenswrapper[4922]: I1128 06:53:20.860873 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:20Z","lastTransitionTime":"2025-11-28T06:53:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:53:20 crc kubenswrapper[4922]: I1128 06:53:20.964138 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:20 crc kubenswrapper[4922]: I1128 06:53:20.964273 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:20 crc kubenswrapper[4922]: I1128 06:53:20.964295 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:20 crc kubenswrapper[4922]: I1128 06:53:20.964361 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:20 crc kubenswrapper[4922]: I1128 06:53:20.964383 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:20Z","lastTransitionTime":"2025-11-28T06:53:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:53:21 crc kubenswrapper[4922]: I1128 06:53:21.067676 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:21 crc kubenswrapper[4922]: I1128 06:53:21.067737 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:21 crc kubenswrapper[4922]: I1128 06:53:21.067760 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:21 crc kubenswrapper[4922]: I1128 06:53:21.067788 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:21 crc kubenswrapper[4922]: I1128 06:53:21.067810 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:21Z","lastTransitionTime":"2025-11-28T06:53:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 06:53:21 crc kubenswrapper[4922]: I1128 06:53:21.170203 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:21 crc kubenswrapper[4922]: I1128 06:53:21.170304 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:21 crc kubenswrapper[4922]: I1128 06:53:21.170330 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:21 crc kubenswrapper[4922]: I1128 06:53:21.170358 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:21 crc kubenswrapper[4922]: I1128 06:53:21.170381 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:21Z","lastTransitionTime":"2025-11-28T06:53:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:53:21 crc kubenswrapper[4922]: I1128 06:53:21.273569 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:21 crc kubenswrapper[4922]: I1128 06:53:21.273633 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:21 crc kubenswrapper[4922]: I1128 06:53:21.273657 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:21 crc kubenswrapper[4922]: I1128 06:53:21.273683 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:21 crc kubenswrapper[4922]: I1128 06:53:21.273702 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:21Z","lastTransitionTime":"2025-11-28T06:53:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:53:21 crc kubenswrapper[4922]: I1128 06:53:21.376180 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:21 crc kubenswrapper[4922]: I1128 06:53:21.376278 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:21 crc kubenswrapper[4922]: I1128 06:53:21.376304 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:21 crc kubenswrapper[4922]: I1128 06:53:21.376332 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:21 crc kubenswrapper[4922]: I1128 06:53:21.376353 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:21Z","lastTransitionTime":"2025-11-28T06:53:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 06:53:21 crc kubenswrapper[4922]: I1128 06:53:21.479670 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:21 crc kubenswrapper[4922]: I1128 06:53:21.479731 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:21 crc kubenswrapper[4922]: I1128 06:53:21.479751 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:21 crc kubenswrapper[4922]: I1128 06:53:21.479779 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:21 crc kubenswrapper[4922]: I1128 06:53:21.479798 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:21Z","lastTransitionTime":"2025-11-28T06:53:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:53:21 crc kubenswrapper[4922]: I1128 06:53:21.582798 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:21 crc kubenswrapper[4922]: I1128 06:53:21.582850 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:21 crc kubenswrapper[4922]: I1128 06:53:21.582866 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:21 crc kubenswrapper[4922]: I1128 06:53:21.582889 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:21 crc kubenswrapper[4922]: I1128 06:53:21.582907 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:21Z","lastTransitionTime":"2025-11-28T06:53:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:53:21 crc kubenswrapper[4922]: I1128 06:53:21.685933 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:21 crc kubenswrapper[4922]: I1128 06:53:21.686008 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:21 crc kubenswrapper[4922]: I1128 06:53:21.686033 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:21 crc kubenswrapper[4922]: I1128 06:53:21.686060 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:21 crc kubenswrapper[4922]: I1128 06:53:21.686082 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:21Z","lastTransitionTime":"2025-11-28T06:53:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 06:53:21 crc kubenswrapper[4922]: I1128 06:53:21.789428 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:21 crc kubenswrapper[4922]: I1128 06:53:21.789497 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:21 crc kubenswrapper[4922]: I1128 06:53:21.789519 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:21 crc kubenswrapper[4922]: I1128 06:53:21.789606 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:21 crc kubenswrapper[4922]: I1128 06:53:21.789637 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:21Z","lastTransitionTime":"2025-11-28T06:53:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:53:21 crc kubenswrapper[4922]: I1128 06:53:21.894146 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:21 crc kubenswrapper[4922]: I1128 06:53:21.894254 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:21 crc kubenswrapper[4922]: I1128 06:53:21.894273 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:21 crc kubenswrapper[4922]: I1128 06:53:21.894297 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:21 crc kubenswrapper[4922]: I1128 06:53:21.894318 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:21Z","lastTransitionTime":"2025-11-28T06:53:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:53:21 crc kubenswrapper[4922]: I1128 06:53:21.998802 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:21 crc kubenswrapper[4922]: I1128 06:53:21.998885 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:21 crc kubenswrapper[4922]: I1128 06:53:21.998902 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:21 crc kubenswrapper[4922]: I1128 06:53:21.998927 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:21 crc kubenswrapper[4922]: I1128 06:53:21.998942 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:21Z","lastTransitionTime":"2025-11-28T06:53:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 06:53:22 crc kubenswrapper[4922]: I1128 06:53:22.102122 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:22 crc kubenswrapper[4922]: I1128 06:53:22.102180 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:22 crc kubenswrapper[4922]: I1128 06:53:22.102198 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:22 crc kubenswrapper[4922]: I1128 06:53:22.102255 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:22 crc kubenswrapper[4922]: I1128 06:53:22.102278 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:22Z","lastTransitionTime":"2025-11-28T06:53:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:53:22 crc kubenswrapper[4922]: I1128 06:53:22.204772 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:22 crc kubenswrapper[4922]: I1128 06:53:22.204830 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:22 crc kubenswrapper[4922]: I1128 06:53:22.204851 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:22 crc kubenswrapper[4922]: I1128 06:53:22.204906 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:22 crc kubenswrapper[4922]: I1128 06:53:22.204923 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:22Z","lastTransitionTime":"2025-11-28T06:53:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:53:22 crc kubenswrapper[4922]: I1128 06:53:22.307991 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:22 crc kubenswrapper[4922]: I1128 06:53:22.308118 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:22 crc kubenswrapper[4922]: I1128 06:53:22.308139 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:22 crc kubenswrapper[4922]: I1128 06:53:22.308164 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:22 crc kubenswrapper[4922]: I1128 06:53:22.308181 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:22Z","lastTransitionTime":"2025-11-28T06:53:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 06:53:22 crc kubenswrapper[4922]: I1128 06:53:22.398755 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 06:53:22 crc kubenswrapper[4922]: I1128 06:53:22.398858 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 06:53:22 crc kubenswrapper[4922]: I1128 06:53:22.398912 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-9kfr9" Nov 28 06:53:22 crc kubenswrapper[4922]: I1128 06:53:22.398960 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 06:53:22 crc kubenswrapper[4922]: E1128 06:53:22.398974 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 06:53:22 crc kubenswrapper[4922]: E1128 06:53:22.399138 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-9kfr9" podUID="709beb43-ed88-4a0a-b384-0c463e469964" Nov 28 06:53:22 crc kubenswrapper[4922]: E1128 06:53:22.399352 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 06:53:22 crc kubenswrapper[4922]: E1128 06:53:22.399529 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 06:53:22 crc kubenswrapper[4922]: I1128 06:53:22.412321 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:22 crc kubenswrapper[4922]: I1128 06:53:22.412421 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:22 crc kubenswrapper[4922]: I1128 06:53:22.412525 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:22 crc kubenswrapper[4922]: I1128 06:53:22.412618 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:22 crc kubenswrapper[4922]: I1128 06:53:22.412696 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:22Z","lastTransitionTime":"2025-11-28T06:53:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:53:22 crc kubenswrapper[4922]: I1128 06:53:22.443266 4922 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-7gdxt" Nov 28 06:53:22 crc kubenswrapper[4922]: I1128 06:53:22.443517 4922 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Nov 28 06:53:22 crc kubenswrapper[4922]: I1128 06:53:22.469435 4922 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-ovn-kubernetes/ovnkube-node-7gdxt" podUID="ac5c6b67-2037-400e-8e03-845b47d8ca67" containerName="ovnkube-controller" probeResult="failure" output="" Nov 28 06:53:22 crc kubenswrapper[4922]: I1128 06:53:22.489774 4922 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-ovn-kubernetes/ovnkube-node-7gdxt" podUID="ac5c6b67-2037-400e-8e03-845b47d8ca67" containerName="ovnkube-controller" probeResult="failure" output="" Nov 28 06:53:22 crc kubenswrapper[4922]: I1128 06:53:22.515757 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:22 crc kubenswrapper[4922]: I1128 06:53:22.515825 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:22 crc kubenswrapper[4922]: I1128 06:53:22.515847 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:22 crc kubenswrapper[4922]: I1128 06:53:22.515876 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:22 crc kubenswrapper[4922]: I1128 06:53:22.515900 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:22Z","lastTransitionTime":"2025-11-28T06:53:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 06:53:22 crc kubenswrapper[4922]: I1128 06:53:22.619204 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:22 crc kubenswrapper[4922]: I1128 06:53:22.619296 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:22 crc kubenswrapper[4922]: I1128 06:53:22.619314 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:22 crc kubenswrapper[4922]: I1128 06:53:22.619724 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:22 crc kubenswrapper[4922]: I1128 06:53:22.619782 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:22Z","lastTransitionTime":"2025-11-28T06:53:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:53:22 crc kubenswrapper[4922]: I1128 06:53:22.723580 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:22 crc kubenswrapper[4922]: I1128 06:53:22.723630 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:22 crc kubenswrapper[4922]: I1128 06:53:22.723647 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:22 crc kubenswrapper[4922]: I1128 06:53:22.723668 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:22 crc kubenswrapper[4922]: I1128 06:53:22.723684 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:22Z","lastTransitionTime":"2025-11-28T06:53:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:53:22 crc kubenswrapper[4922]: I1128 06:53:22.825984 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:22 crc kubenswrapper[4922]: I1128 06:53:22.826069 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:22 crc kubenswrapper[4922]: I1128 06:53:22.826087 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:22 crc kubenswrapper[4922]: I1128 06:53:22.826130 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:22 crc kubenswrapper[4922]: I1128 06:53:22.826146 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:22Z","lastTransitionTime":"2025-11-28T06:53:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 06:53:22 crc kubenswrapper[4922]: I1128 06:53:22.929149 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:22 crc kubenswrapper[4922]: I1128 06:53:22.929195 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:22 crc kubenswrapper[4922]: I1128 06:53:22.929211 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:22 crc kubenswrapper[4922]: I1128 06:53:22.929268 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:22 crc kubenswrapper[4922]: I1128 06:53:22.929290 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:22Z","lastTransitionTime":"2025-11-28T06:53:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:53:23 crc kubenswrapper[4922]: I1128 06:53:23.031722 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:23 crc kubenswrapper[4922]: I1128 06:53:23.031787 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:23 crc kubenswrapper[4922]: I1128 06:53:23.031811 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:23 crc kubenswrapper[4922]: I1128 06:53:23.031840 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:23 crc kubenswrapper[4922]: I1128 06:53:23.031861 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:23Z","lastTransitionTime":"2025-11-28T06:53:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 06:53:23 crc kubenswrapper[4922]: I1128 06:53:23.114013 4922 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Nov 28 06:53:23 crc kubenswrapper[4922]: I1128 06:53:23.128477 4922 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-scheduler/openshift-kube-scheduler-crc"] Nov 28 06:53:23 crc kubenswrapper[4922]: I1128 06:53:23.135658 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:23Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:23 crc kubenswrapper[4922]: I1128 06:53:23.138180 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:23 crc kubenswrapper[4922]: I1128 06:53:23.138391 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:23 crc kubenswrapper[4922]: I1128 06:53:23.138424 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:23 crc kubenswrapper[4922]: I1128 06:53:23.138459 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:23 crc kubenswrapper[4922]: I1128 06:53:23.138608 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:23Z","lastTransitionTime":"2025-11-28T06:53:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 06:53:23 crc kubenswrapper[4922]: I1128 06:53:23.155577 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-h8wk6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0498340a-5e95-42bf-a0a6-8ac89a6b8858\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://19e8a18b97500977a51210206bba89633ff9939e989bfe1a6e471f5a23c83520\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zf7vq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b903fc60349ec7d5004f23ec933dce13d646a2c343819495564005dfb4813314\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zf7vq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T06:52:54Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-h8wk6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:23Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:23 crc kubenswrapper[4922]: I1128 06:53:23.177196 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"78d1b075-6126-476f-8318-483aeaa7b542\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d8727c0c5c76c4071561c95b14554063c01a2d7d826bfef19624a346f7079096\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c63926690deadbe1a0b9bd4228c1be8c9d959ae52dc13540e1d7da0ef6ce2b44\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://12e55656cf2a2cadbef853f458f347aabe75bd6b644a48a8c320144a0bbb77f8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:36Z
\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ff005273cedb4a3be19dafd07b2572733446d00c1aa3d89dec6dad79a9b37cb7\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://43f9db3464d315f5b596f15327b9e7cfc2935bb8f18492bcf4aa1b2d8e2e8951\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-28T06:52:53Z\\\",\\\"message\\\":\\\" to sync for client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file\\\\nI1128 06:52:53.805479 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\nI1128 06:52:53.805349 1 requestheader_controller.go:172] Starting RequestHeaderAuthRequestController\\\\nI1128 06:52:53.807455 1 shared_informer.go:313] Waiting for caches to sync for RequestHeaderAuthRequestController\\\\nI1128 06:52:53.805665 1 envvar.go:172] \\\\\\\"Feature gate default state\\\\\\\" feature=\\\\\\\"WatchListClient\\\\\\\" enabled=false\\\\nI1128 06:52:53.807562 1 envvar.go:172] \\\\\\\"Feature gate default state\\\\\\\" feature=\\\\\\\"InformerResourceVersion\\\\\\\" enabled=false\\\\nI1128 06:52:53.805922 1 tlsconfig.go:203] \\\\\\\"Loaded serving cert\\\\\\\" certName=\\\\\\\"serving-cert::/tmp/serving-cert-870734072/tls.crt::/tmp/serving-cert-870734072/tls.key\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"localhost\\\\\\\\\\\\\\\" [serving] validServingFor=[localhost] issuer=\\\\\\\\\\\\\\\"check-endpoints-signer@1764312757\\\\\\\\\\\\\\\" (2025-11-28 06:52:36 +0000 UTC to 2025-12-28 06:52:37 +0000 UTC (now=2025-11-28 06:52:53.805871396 +0000 UTC))\\\\\\\"\\\\nI1128 06:52:53.808356 1 named_certificates.go:53] \\\\\\\"Loaded SNI cert\\\\\\\" index=0 certName=\\\\\\\"self-signed loopback\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"apiserver-loopback-client@1764312768\\\\\\\\\\\\\\\" [serving] validServingFor=[apiserver-loopback-client] issuer=\\\\\\\\\\\\\\\"apiserver-loopback-client-ca@1764312768\\\\\\\\\\\\\\\" (2025-11-28 05:52:47 +0000 UTC to 2026-11-28 05:52:47 +0000 UTC (now=2025-11-28 06:52:53.808326937 +0000 UTC))\\\\\\\"\\\\nI1128 06:52:53.808434 1 secure_serving.go:213] Serving securely on [::]:17697\\\\nI1128 06:52:53.808491 1 genericapiserver.go:683] [graceful-termination] waiting for shutdown to be initiated\\\\nI1128 06:52:53.805962 1 dynamic_serving_content.go:135] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-870734072/tls.crt::/tmp/serving-cert-870734072/tls.key\\\\\\\"\\\\nI1128 06:52:53.808872 1 tlsconfig.go:243] \\\\\\\"Starting DynamicServingCertificateController\\\\\\\"\\\\nF1128 06:52:53.810397 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T06:52:37Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cd17aa15325fbe3a40b81cea5bfec137ef02a8dd2d5f8393be4d206a8360ba89\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:37Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9a1a9e0fb2169a4caf2986906a87573648458725e2571ec377704856d3c16b47\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9a1a9e0fb2169a4caf2986906a87573648458725e2571ec377704856d3c16b47\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T06:52:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T06:52:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T06:52:35Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:23Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:23 crc kubenswrapper[4922]: I1128 06:53:23.195478 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:57Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:57Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4fb859dbed8c4c25965a739a2bd823f90f867a3bb629206994858cd62ba597ff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:23Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:23 crc kubenswrapper[4922]: I1128 06:53:23.214268 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:23Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:23 crc kubenswrapper[4922]: I1128 06:53:23.241771 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:23 crc kubenswrapper[4922]: I1128 06:53:23.241819 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:23 crc kubenswrapper[4922]: I1128 06:53:23.241839 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:23 crc kubenswrapper[4922]: I1128 06:53:23.241862 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:23 crc kubenswrapper[4922]: I1128 06:53:23.241879 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:23Z","lastTransitionTime":"2025-11-28T06:53:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 06:53:23 crc kubenswrapper[4922]: I1128 06:53:23.246355 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-7gdxt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ac5c6b67-2037-400e-8e03-845b47d8ca67\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d0f038ffe6c8ec5ec836379b827ecb71967119efca3f2cd4ce5d3006fa96a153\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dtxdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2c1a545fef7aec25b21dd112c54eeb7b94306cafaf4732a50de9ff10a409c443\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dtxdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\
":\\\"cri-o://6e9942f8e8cdf6ffd04d46b29be5a53fcc8c4cb5d79f50d7747e2fe6eb744ffd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dtxdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e91ea6957087e19d43626438d68bf92a19a6f35325de570738354edd61da5bb0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dtxdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4f56366e2355663eb896c09a6710166b89f794a7f17cb317f6c4299e8b317782\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dtxdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://403e86b4b99e8bc85be77996288705f6ea8ff5da8e9b7dfb09749f4341065b55\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.i
o/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dtxdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3ead32a1ed979f2f568f830c0c57ee284644824f41055dda621deb734bd7dd1b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://67c452725d82ca16f6b13f31f18d78e040d878900953e2b1c2d43afa9b4bbdc0\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-28T06:53:03Z\\\",\\\"message\\\":\\\"e (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1128 06:53:03.775357 6198 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI1128 06:53:03.775409 6198 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI1128 06:53:03.775460 6198 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI1128 06:53:03.775477 6198 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI1128 06:53:03.775513 6198 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI1128 06:53:03.775617 6198 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI1128 06:53:03.775639 6198 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI1128 06:53:03.775644 6198 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI1128 06:53:03.775663 6198 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI1128 06:53:03.775679 6198 handler.go:208] Removed *v1.Node event handler 2\\\\nI1128 06:53:03.775682 6198 factory.go:656] Stopping watch factory\\\\nI1128 06:53:03.775691 6198 handler.go:208] Removed *v1.Node event handler 7\\\\nI1128 06:53:03.775699 6198 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI1128 06:53:03.775703 6198 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI1128 06:53:03.775714 6198 handler.go:208] Removed *v1.Namespace 
ev\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T06:53:01Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:53:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dtxdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d31c99f96b8af9c7b9c47c4ccd0b6485b23c9eacfe293667038042df8330d8a9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dtxdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"con
tainerID\\\":\\\"cri-o://3aaa3ffb5b99838c7969b3e241ad9d7a0bc4797fac3ec52280914abbb7ca1b24\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3aaa3ffb5b99838c7969b3e241ad9d7a0bc4797fac3ec52280914abbb7ca1b24\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T06:52:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dtxdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T06:52:54Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-7gdxt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:23Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:23 crc kubenswrapper[4922]: I1128 06:53:23.264954 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-n9b52" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"31348e3e-fe58-4426-98b7-bd9dd404283b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://07e060fc8cd65cfc5a3e2ba86edb408b32996c25dac44f6f70d2f91b837e9da1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:53:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vkhgr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://03d0e97f4ab12f9cccfefde0d5665e45e14db63343fd1273279d41a8e56b060f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:53:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vkhgr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T06:53:07Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-n9b52\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:23Z is after 2025-08-24T17:21:41Z" Nov 28 
06:53:23 crc kubenswrapper[4922]: I1128 06:53:23.281806 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-9kfr9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"709beb43-ed88-4a0a-b384-0c463e469964\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:09Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:09Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5p26f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5p26f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T06:53:09Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-9kfr9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:23Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:23 crc kubenswrapper[4922]: I1128 06:53:23.301523 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f54a7fc3-dff2-4919-91a9-7defe17b717e\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2b39d53d3c00be07ad6ed44fb37961669118efd3f0a953a39e05c90c9fc30319\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1c84fe4f54f9b9fc76f23f13aaee875e9d127be3dec3e3952893436fb9d94936\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bf8a543b9af64068e2164c1fb7fcf8117e36a3f1a6c6e40df2a63da8fea86612\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7ef4b355167e11c82fb8067373d0d1b4ec5551157e683043c98f9249082795d2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T06:52:35Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:23Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:23 crc kubenswrapper[4922]: I1128 06:53:23.320513 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:23Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:23 crc kubenswrapper[4922]: I1128 06:53:23.335999 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-w9zxj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e1f29751-83a6-4469-b733-50e654026f8c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://679d64be6eca63ee652a4cb6cc5432c6ce549d1de243bdfbde115af634e770e3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ghdft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T06:52:54Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-w9zxj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 
2025-11-28T06:53:23Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:23 crc kubenswrapper[4922]: I1128 06:53:23.344769 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:23 crc kubenswrapper[4922]: I1128 06:53:23.344860 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:23 crc kubenswrapper[4922]: I1128 06:53:23.345353 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:23 crc kubenswrapper[4922]: I1128 06:53:23.345452 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:23 crc kubenswrapper[4922]: I1128 06:53:23.345744 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:23Z","lastTransitionTime":"2025-11-28T06:53:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:53:23 crc kubenswrapper[4922]: I1128 06:53:23.356630 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-jgzjd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b05f16bb-1729-4fd8-883a-4fb960bf4cff\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://099001160eba2d5545fdd42a6fac2b9842549a64b5d3fe093c274e073a156e5a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":
\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fwd5b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T06:52:54Z\\\"}}\" for pod \"openshift-multus\"/\"multus-jgzjd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:23Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:23 crc kubenswrapper[4922]: I1128 06:53:23.379471 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-xm948" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"28b50482-ffec-406c-9ff9-9604bce5d5d5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8329d62934c99c8561a76ffc873ea44b49549e6be23697ea4003dcd42798f914\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:53:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-76zhj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ea8d65e8ee7eb92aa290261e8f51b41b46819bc513aaca5c3bbdf3aa90a4acf1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ea8d65e8ee7eb92aa290261e8f51b41b46819bc513aaca5c3bbdf3aa90a4acf1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T06:52:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-76zhj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b52be6274c308e05417b43b1a405e2156837ac5b1e751cf1bf07f1820e5072c1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b52be6274c308e05417b43b1a405e2156837ac5b1e751cf1bf07f1820e5072c1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T06:52:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T06:52:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-76zhj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2e49c9118ef8a51223ca7a5ed0e966458c25e01d5fe81bffeecf85e3c958177a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2e49c9118ef8a51223ca7a5ed0e966458c25e01d5fe81bffeecf85e3c958177a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T06:52:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T06:52:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-76zhj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://10808a5202288b64a0583c474df43f7ad4846b876c071e3797e9403416dbd4e5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://10808a5202288b64a0583c474df43f7ad4846b876c071e3797e9403416dbd4e5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T06:52:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T06:52:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-76zhj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f17f7a3f2eae1a08db3ab5d237156d3555583ed11bdf121c4138312913bc2ad1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f17f7a3f2eae1a08db3ab5d237156d3555583ed11bdf121c4138312913bc2ad1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T06:53:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T06:52:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-76zhj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8c558e4eb231b58b703d70d6da8454e7128ac7fc7138897bedcbdeb9e801a08b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8c558e4eb231b58b703d70d6da8454e7128ac7fc7138897bedcbdeb9e801a08b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T06:53:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T06:53:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-76zhj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T06:52:54Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-xm948\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:23Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:23 crc kubenswrapper[4922]: I1128 06:53:23.395735 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-z5d9x" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"21211ec8-3baf-4230-9cd8-c641f6bdc0e1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d42d2096b746c6f0a9210409101237a8b283b9baa914633aefc36971712ec634\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g4hdz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T06:52:57Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-z5d9x\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:23Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:23 crc kubenswrapper[4922]: I1128 06:53:23.414994 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://60275d4709ecec957c37c52e762628422258288394fb23bb6190e98253977288\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d7be069e52fb9de73b7d4297a34eaab68e25c764ed60172e87146259d0d0b6ed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:23Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:23 crc kubenswrapper[4922]: I1128 06:53:23.435987 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8de96806bba4cb76f53ccf39f1188732d96955671049b550589a836ed21e0616\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:23Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:23 crc kubenswrapper[4922]: I1128 06:53:23.449360 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:23 crc kubenswrapper[4922]: I1128 06:53:23.449406 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:23 crc kubenswrapper[4922]: I1128 06:53:23.449424 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:23 crc kubenswrapper[4922]: I1128 06:53:23.449447 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:23 crc kubenswrapper[4922]: I1128 06:53:23.449464 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:23Z","lastTransitionTime":"2025-11-28T06:53:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 06:53:23 crc kubenswrapper[4922]: I1128 06:53:23.552493 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:23 crc kubenswrapper[4922]: I1128 06:53:23.552558 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:23 crc kubenswrapper[4922]: I1128 06:53:23.552582 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:23 crc kubenswrapper[4922]: I1128 06:53:23.552611 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:23 crc kubenswrapper[4922]: I1128 06:53:23.552633 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:23Z","lastTransitionTime":"2025-11-28T06:53:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:53:23 crc kubenswrapper[4922]: I1128 06:53:23.655587 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:23 crc kubenswrapper[4922]: I1128 06:53:23.655976 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:23 crc kubenswrapper[4922]: I1128 06:53:23.656196 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:23 crc kubenswrapper[4922]: I1128 06:53:23.656478 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:23 crc kubenswrapper[4922]: I1128 06:53:23.656679 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:23Z","lastTransitionTime":"2025-11-28T06:53:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:53:23 crc kubenswrapper[4922]: I1128 06:53:23.760021 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:23 crc kubenswrapper[4922]: I1128 06:53:23.760070 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:23 crc kubenswrapper[4922]: I1128 06:53:23.760087 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:23 crc kubenswrapper[4922]: I1128 06:53:23.760113 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:23 crc kubenswrapper[4922]: I1128 06:53:23.760131 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:23Z","lastTransitionTime":"2025-11-28T06:53:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 06:53:23 crc kubenswrapper[4922]: I1128 06:53:23.863318 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:23 crc kubenswrapper[4922]: I1128 06:53:23.863368 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:23 crc kubenswrapper[4922]: I1128 06:53:23.863769 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:23 crc kubenswrapper[4922]: I1128 06:53:23.863815 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:23 crc kubenswrapper[4922]: I1128 06:53:23.863834 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:23Z","lastTransitionTime":"2025-11-28T06:53:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:53:23 crc kubenswrapper[4922]: I1128 06:53:23.967486 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:23 crc kubenswrapper[4922]: I1128 06:53:23.968371 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:23 crc kubenswrapper[4922]: I1128 06:53:23.968412 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:23 crc kubenswrapper[4922]: I1128 06:53:23.968438 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:23 crc kubenswrapper[4922]: I1128 06:53:23.968456 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:23Z","lastTransitionTime":"2025-11-28T06:53:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:53:24 crc kubenswrapper[4922]: I1128 06:53:24.072276 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:24 crc kubenswrapper[4922]: I1128 06:53:24.072341 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:24 crc kubenswrapper[4922]: I1128 06:53:24.072366 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:24 crc kubenswrapper[4922]: I1128 06:53:24.072396 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:24 crc kubenswrapper[4922]: I1128 06:53:24.072416 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:24Z","lastTransitionTime":"2025-11-28T06:53:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 06:53:24 crc kubenswrapper[4922]: I1128 06:53:24.175319 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:24 crc kubenswrapper[4922]: I1128 06:53:24.175405 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:24 crc kubenswrapper[4922]: I1128 06:53:24.175429 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:24 crc kubenswrapper[4922]: I1128 06:53:24.175464 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:24 crc kubenswrapper[4922]: I1128 06:53:24.175486 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:24Z","lastTransitionTime":"2025-11-28T06:53:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:53:24 crc kubenswrapper[4922]: I1128 06:53:24.279177 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:24 crc kubenswrapper[4922]: I1128 06:53:24.279266 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:24 crc kubenswrapper[4922]: I1128 06:53:24.279287 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:24 crc kubenswrapper[4922]: I1128 06:53:24.279313 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:24 crc kubenswrapper[4922]: I1128 06:53:24.279331 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:24Z","lastTransitionTime":"2025-11-28T06:53:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:53:24 crc kubenswrapper[4922]: I1128 06:53:24.383260 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:24 crc kubenswrapper[4922]: I1128 06:53:24.383311 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:24 crc kubenswrapper[4922]: I1128 06:53:24.383328 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:24 crc kubenswrapper[4922]: I1128 06:53:24.383352 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:24 crc kubenswrapper[4922]: I1128 06:53:24.383369 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:24Z","lastTransitionTime":"2025-11-28T06:53:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 06:53:24 crc kubenswrapper[4922]: I1128 06:53:24.398144 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 06:53:24 crc kubenswrapper[4922]: E1128 06:53:24.398339 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 06:53:24 crc kubenswrapper[4922]: I1128 06:53:24.398428 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 06:53:24 crc kubenswrapper[4922]: E1128 06:53:24.398513 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 06:53:24 crc kubenswrapper[4922]: I1128 06:53:24.398587 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-9kfr9" Nov 28 06:53:24 crc kubenswrapper[4922]: E1128 06:53:24.398679 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-9kfr9" podUID="709beb43-ed88-4a0a-b384-0c463e469964" Nov 28 06:53:24 crc kubenswrapper[4922]: I1128 06:53:24.398927 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 06:53:24 crc kubenswrapper[4922]: E1128 06:53:24.399273 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 06:53:24 crc kubenswrapper[4922]: I1128 06:53:24.486932 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:24 crc kubenswrapper[4922]: I1128 06:53:24.487345 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:24 crc kubenswrapper[4922]: I1128 06:53:24.487553 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:24 crc kubenswrapper[4922]: I1128 06:53:24.487713 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:24 crc kubenswrapper[4922]: I1128 06:53:24.487858 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:24Z","lastTransitionTime":"2025-11-28T06:53:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:53:24 crc kubenswrapper[4922]: I1128 06:53:24.591788 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:24 crc kubenswrapper[4922]: I1128 06:53:24.591852 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:24 crc kubenswrapper[4922]: I1128 06:53:24.591872 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:24 crc kubenswrapper[4922]: I1128 06:53:24.591898 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:24 crc kubenswrapper[4922]: I1128 06:53:24.591918 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:24Z","lastTransitionTime":"2025-11-28T06:53:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 06:53:24 crc kubenswrapper[4922]: I1128 06:53:24.694787 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:24 crc kubenswrapper[4922]: I1128 06:53:24.694851 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:24 crc kubenswrapper[4922]: I1128 06:53:24.694868 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:24 crc kubenswrapper[4922]: I1128 06:53:24.694895 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:24 crc kubenswrapper[4922]: I1128 06:53:24.694913 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:24Z","lastTransitionTime":"2025-11-28T06:53:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:53:24 crc kubenswrapper[4922]: I1128 06:53:24.797882 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:24 crc kubenswrapper[4922]: I1128 06:53:24.797999 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:24 crc kubenswrapper[4922]: I1128 06:53:24.798024 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:24 crc kubenswrapper[4922]: I1128 06:53:24.798058 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:24 crc kubenswrapper[4922]: I1128 06:53:24.798082 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:24Z","lastTransitionTime":"2025-11-28T06:53:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:53:24 crc kubenswrapper[4922]: I1128 06:53:24.876334 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/709beb43-ed88-4a0a-b384-0c463e469964-metrics-certs\") pod \"network-metrics-daemon-9kfr9\" (UID: \"709beb43-ed88-4a0a-b384-0c463e469964\") " pod="openshift-multus/network-metrics-daemon-9kfr9" Nov 28 06:53:24 crc kubenswrapper[4922]: E1128 06:53:24.876576 4922 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Nov 28 06:53:24 crc kubenswrapper[4922]: E1128 06:53:24.876706 4922 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/709beb43-ed88-4a0a-b384-0c463e469964-metrics-certs podName:709beb43-ed88-4a0a-b384-0c463e469964 nodeName:}" failed. No retries permitted until 2025-11-28 06:53:40.876670082 +0000 UTC m=+65.797065724 (durationBeforeRetry 16s). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/709beb43-ed88-4a0a-b384-0c463e469964-metrics-certs") pod "network-metrics-daemon-9kfr9" (UID: "709beb43-ed88-4a0a-b384-0c463e469964") : object "openshift-multus"/"metrics-daemon-secret" not registered Nov 28 06:53:24 crc kubenswrapper[4922]: I1128 06:53:24.900964 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:24 crc kubenswrapper[4922]: I1128 06:53:24.901085 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:24 crc kubenswrapper[4922]: I1128 06:53:24.901110 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:24 crc kubenswrapper[4922]: I1128 06:53:24.901139 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:24 crc kubenswrapper[4922]: I1128 06:53:24.901162 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:24Z","lastTransitionTime":"2025-11-28T06:53:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:53:25 crc kubenswrapper[4922]: I1128 06:53:25.005632 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:25 crc kubenswrapper[4922]: I1128 06:53:25.005718 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:25 crc kubenswrapper[4922]: I1128 06:53:25.005749 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:25 crc kubenswrapper[4922]: I1128 06:53:25.005786 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:25 crc kubenswrapper[4922]: I1128 06:53:25.005808 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:25Z","lastTransitionTime":"2025-11-28T06:53:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 06:53:25 crc kubenswrapper[4922]: I1128 06:53:25.109088 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:25 crc kubenswrapper[4922]: I1128 06:53:25.109157 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:25 crc kubenswrapper[4922]: I1128 06:53:25.109180 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:25 crc kubenswrapper[4922]: I1128 06:53:25.109213 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:25 crc kubenswrapper[4922]: I1128 06:53:25.109268 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:25Z","lastTransitionTime":"2025-11-28T06:53:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:53:25 crc kubenswrapper[4922]: I1128 06:53:25.214181 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:25 crc kubenswrapper[4922]: I1128 06:53:25.214241 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:25 crc kubenswrapper[4922]: I1128 06:53:25.214253 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:25 crc kubenswrapper[4922]: I1128 06:53:25.214267 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:25 crc kubenswrapper[4922]: I1128 06:53:25.214278 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:25Z","lastTransitionTime":"2025-11-28T06:53:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:53:25 crc kubenswrapper[4922]: I1128 06:53:25.317970 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:25 crc kubenswrapper[4922]: I1128 06:53:25.318044 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:25 crc kubenswrapper[4922]: I1128 06:53:25.318069 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:25 crc kubenswrapper[4922]: I1128 06:53:25.318103 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:25 crc kubenswrapper[4922]: I1128 06:53:25.318125 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:25Z","lastTransitionTime":"2025-11-28T06:53:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 06:53:25 crc kubenswrapper[4922]: I1128 06:53:25.416804 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e3415f94-87c0-4a05-9d74-02ac020c4d35\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8db4ea0b3e2badcbe285f1758b150d335c8c1f4d92478a2de587cf9d345f3640\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a5b5d5b7a735c902e218bbb524cda2f64b167fcced9cc07a47bff7e59cb55115\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://900d4f306d3a3e4386262c1ee1031ad03503f6c262e9305c581397b3bf1a6a60\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"
cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://73c8b650a2a0ebc55d6531c14f5c6e5e4a7ffbac511aea8400c6632289aeb275\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://73c8b650a2a0ebc55d6531c14f5c6e5e4a7ffbac511aea8400c6632289aeb275\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T06:52:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T06:52:36Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T06:52:35Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:25Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:25 crc kubenswrapper[4922]: I1128 06:53:25.420979 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:25 crc kubenswrapper[4922]: I1128 06:53:25.421022 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:25 crc kubenswrapper[4922]: I1128 06:53:25.421040 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:25 crc kubenswrapper[4922]: I1128 06:53:25.421144 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:25 crc kubenswrapper[4922]: I1128 06:53:25.421318 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:25Z","lastTransitionTime":"2025-11-28T06:53:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 06:53:25 crc kubenswrapper[4922]: I1128 06:53:25.436625 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://60275d4709ecec957c37c52e762628422258288394fb23bb6190e98253977288\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d7be069e52fb9de73b7d4297a34eaab68e25c764ed60172e87146259d0d0b6ed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:25Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:25 crc kubenswrapper[4922]: I1128 06:53:25.456912 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8de96806bba4cb76f53ccf39f1188732d96955671049b550589a836ed21e0616\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:25Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:25 crc kubenswrapper[4922]: I1128 06:53:25.476850 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located 
when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:25Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:25 crc kubenswrapper[4922]: I1128 06:53:25.501723 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-h8wk6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0498340a-5e95-42bf-a0a6-8ac89a6b8858\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://19e8a18b97500977a51210206bba89633ff9939e989bfe1a6e471f5a23c83520\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zf7vq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b903fc60349ec7d5004f23ec933dce13d646a2c343819495564005dfb4813314\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-da
emon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zf7vq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T06:52:54Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-h8wk6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:25Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:25 crc kubenswrapper[4922]: I1128 06:53:25.524082 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"78d1b075-6126-476f-8318-483aeaa7b542\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d8727c0c5c76c4071561c95b14554063c01a2d7d826bfef19624a346f7079096\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c63926690deadbe1a0b9bd4228c1be8c9d959ae52dc13540e1d7da0ef6ce2b44\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",
\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://12e55656cf2a2cadbef853f458f347aabe75bd6b644a48a8c320144a0bbb77f8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ff005273cedb4a3be19dafd07b2572733446d00c1aa3d89dec6dad79a9b37cb7\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://43f9db3464d315f5b596f15327b9e7cfc2935bb8f18492bcf4aa1b2d8e2e8951\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-28T06:52:53Z\\\",\\\"message\\\":\\\" to sync for client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file\\\\nI1128 06:52:53.805479 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\nI1128 06:52:53.805349 1 requestheader_controller.go:172] Starting RequestHeaderAuthRequestController\\\\nI1128 06:52:53.807455 1 shared_informer.go:313] Waiting for caches to sync for RequestHeaderAuthRequestController\\\\nI1128 06:52:53.805665 1 envvar.go:172] \\\\\\\"Feature gate default state\\\\\\\" feature=\\\\\\\"WatchListClient\\\\\\\" enabled=false\\\\nI1128 06:52:53.807562 1 envvar.go:172] \\\\\\\"Feature gate default state\\\\\\\" feature=\\\\\\\"InformerResourceVersion\\\\\\\" enabled=false\\\\nI1128 06:52:53.805922 1 tlsconfig.go:203] \\\\\\\"Loaded serving cert\\\\\\\" certName=\\\\\\\"serving-cert::/tmp/serving-cert-870734072/tls.crt::/tmp/serving-cert-870734072/tls.key\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"localhost\\\\\\\\\\\\\\\" [serving] validServingFor=[localhost] issuer=\\\\\\\\\\\\\\\"check-endpoints-signer@1764312757\\\\\\\\\\\\\\\" (2025-11-28 06:52:36 +0000 UTC to 2025-12-28 06:52:37 +0000 UTC (now=2025-11-28 06:52:53.805871396 +0000 UTC))\\\\\\\"\\\\nI1128 06:52:53.808356 1 named_certificates.go:53] \\\\\\\"Loaded SNI cert\\\\\\\" index=0 certName=\\\\\\\"self-signed loopback\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"apiserver-loopback-client@1764312768\\\\\\\\\\\\\\\" [serving] validServingFor=[apiserver-loopback-client] issuer=\\\\\\\\\\\\\\\"apiserver-loopback-client-ca@1764312768\\\\\\\\\\\\\\\" (2025-11-28 05:52:47 +0000 UTC to 2026-11-28 05:52:47 +0000 UTC (now=2025-11-28 06:52:53.808326937 +0000 UTC))\\\\\\\"\\\\nI1128 06:52:53.808434 1 secure_serving.go:213] Serving securely 
on [::]:17697\\\\nI1128 06:52:53.808491 1 genericapiserver.go:683] [graceful-termination] waiting for shutdown to be initiated\\\\nI1128 06:52:53.805962 1 dynamic_serving_content.go:135] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-870734072/tls.crt::/tmp/serving-cert-870734072/tls.key\\\\\\\"\\\\nI1128 06:52:53.808872 1 tlsconfig.go:243] \\\\\\\"Starting DynamicServingCertificateController\\\\\\\"\\\\nF1128 06:52:53.810397 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T06:52:37Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cd17aa15325fbe3a40b81cea5bfec137ef02a8dd2d5f8393be4d206a8360ba89\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:37Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9a1a9e0fb2169a4caf2986906a87573648458725e2571ec377704856d3c16b47\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9a1a9e0fb2169a4caf2986906a87573648458725e2571ec377704856d3c16b47\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T06:52:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T06:52:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T06:52:35Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:25Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:25 crc kubenswrapper[4922]: I1128 06:53:25.524296 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:25 crc kubenswrapper[4922]: I1128 06:53:25.524356 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 28 06:53:25 crc kubenswrapper[4922]: I1128 06:53:25.524374 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:25 crc kubenswrapper[4922]: I1128 06:53:25.524402 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:25 crc kubenswrapper[4922]: I1128 06:53:25.524419 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:25Z","lastTransitionTime":"2025-11-28T06:53:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:53:25 crc kubenswrapper[4922]: I1128 06:53:25.542884 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:57Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:57Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4fb859dbed8c4c25965a739a2bd823f90f867a3bb629206994858cd62ba597ff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:25Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:25 crc kubenswrapper[4922]: I1128 06:53:25.561609 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:25Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:25 crc kubenswrapper[4922]: I1128 06:53:25.598839 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-7gdxt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ac5c6b67-2037-400e-8e03-845b47d8ca67\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d0f038ffe6c8ec5ec836379b827ecb71967119efca3f2cd4ce5d3006fa96a153\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dtxdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2c1a545fef7aec25b21dd112c54eeb7b94306cafaf4732a50de9ff10a409c443\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dtxdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6e9942f8e8cdf6ffd04d46b29be5a53fcc8c4cb5d79f50d7747e2fe6eb744ffd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dtxdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e91ea6957087e19d43626438d68bf92a19a6f35325de570738354edd61da5bb0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dtxdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4f56366e2355663eb896c09a6710166b89f794a7f17cb317f6c4299e8b317782\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dtxdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://403e86b4b99e8bc85be77996288705f6ea8ff5da8e9b7dfb09749f4341065b55\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dtxdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3ead32a1ed979f2f568f830c0c57ee284644824f
41055dda621deb734bd7dd1b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://67c452725d82ca16f6b13f31f18d78e040d878900953e2b1c2d43afa9b4bbdc0\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-28T06:53:03Z\\\",\\\"message\\\":\\\"e (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1128 06:53:03.775357 6198 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI1128 06:53:03.775409 6198 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI1128 06:53:03.775460 6198 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI1128 06:53:03.775477 6198 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI1128 06:53:03.775513 6198 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI1128 06:53:03.775617 6198 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI1128 06:53:03.775639 6198 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI1128 06:53:03.775644 6198 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI1128 06:53:03.775663 6198 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI1128 06:53:03.775679 6198 handler.go:208] Removed *v1.Node event handler 2\\\\nI1128 06:53:03.775682 6198 factory.go:656] Stopping watch factory\\\\nI1128 06:53:03.775691 6198 handler.go:208] Removed *v1.Node event handler 7\\\\nI1128 06:53:03.775699 6198 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI1128 06:53:03.775703 6198 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI1128 06:53:03.775714 6198 handler.go:208] Removed *v1.Namespace 
ev\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T06:53:01Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:53:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dtxdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d31c99f96b8af9c7b9c47c4ccd0b6485b23c9eacfe293667038042df8330d8a9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dtxdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"con
tainerID\\\":\\\"cri-o://3aaa3ffb5b99838c7969b3e241ad9d7a0bc4797fac3ec52280914abbb7ca1b24\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3aaa3ffb5b99838c7969b3e241ad9d7a0bc4797fac3ec52280914abbb7ca1b24\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T06:52:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dtxdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T06:52:54Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-7gdxt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:25Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:25 crc kubenswrapper[4922]: I1128 06:53:25.612241 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-n9b52" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"31348e3e-fe58-4426-98b7-bd9dd404283b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://07e060fc8cd65cfc5a3e2ba86edb408b32996c25dac44f6f70d2f91b837e9da1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:53:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vkhgr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://03d0e97f4ab12f9cccfefde0d5665e45e14db63343fd1273279d41a8e56b060f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:53:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vkhgr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T06:53:07Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-n9b52\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:25Z is after 2025-08-24T17:21:41Z" Nov 28 
06:53:25 crc kubenswrapper[4922]: I1128 06:53:25.626022 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f54a7fc3-dff2-4919-91a9-7defe17b717e\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2b39d53d3c00be07ad6ed44fb37961669118efd3f0a953a39e05c90c9fc30319\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1c84fe4f54f9b9fc76f23f13aaee875e9d127be3dec3e3952893436fb9d94936\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bf8a543b9af64068e2164c1fb7fcf8117e36a3f1a6c6e40df2a63da8fea86612\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\
",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7ef4b355167e11c82fb8067373d0d1b4ec5551157e683043c98f9249082795d2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T06:52:35Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:25Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:25 crc kubenswrapper[4922]: I1128 06:53:25.627390 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:25 crc kubenswrapper[4922]: I1128 06:53:25.627481 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:25 crc kubenswrapper[4922]: I1128 06:53:25.627541 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:25 crc kubenswrapper[4922]: I1128 06:53:25.627599 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:25 crc kubenswrapper[4922]: I1128 06:53:25.627651 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:25Z","lastTransitionTime":"2025-11-28T06:53:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 06:53:25 crc kubenswrapper[4922]: I1128 06:53:25.645485 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:25Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:25 crc kubenswrapper[4922]: I1128 06:53:25.661962 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-w9zxj" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"e1f29751-83a6-4469-b733-50e654026f8c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://679d64be6eca63ee652a4cb6cc5432c6ce549d1de243bdfbde115af634e770e3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ghdft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T06:52:54Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-w9zxj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:25Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:25 crc kubenswrapper[4922]: I1128 06:53:25.687475 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-jgzjd" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b05f16bb-1729-4fd8-883a-4fb960bf4cff\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://099001160eba2d5545fdd42a6fac2b9842549a64b5d3fe093c274e073a156e5a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fwd5b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T06:52:54Z\\\"}}\" for pod \"openshift-multus\"/\"multus-jgzjd\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:25Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:25 crc kubenswrapper[4922]: I1128 06:53:25.712854 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-xm948" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"28b50482-ffec-406c-9ff9-9604bce5d5d5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8329d62934c99c8561a76ffc873ea44b49549e6be23697ea4003dcd42798f914\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:53:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-76zhj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ea8d65e8ee7eb92aa290261e8f51b41b46819bc513aaca5c3bbdf3aa90a4acf1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ea8d65e8ee7eb92aa290261e8f51b41b46819bc513aaca5c3bbdf3aa90a4acf1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T06:52:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.
io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-76zhj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b52be6274c308e05417b43b1a405e2156837ac5b1e751cf1bf07f1820e5072c1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b52be6274c308e05417b43b1a405e2156837ac5b1e751cf1bf07f1820e5072c1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T06:52:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T06:52:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-76zhj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2e49c9118ef8a51223ca7a5ed0e966458c25e01d5fe81bffeecf85e3c958177a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2e49c9118ef8a51223ca7a5ed0e966458c25e01d5fe81bffeecf85e3c958177a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T06:52:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T06:52:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-76zhj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://10808a5202288b64a0583c474df43f7ad4846b876c071e3797e9403416dbd4e5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://10808a5202288b64a0583c474df43f7ad4846b876c071e3797e9403416dbd4e5\\\",\\\"exitCode\\\":0,\\\
"finishedAt\\\":\\\"2025-11-28T06:52:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T06:52:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-76zhj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f17f7a3f2eae1a08db3ab5d237156d3555583ed11bdf121c4138312913bc2ad1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f17f7a3f2eae1a08db3ab5d237156d3555583ed11bdf121c4138312913bc2ad1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T06:53:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T06:52:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-76zhj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8c558e4eb231b58b703d70d6da8454e7128ac7fc7138897bedcbdeb9e801a08b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8c558e4eb231b58b703d70d6da8454e7128ac7fc7138897bedcbdeb9e801a08b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T06:53:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T06:53:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-76zhj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T06:52:54Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-xm948\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has 
expired or is not yet valid: current time 2025-11-28T06:53:25Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:25 crc kubenswrapper[4922]: I1128 06:53:25.731212 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:25 crc kubenswrapper[4922]: I1128 06:53:25.731339 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:25 crc kubenswrapper[4922]: I1128 06:53:25.731363 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:25 crc kubenswrapper[4922]: I1128 06:53:25.731395 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:25 crc kubenswrapper[4922]: I1128 06:53:25.731418 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:25Z","lastTransitionTime":"2025-11-28T06:53:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:53:25 crc kubenswrapper[4922]: I1128 06:53:25.734754 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-z5d9x" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"21211ec8-3baf-4230-9cd8-c641f6bdc0e1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d42d2096b746c6f0a9210409101237a8b283b9baa914633aefc36971712ec634\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g4hdz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\
"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T06:52:57Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-z5d9x\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:25Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:25 crc kubenswrapper[4922]: I1128 06:53:25.751828 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-9kfr9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"709beb43-ed88-4a0a-b384-0c463e469964\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:09Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:09Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5p26f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5p26f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T06:53:09Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-9kfr9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:25Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:25 crc kubenswrapper[4922]: I1128 06:53:25.784888 4922 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-7gdxt_ac5c6b67-2037-400e-8e03-845b47d8ca67/ovnkube-controller/1.log" Nov 28 06:53:25 crc kubenswrapper[4922]: I1128 06:53:25.785827 4922 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-7gdxt_ac5c6b67-2037-400e-8e03-845b47d8ca67/ovnkube-controller/0.log" Nov 28 06:53:25 crc kubenswrapper[4922]: I1128 06:53:25.790726 4922 generic.go:334] "Generic (PLEG): container finished" podID="ac5c6b67-2037-400e-8e03-845b47d8ca67" containerID="3ead32a1ed979f2f568f830c0c57ee284644824f41055dda621deb734bd7dd1b" exitCode=1 Nov 28 06:53:25 crc kubenswrapper[4922]: I1128 06:53:25.790825 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-7gdxt" event={"ID":"ac5c6b67-2037-400e-8e03-845b47d8ca67","Type":"ContainerDied","Data":"3ead32a1ed979f2f568f830c0c57ee284644824f41055dda621deb734bd7dd1b"} Nov 28 06:53:25 crc kubenswrapper[4922]: I1128 06:53:25.790949 4922 scope.go:117] "RemoveContainer" containerID="67c452725d82ca16f6b13f31f18d78e040d878900953e2b1c2d43afa9b4bbdc0" Nov 28 06:53:25 crc kubenswrapper[4922]: I1128 06:53:25.792384 4922 scope.go:117] "RemoveContainer" containerID="3ead32a1ed979f2f568f830c0c57ee284644824f41055dda621deb734bd7dd1b" Nov 28 06:53:25 crc kubenswrapper[4922]: E1128 06:53:25.792654 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 10s restarting failed container=ovnkube-controller pod=ovnkube-node-7gdxt_openshift-ovn-kubernetes(ac5c6b67-2037-400e-8e03-845b47d8ca67)\"" pod="openshift-ovn-kubernetes/ovnkube-node-7gdxt" podUID="ac5c6b67-2037-400e-8e03-845b47d8ca67" Nov 28 06:53:25 crc kubenswrapper[4922]: I1128 06:53:25.812817 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-h8wk6" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0498340a-5e95-42bf-a0a6-8ac89a6b8858\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://19e8a18b97500977a51210206bba89633ff9939e989bfe1a6e471f5a23c83520\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zf7vq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b903fc60349ec7d5004f23ec933dce13d646a2c343819495564005dfb4813314\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zf7vq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T06:52:54Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-h8wk6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:25Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:25 crc kubenswrapper[4922]: I1128 06:53:25.829990 4922 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:25Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:25 crc kubenswrapper[4922]: I1128 06:53:25.834310 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:25 crc kubenswrapper[4922]: I1128 06:53:25.834346 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:25 crc kubenswrapper[4922]: I1128 06:53:25.834354 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:25 crc kubenswrapper[4922]: I1128 06:53:25.834369 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:25 crc kubenswrapper[4922]: I1128 06:53:25.834378 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:25Z","lastTransitionTime":"2025-11-28T06:53:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 06:53:25 crc kubenswrapper[4922]: I1128 06:53:25.847771 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:57Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:57Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4fb859dbed8c4c25965a739a2bd823f90f867a3bb629206994858cd62ba597ff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:25Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:25 crc kubenswrapper[4922]: I1128 06:53:25.864502 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:25Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:25 crc kubenswrapper[4922]: I1128 06:53:25.887968 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-7gdxt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ac5c6b67-2037-400e-8e03-845b47d8ca67\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d0f038ffe6c8ec5ec836379b827ecb71967119efca3f2cd4ce5d3006fa96a153\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dtxdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2c1a545fef7aec25b21dd112c54eeb7b94306cafaf4732a50de9ff10a409c443\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dtxdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6e9942f8e8cdf6ffd04d46b29be5a53fcc8c4cb5d79f50d7747e2fe6eb744ffd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dtxdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e91ea6957087e19d43626438d68bf92a19a6f35325de570738354edd61da5bb0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dtxdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4f56366e2355663eb896c09a6710166b89f794a7f17cb317f6c4299e8b317782\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dtxdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://403e86b4b99e8bc85be77996288705f6ea8ff5da8e9b7dfb09749f4341065b55\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dtxdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3ead32a1ed979f2f568f830c0c57ee284644824f
41055dda621deb734bd7dd1b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://67c452725d82ca16f6b13f31f18d78e040d878900953e2b1c2d43afa9b4bbdc0\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-28T06:53:03Z\\\",\\\"message\\\":\\\"e (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1128 06:53:03.775357 6198 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI1128 06:53:03.775409 6198 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI1128 06:53:03.775460 6198 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI1128 06:53:03.775477 6198 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI1128 06:53:03.775513 6198 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI1128 06:53:03.775617 6198 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI1128 06:53:03.775639 6198 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI1128 06:53:03.775644 6198 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI1128 06:53:03.775663 6198 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI1128 06:53:03.775679 6198 handler.go:208] Removed *v1.Node event handler 2\\\\nI1128 06:53:03.775682 6198 factory.go:656] Stopping watch factory\\\\nI1128 06:53:03.775691 6198 handler.go:208] Removed *v1.Node event handler 7\\\\nI1128 06:53:03.775699 6198 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI1128 06:53:03.775703 6198 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI1128 06:53:03.775714 6198 handler.go:208] Removed *v1.Namespace ev\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T06:53:01Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3ead32a1ed979f2f568f830c0c57ee284644824f41055dda621deb734bd7dd1b\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-28T06:53:25Z\\\",\\\"message\\\":\\\"perator/openshift-kube-scheduler-operator-5fdd9b5758-7wtcp: failed to check if pod openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-7wtcp is in primary UDN: could not find OVN pod annotation in map[]\\\\nI1128 06:53:25.477598 6343 controller.go:257] Controller udn-host-isolation-manager: error found while processing openshift-multus/network-metrics-daemon-9kfr9: failed to check if pod openshift-multus/network-metrics-daemon-9kfr9 is in primary UDN: could not find OVN pod annotation in map[cluster-autoscaler.kubernetes.io/enable-ds-eviction:false]\\\\nI1128 06:53:25.477616 6343 controller.go:257] Controller udn-host-isolation-manager: error found while processing openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-7tnsd: failed to check if pod openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-7tnsd is in primary UDN: could not find OVN pod annotation in map[openshift.io/required-scc:nonroot-v2 openshift.io/scc:nonroot-v2 seccomp.security.alpha.kubernetes.io/pod:runtime/default]\\\\nE1128 06:53:25.584881 6343 shared_informer.go:316] \\\\\\\"Unhandled Error\\\\\\\" err=\\\\\\\"unable to sync caches for ovn-lb-controller\\\\\\\" 
logger=\\\\\\\"UnhandledError\\\\\\\"\\\\nI1128 06:53:25.586083 6343 ovnkube.go:599] Stopped ovnkube\\\\nI1128 06:53:25.586127 6343 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nF1\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T06:53:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dtxdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d31c99f96b8af9c7b9c47c4ccd0b6485b23c9eacfe293667038042df8330d8a9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dtxdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"i
nitContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3aaa3ffb5b99838c7969b3e241ad9d7a0bc4797fac3ec52280914abbb7ca1b24\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3aaa3ffb5b99838c7969b3e241ad9d7a0bc4797fac3ec52280914abbb7ca1b24\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T06:52:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dtxdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T06:52:54Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-7gdxt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:25Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:25 crc kubenswrapper[4922]: I1128 06:53:25.904996 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-n9b52" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"31348e3e-fe58-4426-98b7-bd9dd404283b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://07e060fc8cd65cfc5a3e2ba86edb408b32996c25dac44f6f70d2f91b837e9da1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:53:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vkhgr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://03d0e97f4ab12f9cccfefde0d5665e45e14db63343fd1273279d41a8e56b060f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:53:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vkhgr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T06:53:07Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-n9b52\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:25Z is after 2025-08-24T17:21:41Z" Nov 28 
06:53:25 crc kubenswrapper[4922]: I1128 06:53:25.921750 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"78d1b075-6126-476f-8318-483aeaa7b542\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d8727c0c5c76c4071561c95b14554063c01a2d7d826bfef19624a346f7079096\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c63926690deadbe1a0b9bd4228c1be8c9d959ae52dc13540e1d7da0ef6ce2b44\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://12e55656cf2a2cadbef853f458f347aabe75bd6b644a48a8c320144a0bbb77f8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\
\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ff005273cedb4a3be19dafd07b2572733446d00c1aa3d89dec6dad79a9b37cb7\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://43f9db3464d315f5b596f15327b9e7cfc2935bb8f18492bcf4aa1b2d8e2e8951\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-28T06:52:53Z\\\",\\\"message\\\":\\\" to sync for client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file\\\\nI1128 06:52:53.805479 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\nI1128 06:52:53.805349 1 requestheader_controller.go:172] Starting RequestHeaderAuthRequestController\\\\nI1128 06:52:53.807455 1 shared_informer.go:313] Waiting for caches to sync for RequestHeaderAuthRequestController\\\\nI1128 06:52:53.805665 1 envvar.go:172] \\\\\\\"Feature gate default state\\\\\\\" feature=\\\\\\\"WatchListClient\\\\\\\" enabled=false\\\\nI1128 06:52:53.807562 1 envvar.go:172] \\\\\\\"Feature gate default state\\\\\\\" feature=\\\\\\\"InformerResourceVersion\\\\\\\" enabled=false\\\\nI1128 06:52:53.805922 1 tlsconfig.go:203] \\\\\\\"Loaded serving cert\\\\\\\" certName=\\\\\\\"serving-cert::/tmp/serving-cert-870734072/tls.crt::/tmp/serving-cert-870734072/tls.key\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"localhost\\\\\\\\\\\\\\\" [serving] validServingFor=[localhost] issuer=\\\\\\\\\\\\\\\"check-endpoints-signer@1764312757\\\\\\\\\\\\\\\" (2025-11-28 06:52:36 +0000 UTC to 2025-12-28 06:52:37 +0000 UTC (now=2025-11-28 06:52:53.805871396 +0000 UTC))\\\\\\\"\\\\nI1128 06:52:53.808356 1 named_certificates.go:53] \\\\\\\"Loaded SNI cert\\\\\\\" index=0 certName=\\\\\\\"self-signed loopback\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"apiserver-loopback-client@1764312768\\\\\\\\\\\\\\\" [serving] validServingFor=[apiserver-loopback-client] issuer=\\\\\\\\\\\\\\\"apiserver-loopback-client-ca@1764312768\\\\\\\\\\\\\\\" (2025-11-28 05:52:47 +0000 UTC to 2026-11-28 05:52:47 +0000 UTC (now=2025-11-28 06:52:53.808326937 +0000 UTC))\\\\\\\"\\\\nI1128 06:52:53.808434 1 secure_serving.go:213] Serving securely on [::]:17697\\\\nI1128 06:52:53.808491 1 genericapiserver.go:683] [graceful-termination] waiting for shutdown to be initiated\\\\nI1128 06:52:53.805962 1 dynamic_serving_content.go:135] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-870734072/tls.crt::/tmp/serving-cert-870734072/tls.key\\\\\\\"\\\\nI1128 06:52:53.808872 1 tlsconfig.go:243] \\\\\\\"Starting DynamicServingCertificateController\\\\\\\"\\\\nF1128 06:52:53.810397 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T06:52:37Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cd17aa15325fbe3a40b81cea5bfec137ef02a8dd2d5f8393be4d206a8360ba89\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:37Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9a1a9e0fb2169a4caf2986906a87573648458725e2571ec377704856d3c16b47\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9a1a9e0fb2169a4caf2986906a87573648458725e2571ec377704856d3c16b47\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T06:52:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T06:52:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T06:52:35Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:25Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:25 crc kubenswrapper[4922]: I1128 06:53:25.936294 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-w9zxj" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"e1f29751-83a6-4469-b733-50e654026f8c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://679d64be6eca63ee652a4cb6cc5432c6ce549d1de243bdfbde115af634e770e3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ghdft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T06:52:54Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-w9zxj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:25Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:25 crc kubenswrapper[4922]: I1128 06:53:25.937427 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:25 crc kubenswrapper[4922]: I1128 06:53:25.937478 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:25 crc kubenswrapper[4922]: I1128 06:53:25.937495 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:25 crc kubenswrapper[4922]: I1128 06:53:25.937518 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:25 crc kubenswrapper[4922]: I1128 06:53:25.937538 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:25Z","lastTransitionTime":"2025-11-28T06:53:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: 
no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:53:25 crc kubenswrapper[4922]: I1128 06:53:25.955756 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-jgzjd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b05f16bb-1729-4fd8-883a-4fb960bf4cff\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://099001160eba2d5545fdd42a6fac2b9842549a64b5d3fe093c274e073a156e5a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fwd5b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\
\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T06:52:54Z\\\"}}\" for pod \"openshift-multus\"/\"multus-jgzjd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:25Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:25 crc kubenswrapper[4922]: I1128 06:53:25.973065 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-xm948" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"28b50482-ffec-406c-9ff9-9604bce5d5d5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8329d62934c99c8561a76ffc873ea44b49549e6be23697ea4003dcd42798f914\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:53:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-76zhj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ea8d65e8ee7eb92aa290261e8f51b41b46819bc513aaca5c3bbdf3aa90a4acf1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ea8d65e8ee7eb92aa290261e8f51b41b46819bc513aaca5c3bbdf3aa90a4acf1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T06:52:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mo
untPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-76zhj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b52be6274c308e05417b43b1a405e2156837ac5b1e751cf1bf07f1820e5072c1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b52be6274c308e05417b43b1a405e2156837ac5b1e751cf1bf07f1820e5072c1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T06:52:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T06:52:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-76zhj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2e49c9118ef8a51223ca7a5ed0e966458c25e01d5fe81bffeecf85e3c958177a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2e49c9118ef8a51223ca7a5ed0e966458c25e01d5fe81bffeecf85e3c958177a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T06:52:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T06:52:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-76zhj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://10808a5202288b64a0583c474df43f7ad4846b876c071e3797e9403416dbd4e5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":
{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://10808a5202288b64a0583c474df43f7ad4846b876c071e3797e9403416dbd4e5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T06:52:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T06:52:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-76zhj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f17f7a3f2eae1a08db3ab5d237156d3555583ed11bdf121c4138312913bc2ad1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f17f7a3f2eae1a08db3ab5d237156d3555583ed11bdf121c4138312913bc2ad1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T06:53:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T06:52:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-76zhj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8c558e4eb231b58b703d70d6da8454e7128ac7fc7138897bedcbdeb9e801a08b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8c558e4eb231b58b703d70d6da8454e7128ac7fc7138897bedcbdeb9e801a08b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T06:53:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T06:53:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-76zhj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T06:52:54Z\\\"}}\" for pod 
\"openshift-multus\"/\"multus-additional-cni-plugins-xm948\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:25Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:25 crc kubenswrapper[4922]: I1128 06:53:25.985484 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-z5d9x" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"21211ec8-3baf-4230-9cd8-c641f6bdc0e1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d42d2096b746c6f0a9210409101237a8b283b9baa914633aefc36971712ec634\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g4hdz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T06:52:57Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-z5d9x\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:25Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:26 crc kubenswrapper[4922]: I1128 06:53:25.999911 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-9kfr9" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"709beb43-ed88-4a0a-b384-0c463e469964\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:09Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:09Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5p26f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5p26f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T06:53:09Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-9kfr9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:25Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:26 crc kubenswrapper[4922]: I1128 06:53:26.020119 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f54a7fc3-dff2-4919-91a9-7defe17b717e\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2b39d53d3c00be07ad6ed44fb37961669118efd3f0a953a39e05c90c9fc30319\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1c84fe4f54f9b9fc76f23f13aaee875e9d127be3dec3e3952893436fb9d94936\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bf8a543b9af64068e2164c1fb7fcf8117e36a3f1a6c6e40df2a63da8fea86612\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7ef4b355167e11c82fb8067373d0d1b4ec5551157e683043c98f9249082795d2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T06:52:35Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:26Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:26 crc kubenswrapper[4922]: I1128 06:53:26.038843 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:26Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:26 crc kubenswrapper[4922]: I1128 06:53:26.041718 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:26 crc kubenswrapper[4922]: I1128 06:53:26.041776 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:26 crc kubenswrapper[4922]: I1128 06:53:26.041794 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:26 crc kubenswrapper[4922]: I1128 06:53:26.041819 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:26 crc kubenswrapper[4922]: I1128 06:53:26.041840 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:26Z","lastTransitionTime":"2025-11-28T06:53:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 06:53:26 crc kubenswrapper[4922]: I1128 06:53:26.064728 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8de96806bba4cb76f53ccf39f1188732d96955671049b550589a836ed21e0616\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:26Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:26 crc kubenswrapper[4922]: I1128 06:53:26.088023 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"e3415f94-87c0-4a05-9d74-02ac020c4d35\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8db4ea0b3e2badcbe285f1758b150d335c8c1f4d92478a2de587cf9d345f3640\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a5b5d5b7a735c902e218bbb524cda2f64b167fcced9cc07a47bff7e59cb55115\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://900d4f306d3a3e4386262c1ee1031ad03503f6c262e9305c581397b3bf1a6a60\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://73c8b650a2a0ebc55d6531c14f5c6e5e4a7ffbac511aea8400c6632289aeb275\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://73c8b650a2a0ebc55d6531c14f5c6e5e4a7ffbac511aea8400c6632289aeb275\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T06:52:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T06:52:36Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T06:52:35Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:26Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:26 crc kubenswrapper[4922]: I1128 06:53:26.107023 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://60275d4709ecec957c37c52e762628422258288394fb23bb6190e98253977288\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d7be069e52fb9de73b7d4297a34eaab68e25c764ed60172e87146259d0d0b6ed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":t
rue,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:26Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:26 crc kubenswrapper[4922]: I1128 06:53:26.147896 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:26 crc kubenswrapper[4922]: I1128 06:53:26.147950 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:26 crc kubenswrapper[4922]: I1128 06:53:26.147968 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:26 crc kubenswrapper[4922]: I1128 06:53:26.147992 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:26 crc kubenswrapper[4922]: I1128 06:53:26.148010 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:26Z","lastTransitionTime":"2025-11-28T06:53:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:53:26 crc kubenswrapper[4922]: I1128 06:53:26.193049 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 06:53:26 crc kubenswrapper[4922]: E1128 06:53:26.193345 4922 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 06:53:58.193307753 +0000 UTC m=+83.113703375 (durationBeforeRetry 32s). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 06:53:26 crc kubenswrapper[4922]: E1128 06:53:26.193399 4922 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Nov 28 06:53:26 crc kubenswrapper[4922]: E1128 06:53:26.193517 4922 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-28 06:53:58.193489018 +0000 UTC m=+83.113884640 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Nov 28 06:53:26 crc kubenswrapper[4922]: I1128 06:53:26.193821 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 06:53:26 crc kubenswrapper[4922]: I1128 06:53:26.193921 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 06:53:26 crc kubenswrapper[4922]: E1128 06:53:26.194133 4922 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 28 06:53:26 crc kubenswrapper[4922]: E1128 06:53:26.194254 4922 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-28 06:53:58.194205218 +0000 UTC m=+83.114600830 (durationBeforeRetry 32s). 
Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 28 06:53:26 crc kubenswrapper[4922]: I1128 06:53:26.251631 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:26 crc kubenswrapper[4922]: I1128 06:53:26.251700 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:26 crc kubenswrapper[4922]: I1128 06:53:26.251719 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:26 crc kubenswrapper[4922]: I1128 06:53:26.251749 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:26 crc kubenswrapper[4922]: I1128 06:53:26.251771 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:26Z","lastTransitionTime":"2025-11-28T06:53:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:53:26 crc kubenswrapper[4922]: I1128 06:53:26.295254 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 06:53:26 crc kubenswrapper[4922]: E1128 06:53:26.295440 4922 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 28 06:53:26 crc kubenswrapper[4922]: E1128 06:53:26.295486 4922 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 28 06:53:26 crc kubenswrapper[4922]: E1128 06:53:26.295507 4922 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 28 06:53:26 crc kubenswrapper[4922]: E1128 06:53:26.295586 4922 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-11-28 06:53:58.29556252 +0000 UTC m=+83.215958132 (durationBeforeRetry 32s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 28 06:53:26 crc kubenswrapper[4922]: I1128 06:53:26.355661 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:26 crc kubenswrapper[4922]: I1128 06:53:26.355728 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:26 crc kubenswrapper[4922]: I1128 06:53:26.355746 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:26 crc kubenswrapper[4922]: I1128 06:53:26.355772 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:26 crc kubenswrapper[4922]: I1128 06:53:26.355792 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:26Z","lastTransitionTime":"2025-11-28T06:53:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:53:26 crc kubenswrapper[4922]: I1128 06:53:26.396549 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 06:53:26 crc kubenswrapper[4922]: E1128 06:53:26.396822 4922 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 28 06:53:26 crc kubenswrapper[4922]: E1128 06:53:26.396857 4922 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 28 06:53:26 crc kubenswrapper[4922]: E1128 06:53:26.396876 4922 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 28 06:53:26 crc kubenswrapper[4922]: E1128 06:53:26.396942 4922 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-11-28 06:53:58.396920663 +0000 UTC m=+83.317316275 (durationBeforeRetry 32s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 28 06:53:26 crc kubenswrapper[4922]: I1128 06:53:26.397640 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 06:53:26 crc kubenswrapper[4922]: I1128 06:53:26.397703 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 06:53:26 crc kubenswrapper[4922]: I1128 06:53:26.397639 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-9kfr9" Nov 28 06:53:26 crc kubenswrapper[4922]: E1128 06:53:26.397832 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 06:53:26 crc kubenswrapper[4922]: I1128 06:53:26.397897 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 06:53:26 crc kubenswrapper[4922]: E1128 06:53:26.398023 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-9kfr9" podUID="709beb43-ed88-4a0a-b384-0c463e469964" Nov 28 06:53:26 crc kubenswrapper[4922]: E1128 06:53:26.398149 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 06:53:26 crc kubenswrapper[4922]: E1128 06:53:26.398373 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 06:53:26 crc kubenswrapper[4922]: I1128 06:53:26.459837 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:26 crc kubenswrapper[4922]: I1128 06:53:26.459917 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:26 crc kubenswrapper[4922]: I1128 06:53:26.459946 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:26 crc kubenswrapper[4922]: I1128 06:53:26.459979 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:26 crc kubenswrapper[4922]: I1128 06:53:26.460002 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:26Z","lastTransitionTime":"2025-11-28T06:53:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:53:26 crc kubenswrapper[4922]: I1128 06:53:26.562583 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:26 crc kubenswrapper[4922]: I1128 06:53:26.562619 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:26 crc kubenswrapper[4922]: I1128 06:53:26.562628 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:26 crc kubenswrapper[4922]: I1128 06:53:26.562641 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:26 crc kubenswrapper[4922]: I1128 06:53:26.562663 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:26Z","lastTransitionTime":"2025-11-28T06:53:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 06:53:26 crc kubenswrapper[4922]: I1128 06:53:26.667189 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:26 crc kubenswrapper[4922]: I1128 06:53:26.667294 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:26 crc kubenswrapper[4922]: I1128 06:53:26.667318 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:26 crc kubenswrapper[4922]: I1128 06:53:26.667346 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:26 crc kubenswrapper[4922]: I1128 06:53:26.667363 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:26Z","lastTransitionTime":"2025-11-28T06:53:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:53:26 crc kubenswrapper[4922]: I1128 06:53:26.771001 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:26 crc kubenswrapper[4922]: I1128 06:53:26.771076 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:26 crc kubenswrapper[4922]: I1128 06:53:26.771097 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:26 crc kubenswrapper[4922]: I1128 06:53:26.771124 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:26 crc kubenswrapper[4922]: I1128 06:53:26.771145 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:26Z","lastTransitionTime":"2025-11-28T06:53:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 06:53:26 crc kubenswrapper[4922]: I1128 06:53:26.797644 4922 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-7gdxt_ac5c6b67-2037-400e-8e03-845b47d8ca67/ovnkube-controller/1.log" Nov 28 06:53:26 crc kubenswrapper[4922]: I1128 06:53:26.874161 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:26 crc kubenswrapper[4922]: I1128 06:53:26.874262 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:26 crc kubenswrapper[4922]: I1128 06:53:26.874284 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:26 crc kubenswrapper[4922]: I1128 06:53:26.874313 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:26 crc kubenswrapper[4922]: I1128 06:53:26.874342 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:26Z","lastTransitionTime":"2025-11-28T06:53:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:53:26 crc kubenswrapper[4922]: I1128 06:53:26.977465 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:26 crc kubenswrapper[4922]: I1128 06:53:26.977533 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:26 crc kubenswrapper[4922]: I1128 06:53:26.977550 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:26 crc kubenswrapper[4922]: I1128 06:53:26.977576 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:26 crc kubenswrapper[4922]: I1128 06:53:26.977595 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:26Z","lastTransitionTime":"2025-11-28T06:53:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 06:53:27 crc kubenswrapper[4922]: I1128 06:53:27.081110 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:27 crc kubenswrapper[4922]: I1128 06:53:27.081170 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:27 crc kubenswrapper[4922]: I1128 06:53:27.081188 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:27 crc kubenswrapper[4922]: I1128 06:53:27.081212 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:27 crc kubenswrapper[4922]: I1128 06:53:27.081262 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:27Z","lastTransitionTime":"2025-11-28T06:53:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:53:27 crc kubenswrapper[4922]: I1128 06:53:27.184189 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:27 crc kubenswrapper[4922]: I1128 06:53:27.184301 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:27 crc kubenswrapper[4922]: I1128 06:53:27.184320 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:27 crc kubenswrapper[4922]: I1128 06:53:27.184379 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:27 crc kubenswrapper[4922]: I1128 06:53:27.184397 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:27Z","lastTransitionTime":"2025-11-28T06:53:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:53:27 crc kubenswrapper[4922]: I1128 06:53:27.287510 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:27 crc kubenswrapper[4922]: I1128 06:53:27.287554 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:27 crc kubenswrapper[4922]: I1128 06:53:27.287568 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:27 crc kubenswrapper[4922]: I1128 06:53:27.287584 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:27 crc kubenswrapper[4922]: I1128 06:53:27.287593 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:27Z","lastTransitionTime":"2025-11-28T06:53:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 06:53:27 crc kubenswrapper[4922]: I1128 06:53:27.390499 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:27 crc kubenswrapper[4922]: I1128 06:53:27.390587 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:27 crc kubenswrapper[4922]: I1128 06:53:27.390613 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:27 crc kubenswrapper[4922]: I1128 06:53:27.390643 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:27 crc kubenswrapper[4922]: I1128 06:53:27.390660 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:27Z","lastTransitionTime":"2025-11-28T06:53:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:53:27 crc kubenswrapper[4922]: I1128 06:53:27.493974 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:27 crc kubenswrapper[4922]: I1128 06:53:27.494053 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:27 crc kubenswrapper[4922]: I1128 06:53:27.494076 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:27 crc kubenswrapper[4922]: I1128 06:53:27.494111 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:27 crc kubenswrapper[4922]: I1128 06:53:27.494134 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:27Z","lastTransitionTime":"2025-11-28T06:53:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:53:27 crc kubenswrapper[4922]: I1128 06:53:27.597209 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:27 crc kubenswrapper[4922]: I1128 06:53:27.597306 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:27 crc kubenswrapper[4922]: I1128 06:53:27.597328 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:27 crc kubenswrapper[4922]: I1128 06:53:27.597359 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:27 crc kubenswrapper[4922]: I1128 06:53:27.597383 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:27Z","lastTransitionTime":"2025-11-28T06:53:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 06:53:27 crc kubenswrapper[4922]: I1128 06:53:27.700260 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:27 crc kubenswrapper[4922]: I1128 06:53:27.700341 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:27 crc kubenswrapper[4922]: I1128 06:53:27.700359 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:27 crc kubenswrapper[4922]: I1128 06:53:27.700384 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:27 crc kubenswrapper[4922]: I1128 06:53:27.700404 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:27Z","lastTransitionTime":"2025-11-28T06:53:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:53:27 crc kubenswrapper[4922]: I1128 06:53:27.803503 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:27 crc kubenswrapper[4922]: I1128 06:53:27.803585 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:27 crc kubenswrapper[4922]: I1128 06:53:27.803612 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:27 crc kubenswrapper[4922]: I1128 06:53:27.803644 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:27 crc kubenswrapper[4922]: I1128 06:53:27.803662 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:27Z","lastTransitionTime":"2025-11-28T06:53:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:53:27 crc kubenswrapper[4922]: I1128 06:53:27.906584 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:27 crc kubenswrapper[4922]: I1128 06:53:27.906636 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:27 crc kubenswrapper[4922]: I1128 06:53:27.906667 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:27 crc kubenswrapper[4922]: I1128 06:53:27.906688 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:27 crc kubenswrapper[4922]: I1128 06:53:27.906707 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:27Z","lastTransitionTime":"2025-11-28T06:53:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 06:53:28 crc kubenswrapper[4922]: I1128 06:53:28.010357 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:28 crc kubenswrapper[4922]: I1128 06:53:28.010467 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:28 crc kubenswrapper[4922]: I1128 06:53:28.010486 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:28 crc kubenswrapper[4922]: I1128 06:53:28.010515 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:28 crc kubenswrapper[4922]: I1128 06:53:28.010533 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:28Z","lastTransitionTime":"2025-11-28T06:53:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:53:28 crc kubenswrapper[4922]: I1128 06:53:28.103837 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:28 crc kubenswrapper[4922]: I1128 06:53:28.103905 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:28 crc kubenswrapper[4922]: I1128 06:53:28.103923 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:28 crc kubenswrapper[4922]: I1128 06:53:28.103946 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:28 crc kubenswrapper[4922]: I1128 06:53:28.103964 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:28Z","lastTransitionTime":"2025-11-28T06:53:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 06:53:28 crc kubenswrapper[4922]: E1128 06:53:28.127482 4922 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T06:53:28Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:28Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T06:53:28Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:28Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T06:53:28Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:28Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T06:53:28Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:28Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"3ea1e6bb-61b7-453d-b9cb-290e4e3cdf54\\\",\\\"systemUUID\\\":\\\"a83ea3b2-2af6-4e19-83ef-b63bfe4faed4\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:28Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:28 crc kubenswrapper[4922]: I1128 06:53:28.133148 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:28 crc kubenswrapper[4922]: I1128 06:53:28.133203 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 28 06:53:28 crc kubenswrapper[4922]: I1128 06:53:28.133261 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:28 crc kubenswrapper[4922]: I1128 06:53:28.133292 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:28 crc kubenswrapper[4922]: I1128 06:53:28.133315 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:28Z","lastTransitionTime":"2025-11-28T06:53:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:53:28 crc kubenswrapper[4922]: E1128 06:53:28.154548 4922 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T06:53:28Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:28Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T06:53:28Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:28Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T06:53:28Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:28Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T06:53:28Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:28Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"3ea1e6bb-61b7-453d-b9cb-290e4e3cdf54\\\",\\\"systemUUID\\\":\\\"a83ea3b2-2af6-4e19-83ef-b63bfe4faed4\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:28Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:28 crc kubenswrapper[4922]: I1128 06:53:28.160092 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:28 crc kubenswrapper[4922]: I1128 06:53:28.160149 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 28 06:53:28 crc kubenswrapper[4922]: I1128 06:53:28.160173 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:28 crc kubenswrapper[4922]: I1128 06:53:28.160583 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:28 crc kubenswrapper[4922]: I1128 06:53:28.160848 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:28Z","lastTransitionTime":"2025-11-28T06:53:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:53:28 crc kubenswrapper[4922]: E1128 06:53:28.180848 4922 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T06:53:28Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:28Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T06:53:28Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:28Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T06:53:28Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:28Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T06:53:28Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:28Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"3ea1e6bb-61b7-453d-b9cb-290e4e3cdf54\\\",\\\"systemUUID\\\":\\\"a83ea3b2-2af6-4e19-83ef-b63bfe4faed4\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:28Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:28 crc kubenswrapper[4922]: I1128 06:53:28.186605 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:28 crc kubenswrapper[4922]: I1128 06:53:28.186656 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 28 06:53:28 crc kubenswrapper[4922]: I1128 06:53:28.186673 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:28 crc kubenswrapper[4922]: I1128 06:53:28.186695 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:28 crc kubenswrapper[4922]: I1128 06:53:28.186711 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:28Z","lastTransitionTime":"2025-11-28T06:53:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:53:28 crc kubenswrapper[4922]: E1128 06:53:28.206674 4922 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T06:53:28Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:28Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T06:53:28Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:28Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T06:53:28Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:28Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T06:53:28Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:28Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"3ea1e6bb-61b7-453d-b9cb-290e4e3cdf54\\\",\\\"systemUUID\\\":\\\"a83ea3b2-2af6-4e19-83ef-b63bfe4faed4\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:28Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:28 crc kubenswrapper[4922]: I1128 06:53:28.211576 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:28 crc kubenswrapper[4922]: I1128 06:53:28.211660 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 28 06:53:28 crc kubenswrapper[4922]: I1128 06:53:28.211686 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:28 crc kubenswrapper[4922]: I1128 06:53:28.211718 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:28 crc kubenswrapper[4922]: I1128 06:53:28.211739 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:28Z","lastTransitionTime":"2025-11-28T06:53:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:53:28 crc kubenswrapper[4922]: E1128 06:53:28.232858 4922 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T06:53:28Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:28Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T06:53:28Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:28Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T06:53:28Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:28Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T06:53:28Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:28Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"3ea1e6bb-61b7-453d-b9cb-290e4e3cdf54\\\",\\\"systemUUID\\\":\\\"a83ea3b2-2af6-4e19-83ef-b63bfe4faed4\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:28Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:28 crc kubenswrapper[4922]: E1128 06:53:28.233082 4922 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Nov 28 06:53:28 crc kubenswrapper[4922]: I1128 06:53:28.235094 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Nov 28 06:53:28 crc kubenswrapper[4922]: I1128 06:53:28.235172 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:28 crc kubenswrapper[4922]: I1128 06:53:28.235196 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:28 crc kubenswrapper[4922]: I1128 06:53:28.235259 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:28 crc kubenswrapper[4922]: I1128 06:53:28.235280 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:28Z","lastTransitionTime":"2025-11-28T06:53:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:53:28 crc kubenswrapper[4922]: I1128 06:53:28.338604 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:28 crc kubenswrapper[4922]: I1128 06:53:28.338656 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:28 crc kubenswrapper[4922]: I1128 06:53:28.338674 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:28 crc kubenswrapper[4922]: I1128 06:53:28.338696 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:28 crc kubenswrapper[4922]: I1128 06:53:28.338712 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:28Z","lastTransitionTime":"2025-11-28T06:53:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:53:28 crc kubenswrapper[4922]: I1128 06:53:28.397841 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-9kfr9" Nov 28 06:53:28 crc kubenswrapper[4922]: I1128 06:53:28.397841 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 06:53:28 crc kubenswrapper[4922]: E1128 06:53:28.398145 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-9kfr9" podUID="709beb43-ed88-4a0a-b384-0c463e469964" Nov 28 06:53:28 crc kubenswrapper[4922]: I1128 06:53:28.397869 4922 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 06:53:28 crc kubenswrapper[4922]: E1128 06:53:28.398265 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 06:53:28 crc kubenswrapper[4922]: I1128 06:53:28.397856 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 06:53:28 crc kubenswrapper[4922]: E1128 06:53:28.399019 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 06:53:28 crc kubenswrapper[4922]: E1128 06:53:28.398552 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 06:53:28 crc kubenswrapper[4922]: I1128 06:53:28.441275 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:28 crc kubenswrapper[4922]: I1128 06:53:28.441356 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:28 crc kubenswrapper[4922]: I1128 06:53:28.441381 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:28 crc kubenswrapper[4922]: I1128 06:53:28.441410 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:28 crc kubenswrapper[4922]: I1128 06:53:28.441429 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:28Z","lastTransitionTime":"2025-11-28T06:53:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 06:53:28 crc kubenswrapper[4922]: I1128 06:53:28.544580 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:28 crc kubenswrapper[4922]: I1128 06:53:28.544633 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:28 crc kubenswrapper[4922]: I1128 06:53:28.544650 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:28 crc kubenswrapper[4922]: I1128 06:53:28.544672 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:28 crc kubenswrapper[4922]: I1128 06:53:28.544689 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:28Z","lastTransitionTime":"2025-11-28T06:53:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:53:28 crc kubenswrapper[4922]: I1128 06:53:28.647535 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:28 crc kubenswrapper[4922]: I1128 06:53:28.647616 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:28 crc kubenswrapper[4922]: I1128 06:53:28.647642 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:28 crc kubenswrapper[4922]: I1128 06:53:28.647671 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:28 crc kubenswrapper[4922]: I1128 06:53:28.647697 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:28Z","lastTransitionTime":"2025-11-28T06:53:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:53:28 crc kubenswrapper[4922]: I1128 06:53:28.750871 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:28 crc kubenswrapper[4922]: I1128 06:53:28.750928 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:28 crc kubenswrapper[4922]: I1128 06:53:28.750944 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:28 crc kubenswrapper[4922]: I1128 06:53:28.750970 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:28 crc kubenswrapper[4922]: I1128 06:53:28.750987 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:28Z","lastTransitionTime":"2025-11-28T06:53:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 06:53:28 crc kubenswrapper[4922]: I1128 06:53:28.853928 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:28 crc kubenswrapper[4922]: I1128 06:53:28.853990 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:28 crc kubenswrapper[4922]: I1128 06:53:28.854013 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:28 crc kubenswrapper[4922]: I1128 06:53:28.854041 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:28 crc kubenswrapper[4922]: I1128 06:53:28.854062 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:28Z","lastTransitionTime":"2025-11-28T06:53:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:53:28 crc kubenswrapper[4922]: I1128 06:53:28.964533 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:28 crc kubenswrapper[4922]: I1128 06:53:28.964598 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:28 crc kubenswrapper[4922]: I1128 06:53:28.964620 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:28 crc kubenswrapper[4922]: I1128 06:53:28.964653 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:28 crc kubenswrapper[4922]: I1128 06:53:28.964676 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:28Z","lastTransitionTime":"2025-11-28T06:53:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:53:29 crc kubenswrapper[4922]: I1128 06:53:29.067658 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:29 crc kubenswrapper[4922]: I1128 06:53:29.067721 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:29 crc kubenswrapper[4922]: I1128 06:53:29.067739 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:29 crc kubenswrapper[4922]: I1128 06:53:29.067765 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:29 crc kubenswrapper[4922]: I1128 06:53:29.067786 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:29Z","lastTransitionTime":"2025-11-28T06:53:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 06:53:29 crc kubenswrapper[4922]: I1128 06:53:29.171437 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:29 crc kubenswrapper[4922]: I1128 06:53:29.171508 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:29 crc kubenswrapper[4922]: I1128 06:53:29.171532 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:29 crc kubenswrapper[4922]: I1128 06:53:29.171560 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:29 crc kubenswrapper[4922]: I1128 06:53:29.171583 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:29Z","lastTransitionTime":"2025-11-28T06:53:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:53:29 crc kubenswrapper[4922]: I1128 06:53:29.275075 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:29 crc kubenswrapper[4922]: I1128 06:53:29.275157 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:29 crc kubenswrapper[4922]: I1128 06:53:29.275176 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:29 crc kubenswrapper[4922]: I1128 06:53:29.275203 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:29 crc kubenswrapper[4922]: I1128 06:53:29.275256 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:29Z","lastTransitionTime":"2025-11-28T06:53:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:53:29 crc kubenswrapper[4922]: I1128 06:53:29.377863 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:29 crc kubenswrapper[4922]: I1128 06:53:29.377935 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:29 crc kubenswrapper[4922]: I1128 06:53:29.377962 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:29 crc kubenswrapper[4922]: I1128 06:53:29.377992 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:29 crc kubenswrapper[4922]: I1128 06:53:29.378015 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:29Z","lastTransitionTime":"2025-11-28T06:53:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 06:53:29 crc kubenswrapper[4922]: I1128 06:53:29.480563 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:29 crc kubenswrapper[4922]: I1128 06:53:29.480664 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:29 crc kubenswrapper[4922]: I1128 06:53:29.480686 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:29 crc kubenswrapper[4922]: I1128 06:53:29.480718 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:29 crc kubenswrapper[4922]: I1128 06:53:29.480740 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:29Z","lastTransitionTime":"2025-11-28T06:53:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:53:29 crc kubenswrapper[4922]: I1128 06:53:29.584591 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:29 crc kubenswrapper[4922]: I1128 06:53:29.584649 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:29 crc kubenswrapper[4922]: I1128 06:53:29.584668 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:29 crc kubenswrapper[4922]: I1128 06:53:29.584694 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:29 crc kubenswrapper[4922]: I1128 06:53:29.584712 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:29Z","lastTransitionTime":"2025-11-28T06:53:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:53:29 crc kubenswrapper[4922]: I1128 06:53:29.686785 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:29 crc kubenswrapper[4922]: I1128 06:53:29.686847 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:29 crc kubenswrapper[4922]: I1128 06:53:29.686867 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:29 crc kubenswrapper[4922]: I1128 06:53:29.686892 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:29 crc kubenswrapper[4922]: I1128 06:53:29.686911 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:29Z","lastTransitionTime":"2025-11-28T06:53:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 06:53:29 crc kubenswrapper[4922]: I1128 06:53:29.790098 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:29 crc kubenswrapper[4922]: I1128 06:53:29.790172 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:29 crc kubenswrapper[4922]: I1128 06:53:29.790210 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:29 crc kubenswrapper[4922]: I1128 06:53:29.790339 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:29 crc kubenswrapper[4922]: I1128 06:53:29.790368 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:29Z","lastTransitionTime":"2025-11-28T06:53:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:53:29 crc kubenswrapper[4922]: I1128 06:53:29.894631 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:29 crc kubenswrapper[4922]: I1128 06:53:29.894702 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:29 crc kubenswrapper[4922]: I1128 06:53:29.894746 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:29 crc kubenswrapper[4922]: I1128 06:53:29.894781 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:29 crc kubenswrapper[4922]: I1128 06:53:29.894812 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:29Z","lastTransitionTime":"2025-11-28T06:53:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:53:29 crc kubenswrapper[4922]: I1128 06:53:29.998841 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:29 crc kubenswrapper[4922]: I1128 06:53:29.998905 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:29 crc kubenswrapper[4922]: I1128 06:53:29.998922 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:29 crc kubenswrapper[4922]: I1128 06:53:29.998949 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:29 crc kubenswrapper[4922]: I1128 06:53:29.998967 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:29Z","lastTransitionTime":"2025-11-28T06:53:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 06:53:30 crc kubenswrapper[4922]: I1128 06:53:30.102821 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:30 crc kubenswrapper[4922]: I1128 06:53:30.102896 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:30 crc kubenswrapper[4922]: I1128 06:53:30.102913 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:30 crc kubenswrapper[4922]: I1128 06:53:30.102937 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:30 crc kubenswrapper[4922]: I1128 06:53:30.102955 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:30Z","lastTransitionTime":"2025-11-28T06:53:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:53:30 crc kubenswrapper[4922]: I1128 06:53:30.205945 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:30 crc kubenswrapper[4922]: I1128 06:53:30.206021 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:30 crc kubenswrapper[4922]: I1128 06:53:30.206046 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:30 crc kubenswrapper[4922]: I1128 06:53:30.206078 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:30 crc kubenswrapper[4922]: I1128 06:53:30.206105 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:30Z","lastTransitionTime":"2025-11-28T06:53:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:53:30 crc kubenswrapper[4922]: I1128 06:53:30.308739 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:30 crc kubenswrapper[4922]: I1128 06:53:30.308791 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:30 crc kubenswrapper[4922]: I1128 06:53:30.308806 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:30 crc kubenswrapper[4922]: I1128 06:53:30.308825 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:30 crc kubenswrapper[4922]: I1128 06:53:30.308837 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:30Z","lastTransitionTime":"2025-11-28T06:53:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 06:53:30 crc kubenswrapper[4922]: I1128 06:53:30.398388 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 06:53:30 crc kubenswrapper[4922]: I1128 06:53:30.398500 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 06:53:30 crc kubenswrapper[4922]: I1128 06:53:30.398588 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-9kfr9" Nov 28 06:53:30 crc kubenswrapper[4922]: E1128 06:53:30.398673 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 06:53:30 crc kubenswrapper[4922]: E1128 06:53:30.398805 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 06:53:30 crc kubenswrapper[4922]: E1128 06:53:30.398997 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-9kfr9" podUID="709beb43-ed88-4a0a-b384-0c463e469964" Nov 28 06:53:30 crc kubenswrapper[4922]: I1128 06:53:30.399410 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 06:53:30 crc kubenswrapper[4922]: E1128 06:53:30.399626 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 06:53:30 crc kubenswrapper[4922]: I1128 06:53:30.411644 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:30 crc kubenswrapper[4922]: I1128 06:53:30.411695 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:30 crc kubenswrapper[4922]: I1128 06:53:30.411712 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:30 crc kubenswrapper[4922]: I1128 06:53:30.411738 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:30 crc kubenswrapper[4922]: I1128 06:53:30.411756 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:30Z","lastTransitionTime":"2025-11-28T06:53:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:53:30 crc kubenswrapper[4922]: I1128 06:53:30.514732 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:30 crc kubenswrapper[4922]: I1128 06:53:30.514808 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:30 crc kubenswrapper[4922]: I1128 06:53:30.514827 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:30 crc kubenswrapper[4922]: I1128 06:53:30.514852 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:30 crc kubenswrapper[4922]: I1128 06:53:30.514870 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:30Z","lastTransitionTime":"2025-11-28T06:53:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 06:53:30 crc kubenswrapper[4922]: I1128 06:53:30.618406 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:30 crc kubenswrapper[4922]: I1128 06:53:30.618537 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:30 crc kubenswrapper[4922]: I1128 06:53:30.618554 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:30 crc kubenswrapper[4922]: I1128 06:53:30.618579 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:30 crc kubenswrapper[4922]: I1128 06:53:30.618597 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:30Z","lastTransitionTime":"2025-11-28T06:53:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:53:30 crc kubenswrapper[4922]: I1128 06:53:30.720923 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:30 crc kubenswrapper[4922]: I1128 06:53:30.720958 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:30 crc kubenswrapper[4922]: I1128 06:53:30.720968 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:30 crc kubenswrapper[4922]: I1128 06:53:30.720980 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:30 crc kubenswrapper[4922]: I1128 06:53:30.720988 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:30Z","lastTransitionTime":"2025-11-28T06:53:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:53:30 crc kubenswrapper[4922]: I1128 06:53:30.822971 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:30 crc kubenswrapper[4922]: I1128 06:53:30.823002 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:30 crc kubenswrapper[4922]: I1128 06:53:30.823011 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:30 crc kubenswrapper[4922]: I1128 06:53:30.823023 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:30 crc kubenswrapper[4922]: I1128 06:53:30.823032 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:30Z","lastTransitionTime":"2025-11-28T06:53:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 06:53:30 crc kubenswrapper[4922]: I1128 06:53:30.926839 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:30 crc kubenswrapper[4922]: I1128 06:53:30.926900 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:30 crc kubenswrapper[4922]: I1128 06:53:30.926918 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:30 crc kubenswrapper[4922]: I1128 06:53:30.926944 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:30 crc kubenswrapper[4922]: I1128 06:53:30.926963 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:30Z","lastTransitionTime":"2025-11-28T06:53:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:53:31 crc kubenswrapper[4922]: I1128 06:53:31.030200 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:31 crc kubenswrapper[4922]: I1128 06:53:31.030297 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:31 crc kubenswrapper[4922]: I1128 06:53:31.030316 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:31 crc kubenswrapper[4922]: I1128 06:53:31.030343 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:31 crc kubenswrapper[4922]: I1128 06:53:31.030360 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:31Z","lastTransitionTime":"2025-11-28T06:53:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:53:31 crc kubenswrapper[4922]: I1128 06:53:31.133599 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:31 crc kubenswrapper[4922]: I1128 06:53:31.133654 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:31 crc kubenswrapper[4922]: I1128 06:53:31.133671 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:31 crc kubenswrapper[4922]: I1128 06:53:31.133695 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:31 crc kubenswrapper[4922]: I1128 06:53:31.133711 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:31Z","lastTransitionTime":"2025-11-28T06:53:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 06:53:31 crc kubenswrapper[4922]: I1128 06:53:31.237094 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:31 crc kubenswrapper[4922]: I1128 06:53:31.237165 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:31 crc kubenswrapper[4922]: I1128 06:53:31.237199 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:31 crc kubenswrapper[4922]: I1128 06:53:31.237283 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:31 crc kubenswrapper[4922]: I1128 06:53:31.237313 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:31Z","lastTransitionTime":"2025-11-28T06:53:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:53:31 crc kubenswrapper[4922]: I1128 06:53:31.340864 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:31 crc kubenswrapper[4922]: I1128 06:53:31.340942 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:31 crc kubenswrapper[4922]: I1128 06:53:31.340961 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:31 crc kubenswrapper[4922]: I1128 06:53:31.340988 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:31 crc kubenswrapper[4922]: I1128 06:53:31.341006 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:31Z","lastTransitionTime":"2025-11-28T06:53:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:53:31 crc kubenswrapper[4922]: I1128 06:53:31.446708 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:31 crc kubenswrapper[4922]: I1128 06:53:31.446780 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:31 crc kubenswrapper[4922]: I1128 06:53:31.446797 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:31 crc kubenswrapper[4922]: I1128 06:53:31.446824 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:31 crc kubenswrapper[4922]: I1128 06:53:31.446845 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:31Z","lastTransitionTime":"2025-11-28T06:53:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 06:53:31 crc kubenswrapper[4922]: I1128 06:53:31.550083 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:31 crc kubenswrapper[4922]: I1128 06:53:31.550158 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:31 crc kubenswrapper[4922]: I1128 06:53:31.550176 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:31 crc kubenswrapper[4922]: I1128 06:53:31.550207 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:31 crc kubenswrapper[4922]: I1128 06:53:31.550261 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:31Z","lastTransitionTime":"2025-11-28T06:53:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:53:31 crc kubenswrapper[4922]: I1128 06:53:31.653200 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:31 crc kubenswrapper[4922]: I1128 06:53:31.653297 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:31 crc kubenswrapper[4922]: I1128 06:53:31.653319 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:31 crc kubenswrapper[4922]: I1128 06:53:31.653346 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:31 crc kubenswrapper[4922]: I1128 06:53:31.653368 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:31Z","lastTransitionTime":"2025-11-28T06:53:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:53:31 crc kubenswrapper[4922]: I1128 06:53:31.755929 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:31 crc kubenswrapper[4922]: I1128 06:53:31.755996 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:31 crc kubenswrapper[4922]: I1128 06:53:31.756014 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:31 crc kubenswrapper[4922]: I1128 06:53:31.756046 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:31 crc kubenswrapper[4922]: I1128 06:53:31.756071 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:31Z","lastTransitionTime":"2025-11-28T06:53:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 06:53:31 crc kubenswrapper[4922]: I1128 06:53:31.859409 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:31 crc kubenswrapper[4922]: I1128 06:53:31.859478 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:31 crc kubenswrapper[4922]: I1128 06:53:31.859496 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:31 crc kubenswrapper[4922]: I1128 06:53:31.859521 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:31 crc kubenswrapper[4922]: I1128 06:53:31.859539 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:31Z","lastTransitionTime":"2025-11-28T06:53:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:53:31 crc kubenswrapper[4922]: I1128 06:53:31.962518 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:31 crc kubenswrapper[4922]: I1128 06:53:31.962582 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:31 crc kubenswrapper[4922]: I1128 06:53:31.962602 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:31 crc kubenswrapper[4922]: I1128 06:53:31.962627 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:31 crc kubenswrapper[4922]: I1128 06:53:31.962648 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:31Z","lastTransitionTime":"2025-11-28T06:53:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:53:32 crc kubenswrapper[4922]: I1128 06:53:32.066359 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:32 crc kubenswrapper[4922]: I1128 06:53:32.066445 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:32 crc kubenswrapper[4922]: I1128 06:53:32.066473 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:32 crc kubenswrapper[4922]: I1128 06:53:32.066500 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:32 crc kubenswrapper[4922]: I1128 06:53:32.066517 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:32Z","lastTransitionTime":"2025-11-28T06:53:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 06:53:32 crc kubenswrapper[4922]: I1128 06:53:32.169559 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:32 crc kubenswrapper[4922]: I1128 06:53:32.169633 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:32 crc kubenswrapper[4922]: I1128 06:53:32.169658 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:32 crc kubenswrapper[4922]: I1128 06:53:32.169686 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:32 crc kubenswrapper[4922]: I1128 06:53:32.169709 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:32Z","lastTransitionTime":"2025-11-28T06:53:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:53:32 crc kubenswrapper[4922]: I1128 06:53:32.272719 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:32 crc kubenswrapper[4922]: I1128 06:53:32.272779 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:32 crc kubenswrapper[4922]: I1128 06:53:32.272801 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:32 crc kubenswrapper[4922]: I1128 06:53:32.272829 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:32 crc kubenswrapper[4922]: I1128 06:53:32.272851 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:32Z","lastTransitionTime":"2025-11-28T06:53:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:53:32 crc kubenswrapper[4922]: I1128 06:53:32.381965 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:32 crc kubenswrapper[4922]: I1128 06:53:32.382035 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:32 crc kubenswrapper[4922]: I1128 06:53:32.382059 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:32 crc kubenswrapper[4922]: I1128 06:53:32.382090 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:32 crc kubenswrapper[4922]: I1128 06:53:32.382113 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:32Z","lastTransitionTime":"2025-11-28T06:53:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 06:53:32 crc kubenswrapper[4922]: I1128 06:53:32.398422 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 06:53:32 crc kubenswrapper[4922]: I1128 06:53:32.398460 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 06:53:32 crc kubenswrapper[4922]: E1128 06:53:32.398637 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 06:53:32 crc kubenswrapper[4922]: I1128 06:53:32.398665 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-9kfr9" Nov 28 06:53:32 crc kubenswrapper[4922]: I1128 06:53:32.398701 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 06:53:32 crc kubenswrapper[4922]: E1128 06:53:32.398914 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 06:53:32 crc kubenswrapper[4922]: E1128 06:53:32.399316 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-9kfr9" podUID="709beb43-ed88-4a0a-b384-0c463e469964" Nov 28 06:53:32 crc kubenswrapper[4922]: E1128 06:53:32.399577 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 06:53:32 crc kubenswrapper[4922]: I1128 06:53:32.485482 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:32 crc kubenswrapper[4922]: I1128 06:53:32.485539 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:32 crc kubenswrapper[4922]: I1128 06:53:32.485555 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:32 crc kubenswrapper[4922]: I1128 06:53:32.485576 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:32 crc kubenswrapper[4922]: I1128 06:53:32.485592 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:32Z","lastTransitionTime":"2025-11-28T06:53:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:53:32 crc kubenswrapper[4922]: I1128 06:53:32.589659 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:32 crc kubenswrapper[4922]: I1128 06:53:32.589723 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:32 crc kubenswrapper[4922]: I1128 06:53:32.589740 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:32 crc kubenswrapper[4922]: I1128 06:53:32.589761 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:32 crc kubenswrapper[4922]: I1128 06:53:32.589775 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:32Z","lastTransitionTime":"2025-11-28T06:53:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 06:53:32 crc kubenswrapper[4922]: I1128 06:53:32.692811 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:32 crc kubenswrapper[4922]: I1128 06:53:32.692886 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:32 crc kubenswrapper[4922]: I1128 06:53:32.692900 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:32 crc kubenswrapper[4922]: I1128 06:53:32.692926 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:32 crc kubenswrapper[4922]: I1128 06:53:32.692942 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:32Z","lastTransitionTime":"2025-11-28T06:53:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:53:32 crc kubenswrapper[4922]: I1128 06:53:32.797131 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:32 crc kubenswrapper[4922]: I1128 06:53:32.797180 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:32 crc kubenswrapper[4922]: I1128 06:53:32.797192 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:32 crc kubenswrapper[4922]: I1128 06:53:32.797210 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:32 crc kubenswrapper[4922]: I1128 06:53:32.797254 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:32Z","lastTransitionTime":"2025-11-28T06:53:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:53:32 crc kubenswrapper[4922]: I1128 06:53:32.900413 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:32 crc kubenswrapper[4922]: I1128 06:53:32.900470 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:32 crc kubenswrapper[4922]: I1128 06:53:32.900492 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:32 crc kubenswrapper[4922]: I1128 06:53:32.900520 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:32 crc kubenswrapper[4922]: I1128 06:53:32.900541 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:32Z","lastTransitionTime":"2025-11-28T06:53:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Nov 28 06:53:33 crc kubenswrapper[4922]: I1128 06:53:33.003951 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 06:53:33 crc kubenswrapper[4922]: I1128 06:53:33.004018 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 06:53:33 crc kubenswrapper[4922]: I1128 06:53:33.004036 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 06:53:33 crc kubenswrapper[4922]: I1128 06:53:33.004061 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 06:53:33 crc kubenswrapper[4922]: I1128 06:53:33.004081 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:33Z","lastTransitionTime":"2025-11-28T06:53:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 06:53:33 crc kubenswrapper[4922]: I1128 06:53:33.107421 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 06:53:33 crc kubenswrapper[4922]: I1128 06:53:33.107479 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 06:53:33 crc kubenswrapper[4922]: I1128 06:53:33.107503 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 06:53:33 crc kubenswrapper[4922]: I1128 06:53:33.107534 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 06:53:33 crc kubenswrapper[4922]: I1128 06:53:33.107557 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:33Z","lastTransitionTime":"2025-11-28T06:53:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 06:53:33 crc kubenswrapper[4922]: I1128 06:53:33.211047 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 06:53:33 crc kubenswrapper[4922]: I1128 06:53:33.211122 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 06:53:33 crc kubenswrapper[4922]: I1128 06:53:33.211148 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 06:53:33 crc kubenswrapper[4922]: I1128 06:53:33.211177 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 06:53:33 crc kubenswrapper[4922]: I1128 06:53:33.211198 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:33Z","lastTransitionTime":"2025-11-28T06:53:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 06:53:33 crc kubenswrapper[4922]: I1128 06:53:33.313405 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 06:53:33 crc kubenswrapper[4922]: I1128 06:53:33.313469 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 06:53:33 crc kubenswrapper[4922]: I1128 06:53:33.313486 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 06:53:33 crc kubenswrapper[4922]: I1128 06:53:33.313514 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 06:53:33 crc kubenswrapper[4922]: I1128 06:53:33.313532 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:33Z","lastTransitionTime":"2025-11-28T06:53:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 06:53:33 crc kubenswrapper[4922]: I1128 06:53:33.416157 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 06:53:33 crc kubenswrapper[4922]: I1128 06:53:33.416257 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 06:53:33 crc kubenswrapper[4922]: I1128 06:53:33.416276 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 06:53:33 crc kubenswrapper[4922]: I1128 06:53:33.416310 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 06:53:33 crc kubenswrapper[4922]: I1128 06:53:33.416328 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:33Z","lastTransitionTime":"2025-11-28T06:53:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 06:53:33 crc kubenswrapper[4922]: I1128 06:53:33.520354 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 06:53:33 crc kubenswrapper[4922]: I1128 06:53:33.520447 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 06:53:33 crc kubenswrapper[4922]: I1128 06:53:33.520471 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 06:53:33 crc kubenswrapper[4922]: I1128 06:53:33.520504 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 06:53:33 crc kubenswrapper[4922]: I1128 06:53:33.520524 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:33Z","lastTransitionTime":"2025-11-28T06:53:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 06:53:33 crc kubenswrapper[4922]: I1128 06:53:33.624330 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 06:53:33 crc kubenswrapper[4922]: I1128 06:53:33.624414 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 06:53:33 crc kubenswrapper[4922]: I1128 06:53:33.624434 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 06:53:33 crc kubenswrapper[4922]: I1128 06:53:33.624466 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 06:53:33 crc kubenswrapper[4922]: I1128 06:53:33.624487 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:33Z","lastTransitionTime":"2025-11-28T06:53:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 06:53:33 crc kubenswrapper[4922]: I1128 06:53:33.728622 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 06:53:33 crc kubenswrapper[4922]: I1128 06:53:33.728692 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 06:53:33 crc kubenswrapper[4922]: I1128 06:53:33.728716 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 06:53:33 crc kubenswrapper[4922]: I1128 06:53:33.728748 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 06:53:33 crc kubenswrapper[4922]: I1128 06:53:33.728774 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:33Z","lastTransitionTime":"2025-11-28T06:53:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 06:53:33 crc kubenswrapper[4922]: I1128 06:53:33.831478 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 06:53:33 crc kubenswrapper[4922]: I1128 06:53:33.831554 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 06:53:33 crc kubenswrapper[4922]: I1128 06:53:33.831568 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 06:53:33 crc kubenswrapper[4922]: I1128 06:53:33.831595 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 06:53:33 crc kubenswrapper[4922]: I1128 06:53:33.831616 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:33Z","lastTransitionTime":"2025-11-28T06:53:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 06:53:33 crc kubenswrapper[4922]: I1128 06:53:33.934925 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 06:53:33 crc kubenswrapper[4922]: I1128 06:53:33.934991 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 06:53:33 crc kubenswrapper[4922]: I1128 06:53:33.935010 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 06:53:33 crc kubenswrapper[4922]: I1128 06:53:33.935037 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 06:53:33 crc kubenswrapper[4922]: I1128 06:53:33.935063 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:33Z","lastTransitionTime":"2025-11-28T06:53:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 06:53:34 crc kubenswrapper[4922]: I1128 06:53:34.038497 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 06:53:34 crc kubenswrapper[4922]: I1128 06:53:34.038567 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 06:53:34 crc kubenswrapper[4922]: I1128 06:53:34.038586 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 06:53:34 crc kubenswrapper[4922]: I1128 06:53:34.038612 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 06:53:34 crc kubenswrapper[4922]: I1128 06:53:34.038631 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:34Z","lastTransitionTime":"2025-11-28T06:53:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 06:53:34 crc kubenswrapper[4922]: I1128 06:53:34.142117 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 06:53:34 crc kubenswrapper[4922]: I1128 06:53:34.142205 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 06:53:34 crc kubenswrapper[4922]: I1128 06:53:34.142278 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 06:53:34 crc kubenswrapper[4922]: I1128 06:53:34.142329 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 06:53:34 crc kubenswrapper[4922]: I1128 06:53:34.142355 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:34Z","lastTransitionTime":"2025-11-28T06:53:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 06:53:34 crc kubenswrapper[4922]: I1128 06:53:34.245565 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 06:53:34 crc kubenswrapper[4922]: I1128 06:53:34.245625 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 06:53:34 crc kubenswrapper[4922]: I1128 06:53:34.245644 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 06:53:34 crc kubenswrapper[4922]: I1128 06:53:34.245666 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 06:53:34 crc kubenswrapper[4922]: I1128 06:53:34.245683 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:34Z","lastTransitionTime":"2025-11-28T06:53:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 06:53:34 crc kubenswrapper[4922]: I1128 06:53:34.348617 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 06:53:34 crc kubenswrapper[4922]: I1128 06:53:34.348693 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 06:53:34 crc kubenswrapper[4922]: I1128 06:53:34.348722 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 06:53:34 crc kubenswrapper[4922]: I1128 06:53:34.348751 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 06:53:34 crc kubenswrapper[4922]: I1128 06:53:34.348775 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:34Z","lastTransitionTime":"2025-11-28T06:53:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 06:53:34 crc kubenswrapper[4922]: I1128 06:53:34.397820 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 28 06:53:34 crc kubenswrapper[4922]: I1128 06:53:34.397884 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-9kfr9"
Nov 28 06:53:34 crc kubenswrapper[4922]: I1128 06:53:34.397850 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 28 06:53:34 crc kubenswrapper[4922]: I1128 06:53:34.397825 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 28 06:53:34 crc kubenswrapper[4922]: E1128 06:53:34.398046 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 28 06:53:34 crc kubenswrapper[4922]: E1128 06:53:34.398249 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 28 06:53:34 crc kubenswrapper[4922]: E1128 06:53:34.398399 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-9kfr9" podUID="709beb43-ed88-4a0a-b384-0c463e469964"
Nov 28 06:53:34 crc kubenswrapper[4922]: E1128 06:53:34.398594 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 28 06:53:34 crc kubenswrapper[4922]: I1128 06:53:34.452861 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 06:53:34 crc kubenswrapper[4922]: I1128 06:53:34.453386 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 06:53:34 crc kubenswrapper[4922]: I1128 06:53:34.454031 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 06:53:34 crc kubenswrapper[4922]: I1128 06:53:34.454384 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 06:53:34 crc kubenswrapper[4922]: I1128 06:53:34.454535 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:34Z","lastTransitionTime":"2025-11-28T06:53:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 06:53:34 crc kubenswrapper[4922]: I1128 06:53:34.557423 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 06:53:34 crc kubenswrapper[4922]: I1128 06:53:34.557822 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 06:53:34 crc kubenswrapper[4922]: I1128 06:53:34.557957 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 06:53:34 crc kubenswrapper[4922]: I1128 06:53:34.558082 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 06:53:34 crc kubenswrapper[4922]: I1128 06:53:34.558193 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:34Z","lastTransitionTime":"2025-11-28T06:53:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 06:53:34 crc kubenswrapper[4922]: I1128 06:53:34.660992 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 06:53:34 crc kubenswrapper[4922]: I1128 06:53:34.661293 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 06:53:34 crc kubenswrapper[4922]: I1128 06:53:34.661421 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 06:53:34 crc kubenswrapper[4922]: I1128 06:53:34.661522 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 06:53:34 crc kubenswrapper[4922]: I1128 06:53:34.661616 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:34Z","lastTransitionTime":"2025-11-28T06:53:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 06:53:34 crc kubenswrapper[4922]: I1128 06:53:34.765030 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 06:53:34 crc kubenswrapper[4922]: I1128 06:53:34.765362 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 06:53:34 crc kubenswrapper[4922]: I1128 06:53:34.765528 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 06:53:34 crc kubenswrapper[4922]: I1128 06:53:34.765637 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 06:53:34 crc kubenswrapper[4922]: I1128 06:53:34.765726 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:34Z","lastTransitionTime":"2025-11-28T06:53:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 06:53:34 crc kubenswrapper[4922]: I1128 06:53:34.876779 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 06:53:34 crc kubenswrapper[4922]: I1128 06:53:34.881725 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 06:53:34 crc kubenswrapper[4922]: I1128 06:53:34.881749 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 06:53:34 crc kubenswrapper[4922]: I1128 06:53:34.881785 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 06:53:34 crc kubenswrapper[4922]: I1128 06:53:34.881807 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:34Z","lastTransitionTime":"2025-11-28T06:53:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 06:53:34 crc kubenswrapper[4922]: I1128 06:53:34.984608 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 06:53:34 crc kubenswrapper[4922]: I1128 06:53:34.984651 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 06:53:34 crc kubenswrapper[4922]: I1128 06:53:34.984660 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 06:53:34 crc kubenswrapper[4922]: I1128 06:53:34.984676 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 06:53:34 crc kubenswrapper[4922]: I1128 06:53:34.984686 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:34Z","lastTransitionTime":"2025-11-28T06:53:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 06:53:35 crc kubenswrapper[4922]: I1128 06:53:35.088110 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 06:53:35 crc kubenswrapper[4922]: I1128 06:53:35.088211 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 06:53:35 crc kubenswrapper[4922]: I1128 06:53:35.088262 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 06:53:35 crc kubenswrapper[4922]: I1128 06:53:35.088296 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 06:53:35 crc kubenswrapper[4922]: I1128 06:53:35.088318 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:35Z","lastTransitionTime":"2025-11-28T06:53:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 06:53:35 crc kubenswrapper[4922]: I1128 06:53:35.191748 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 06:53:35 crc kubenswrapper[4922]: I1128 06:53:35.192165 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 06:53:35 crc kubenswrapper[4922]: I1128 06:53:35.192516 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 06:53:35 crc kubenswrapper[4922]: I1128 06:53:35.192700 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 06:53:35 crc kubenswrapper[4922]: I1128 06:53:35.192835 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:35Z","lastTransitionTime":"2025-11-28T06:53:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 06:53:35 crc kubenswrapper[4922]: I1128 06:53:35.297056 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 06:53:35 crc kubenswrapper[4922]: I1128 06:53:35.297114 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 06:53:35 crc kubenswrapper[4922]: I1128 06:53:35.297129 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 06:53:35 crc kubenswrapper[4922]: I1128 06:53:35.297151 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 06:53:35 crc kubenswrapper[4922]: I1128 06:53:35.297164 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:35Z","lastTransitionTime":"2025-11-28T06:53:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 06:53:35 crc kubenswrapper[4922]: I1128 06:53:35.400107 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 06:53:35 crc kubenswrapper[4922]: I1128 06:53:35.400173 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 06:53:35 crc kubenswrapper[4922]: I1128 06:53:35.400192 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 06:53:35 crc kubenswrapper[4922]: I1128 06:53:35.400257 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 06:53:35 crc kubenswrapper[4922]: I1128 06:53:35.400283 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:35Z","lastTransitionTime":"2025-11-28T06:53:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/.
Has your network provider started?"} Nov 28 06:53:35 crc kubenswrapper[4922]: I1128 06:53:35.418789 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:35Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:35 crc kubenswrapper[4922]: I1128 06:53:35.435374 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-h8wk6" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0498340a-5e95-42bf-a0a6-8ac89a6b8858\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://19e8a18b97500977a51210206bba89633ff9939e989bfe1a6e471f5a23c83520\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zf7vq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b903fc60349ec7d5004f23ec933dce13d646a2c343819495564005dfb4813314\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zf7vq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T06:52:54Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-h8wk6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:35Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:35 crc kubenswrapper[4922]: I1128 06:53:35.472512 4922 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-7gdxt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ac5c6b67-2037-400e-8e03-845b47d8ca67\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d0f038ffe6c8ec5ec836379b827ecb71967119efca3f2cd4ce5d3006fa96a153\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dtxdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2c1a545fef7aec25b21dd112c54eeb7b94306cafaf4732a50de9ff10a409c443\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dtxdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6e9942f8e8cdf6ffd04d46b29be5a53fcc8c4cb5d79f50d7747e2fe6eb744ffd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0
-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dtxdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e91ea6957087e19d43626438d68bf92a19a6f35325de570738354edd61da5bb0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dtxdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4f56366e2355663eb896c09a6710166b89f794a7f17cb317f6c4299e8b317782\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dtxdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://403e86b4b99e8bc85be77996288705f6ea8ff5da8e9b7dfb09749f4341065b55\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\
\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dtxdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3ead32a1ed979f2f568f830c0c57ee284644824f41055dda621deb734bd7dd1b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://67c452725d82ca16f6b13f31f18d78e040d878900953e2b1c2d43afa9b4bbdc0\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-28T06:53:03Z\\\",\\\"message\\\":\\\"e (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1128 06:53:03.775357 6198 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI1128 06:53:03.775409 6198 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI1128 06:53:03.775460 6198 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI1128 06:53:03.775477 6198 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI1128 06:53:03.775513 6198 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI1128 06:53:03.775617 6198 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI1128 06:53:03.775639 6198 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI1128 06:53:03.775644 6198 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI1128 06:53:03.775663 6198 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI1128 06:53:03.775679 6198 handler.go:208] Removed *v1.Node event handler 2\\\\nI1128 06:53:03.775682 6198 factory.go:656] Stopping watch factory\\\\nI1128 06:53:03.775691 6198 handler.go:208] Removed *v1.Node event handler 7\\\\nI1128 06:53:03.775699 6198 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI1128 06:53:03.775703 6198 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI1128 06:53:03.775714 6198 handler.go:208] Removed *v1.Namespace ev\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T06:53:01Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3ead32a1ed979f2f568f830c0c57ee284644824f41055dda621deb734bd7dd1b\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-28T06:53:25Z\\\",\\\"message\\\":\\\"perator/openshift-kube-scheduler-operator-5fdd9b5758-7wtcp: failed to check if 
pod openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-7wtcp is in primary UDN: could not find OVN pod annotation in map[]\\\\nI1128 06:53:25.477598 6343 controller.go:257] Controller udn-host-isolation-manager: error found while processing openshift-multus/network-metrics-daemon-9kfr9: failed to check if pod openshift-multus/network-metrics-daemon-9kfr9 is in primary UDN: could not find OVN pod annotation in map[cluster-autoscaler.kubernetes.io/enable-ds-eviction:false]\\\\nI1128 06:53:25.477616 6343 controller.go:257] Controller udn-host-isolation-manager: error found while processing openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-7tnsd: failed to check if pod openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-7tnsd is in primary UDN: could not find OVN pod annotation in map[openshift.io/required-scc:nonroot-v2 openshift.io/scc:nonroot-v2 seccomp.security.alpha.kubernetes.io/pod:runtime/default]\\\\nE1128 06:53:25.584881 6343 shared_informer.go:316] \\\\\\\"Unhandled Error\\\\\\\" err=\\\\\\\"unable to sync caches for ovn-lb-controller\\\\\\\" logger=\\\\\\\"UnhandledError\\\\\\\"\\\\nI1128 06:53:25.586083 6343 ovnkube.go:599] Stopped ovnkube\\\\nI1128 06:53:25.586127 6343 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nF1\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T06:53:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dtxdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d31c99f96b8af9c7b9c47c4ccd0b6485b23c9eacfe293667038042df8330d8a9\\\",\\\"image\\
\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dtxdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3aaa3ffb5b99838c7969b3e241ad9d7a0bc4797fac3ec52280914abbb7ca1b24\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3aaa3ffb5b99838c7969b3e241ad9d7a0bc4797fac3ec52280914abbb7ca1b24\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T06:52:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dtxdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T06:52:54Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-7gdxt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:35Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:35 crc kubenswrapper[4922]: I1128 06:53:35.492569 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-n9b52" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"31348e3e-fe58-4426-98b7-bd9dd404283b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://07e060fc8cd65cfc5a3e2ba86edb408b32996c25dac44f6f70d2f91b837e9da1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:53:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vkhgr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://03d0e97f4ab12f9cccfefde0d5665e45e14db63343fd1273279d41a8e56b060f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:53:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vkhgr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T06:53:07Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-n9b52\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:35Z is after 2025-08-24T17:21:41Z" Nov 28 
06:53:35 crc kubenswrapper[4922]: I1128 06:53:35.502973 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:35 crc kubenswrapper[4922]: I1128 06:53:35.503067 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:35 crc kubenswrapper[4922]: I1128 06:53:35.503111 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:35 crc kubenswrapper[4922]: I1128 06:53:35.503147 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:35 crc kubenswrapper[4922]: I1128 06:53:35.503172 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:35Z","lastTransitionTime":"2025-11-28T06:53:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:53:35 crc kubenswrapper[4922]: I1128 06:53:35.517746 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"78d1b075-6126-476f-8318-483aeaa7b542\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d8727c0c5c76c4071561c95b14554063c01a2d7d826bfef19624a346f7079096\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c63926690deadbe1a0b9bd4228c1be8c9d959ae52dc13540e1d7da0ef6ce2b44\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d
7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://12e55656cf2a2cadbef853f458f347aabe75bd6b644a48a8c320144a0bbb77f8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ff005273cedb4a3be19dafd07b2572733446d00c1aa3d89dec6dad79a9b37cb7\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://43f9db3464d315f5b596f15327b9e7cfc2935bb8f18492bcf4aa1b2d8e2e8951\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-28T06:52:53Z\\\",\\\"message\\\":\\\" to sync for client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file\\\\nI1128 06:52:53.805479 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\nI1128 06:52:53.805349 1 requestheader_controller.go:172] Starting RequestHeaderAuthRequestController\\\\nI1128 06:52:53.807455 1 shared_informer.go:313] Waiting for caches to sync for RequestHeaderAuthRequestController\\\\nI1128 06:52:53.805665 1 envvar.go:172] \\\\\\\"Feature gate default state\\\\\\\" feature=\\\\\\\"WatchListClient\\\\\\\" enabled=false\\\\nI1128 06:52:53.807562 1 envvar.go:172] \\\\\\\"Feature gate default state\\\\\\\" feature=\\\\\\\"InformerResourceVersion\\\\\\\" enabled=false\\\\nI1128 06:52:53.805922 1 tlsconfig.go:203] \\\\\\\"Loaded serving cert\\\\\\\" certName=\\\\\\\"serving-cert::/tmp/serving-cert-870734072/tls.crt::/tmp/serving-cert-870734072/tls.key\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"localhost\\\\\\\\\\\\\\\" [serving] validServingFor=[localhost] issuer=\\\\\\\\\\\\\\\"check-endpoints-signer@1764312757\\\\\\\\\\\\\\\" (2025-11-28 06:52:36 +0000 UTC to 2025-12-28 06:52:37 +0000 UTC (now=2025-11-28 06:52:53.805871396 +0000 UTC))\\\\\\\"\\\\nI1128 06:52:53.808356 1 named_certificates.go:53] \\\\\\\"Loaded SNI cert\\\\\\\" index=0 certName=\\\\\\\"self-signed loopback\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"apiserver-loopback-client@1764312768\\\\\\\\\\\\\\\" [serving] validServingFor=[apiserver-loopback-client] issuer=\\\\\\\\\\\\\\\"apiserver-loopback-client-ca@1764312768\\\\\\\\\\\\\\\" (2025-11-28 05:52:47 +0000 UTC to 2026-11-28 05:52:47 +0000 UTC (now=2025-11-28 
06:52:53.808326937 +0000 UTC))\\\\\\\"\\\\nI1128 06:52:53.808434 1 secure_serving.go:213] Serving securely on [::]:17697\\\\nI1128 06:52:53.808491 1 genericapiserver.go:683] [graceful-termination] waiting for shutdown to be initiated\\\\nI1128 06:52:53.805962 1 dynamic_serving_content.go:135] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-870734072/tls.crt::/tmp/serving-cert-870734072/tls.key\\\\\\\"\\\\nI1128 06:52:53.808872 1 tlsconfig.go:243] \\\\\\\"Starting DynamicServingCertificateController\\\\\\\"\\\\nF1128 06:52:53.810397 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T06:52:37Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cd17aa15325fbe3a40b81cea5bfec137ef02a8dd2d5f8393be4d206a8360ba89\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:37Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9a1a9e0fb2169a4caf2986906a87573648458725e2571ec377704856d3c16b47\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9a1a9e0fb2169a4caf2986906a87573648458725e2571ec377704856d3c16b47\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T06:52:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T06:52:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T06:52:35Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:35Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:35 crc kubenswrapper[4922]: I1128 06:53:35.539884 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:57Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:57Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4fb859dbed8c4c25965a739a2bd823f90f867a3bb629206994858cd62ba597ff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:35Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:35 crc kubenswrapper[4922]: I1128 06:53:35.560335 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:35Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:35 crc kubenswrapper[4922]: I1128 06:53:35.589111 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-xm948" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"28b50482-ffec-406c-9ff9-9604bce5d5d5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8329d62934c99c8561a76ffc873ea44b49549e6be23697ea4003dcd42798f914\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:53:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-76zhj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ea8d65e8ee7eb92aa290261e8f51b41b46819bc513aaca5c3bbdf3aa90a4acf1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5
db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ea8d65e8ee7eb92aa290261e8f51b41b46819bc513aaca5c3bbdf3aa90a4acf1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T06:52:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-76zhj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b52be6274c308e05417b43b1a405e2156837ac5b1e751cf1bf07f1820e5072c1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b52be6274c308e05417b43b1a405e2156837ac5b1e751cf1bf07f1820e5072c1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T06:52:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T06:52:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-76zhj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2e49c9118ef8a51223ca7a5ed0e966458c25e01d5fe81bffeecf85e3c958177a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2e49c9118ef8a51223ca7a5ed0e966458c25e01d5fe81bffeecf85e3c958177a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T06:52:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T06:52:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io
/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-76zhj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://10808a5202288b64a0583c474df43f7ad4846b876c071e3797e9403416dbd4e5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://10808a5202288b64a0583c474df43f7ad4846b876c071e3797e9403416dbd4e5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T06:52:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T06:52:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-76zhj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f17f7a3f2eae1a08db3ab5d237156d3555583ed11bdf121c4138312913bc2ad1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f17f7a3f2eae1a08db3ab5d237156d3555583ed11bdf121c4138312913bc2ad1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T06:53:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T06:52:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-76zhj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8c558e4eb231b58b703d70d6da8454e7128ac7fc7138897bedcbdeb9e801a08b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8c558e4eb231b58b703d70d6da8454e7128ac7fc7138897bedcbdeb9e801a08b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T06:53:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T06:53:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mo
untPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-76zhj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T06:52:54Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-xm948\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:35Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:35 crc kubenswrapper[4922]: I1128 06:53:35.603652 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-z5d9x" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"21211ec8-3baf-4230-9cd8-c641f6bdc0e1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d42d2096b746c6f0a9210409101237a8b283b9baa914633aefc36971712ec634\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g4hdz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T06:52:57Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-z5d9x\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet 
valid: current time 2025-11-28T06:53:35Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:35 crc kubenswrapper[4922]: I1128 06:53:35.606620 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:35 crc kubenswrapper[4922]: I1128 06:53:35.606687 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:35 crc kubenswrapper[4922]: I1128 06:53:35.606698 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:35 crc kubenswrapper[4922]: I1128 06:53:35.606720 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:35 crc kubenswrapper[4922]: I1128 06:53:35.606734 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:35Z","lastTransitionTime":"2025-11-28T06:53:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:53:35 crc kubenswrapper[4922]: I1128 06:53:35.619241 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-9kfr9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"709beb43-ed88-4a0a-b384-0c463e469964\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:09Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:09Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5p26f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5p26f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T06:53:09Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-9kfr9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:35Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:35 crc kubenswrapper[4922]: I1128 06:53:35.637858 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f54a7fc3-dff2-4919-91a9-7defe17b717e\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2b39d53d3c00be07ad6ed44fb37961669118efd3f0a953a39e05c90c9fc30319\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1c84fe4f54f9b9fc76f23f13aaee875e9d127be3dec3e3952893436fb9d94936\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bf8a543b9af64068e2164c1fb7fcf8117e36a3f1a6c6e40df2a63da8fea86612\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7ef4b355167e11c82fb8067373d0d1b4ec5551157e683043c98f9249082795d2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T06:52:35Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:35Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:35 crc kubenswrapper[4922]: I1128 06:53:35.655760 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:35Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:35 crc kubenswrapper[4922]: I1128 06:53:35.672753 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-w9zxj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e1f29751-83a6-4469-b733-50e654026f8c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://679d64be6eca63ee652a4cb6cc5432c6ce549d1de243bdfbde115af634e770e3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ghdft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T06:52:54Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-w9zxj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 
2025-11-28T06:53:35Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:35 crc kubenswrapper[4922]: I1128 06:53:35.690443 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-jgzjd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b05f16bb-1729-4fd8-883a-4fb960bf4cff\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://099001160eba2d5545fdd42a6fac2b9842549a64b5d3fe093c274e073a156e5a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fwd5b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":
\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T06:52:54Z\\\"}}\" for pod \"openshift-multus\"/\"multus-jgzjd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:35Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:35 crc kubenswrapper[4922]: I1128 06:53:35.709374 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e3415f94-87c0-4a05-9d74-02ac020c4d35\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8db4ea0b3e2badcbe285f1758b150d335c8c1f4d92478a2de587cf9d345f3640\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a5b5d5b7a735c902e218bbb524cda2f64b167fcced9cc07a47bff7e59cb55115\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://900d4f306d3a3e4386262c1ee1031ad03503f6c262e9305c581397b3bf1a6a60\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a938
0066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://73c8b650a2a0ebc55d6531c14f5c6e5e4a7ffbac511aea8400c6632289aeb275\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://73c8b650a2a0ebc55d6531c14f5c6e5e4a7ffbac511aea8400c6632289aeb275\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T06:52:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T06:52:36Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T06:52:35Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:35Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:35 crc kubenswrapper[4922]: I1128 06:53:35.710086 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:35 crc kubenswrapper[4922]: I1128 06:53:35.710141 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:35 crc kubenswrapper[4922]: I1128 06:53:35.710157 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:35 crc kubenswrapper[4922]: I1128 06:53:35.710180 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:35 crc kubenswrapper[4922]: I1128 06:53:35.710191 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:35Z","lastTransitionTime":"2025-11-28T06:53:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 06:53:35 crc kubenswrapper[4922]: I1128 06:53:35.729135 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://60275d4709ecec957c37c52e762628422258288394fb23bb6190e98253977288\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d7be069e52fb9de73b7d4297a34eaab68e25c764ed60172e87146259d0d0b6ed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:35Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:35 crc kubenswrapper[4922]: I1128 06:53:35.746882 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8de96806bba4cb76f53ccf39f1188732d96955671049b550589a836ed21e0616\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:35Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:35 crc kubenswrapper[4922]: I1128 06:53:35.813129 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:35 crc kubenswrapper[4922]: I1128 06:53:35.813524 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:35 crc kubenswrapper[4922]: I1128 06:53:35.813534 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:35 crc kubenswrapper[4922]: I1128 06:53:35.813554 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:35 crc kubenswrapper[4922]: I1128 06:53:35.813565 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:35Z","lastTransitionTime":"2025-11-28T06:53:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 06:53:35 crc kubenswrapper[4922]: I1128 06:53:35.917905 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:35 crc kubenswrapper[4922]: I1128 06:53:35.917970 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:35 crc kubenswrapper[4922]: I1128 06:53:35.917979 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:35 crc kubenswrapper[4922]: I1128 06:53:35.917998 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:35 crc kubenswrapper[4922]: I1128 06:53:35.918009 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:35Z","lastTransitionTime":"2025-11-28T06:53:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:53:36 crc kubenswrapper[4922]: I1128 06:53:36.021309 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:36 crc kubenswrapper[4922]: I1128 06:53:36.021382 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:36 crc kubenswrapper[4922]: I1128 06:53:36.021407 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:36 crc kubenswrapper[4922]: I1128 06:53:36.021438 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:36 crc kubenswrapper[4922]: I1128 06:53:36.021459 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:36Z","lastTransitionTime":"2025-11-28T06:53:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:53:36 crc kubenswrapper[4922]: I1128 06:53:36.125581 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:36 crc kubenswrapper[4922]: I1128 06:53:36.125632 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:36 crc kubenswrapper[4922]: I1128 06:53:36.125649 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:36 crc kubenswrapper[4922]: I1128 06:53:36.125672 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:36 crc kubenswrapper[4922]: I1128 06:53:36.125694 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:36Z","lastTransitionTime":"2025-11-28T06:53:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 06:53:36 crc kubenswrapper[4922]: I1128 06:53:36.228641 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:36 crc kubenswrapper[4922]: I1128 06:53:36.228694 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:36 crc kubenswrapper[4922]: I1128 06:53:36.228709 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:36 crc kubenswrapper[4922]: I1128 06:53:36.228730 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:36 crc kubenswrapper[4922]: I1128 06:53:36.228743 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:36Z","lastTransitionTime":"2025-11-28T06:53:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:53:36 crc kubenswrapper[4922]: I1128 06:53:36.332334 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:36 crc kubenswrapper[4922]: I1128 06:53:36.332398 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:36 crc kubenswrapper[4922]: I1128 06:53:36.332418 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:36 crc kubenswrapper[4922]: I1128 06:53:36.332445 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:36 crc kubenswrapper[4922]: I1128 06:53:36.332461 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:36Z","lastTransitionTime":"2025-11-28T06:53:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:53:36 crc kubenswrapper[4922]: I1128 06:53:36.398340 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 06:53:36 crc kubenswrapper[4922]: E1128 06:53:36.398543 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 06:53:36 crc kubenswrapper[4922]: I1128 06:53:36.398800 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 06:53:36 crc kubenswrapper[4922]: I1128 06:53:36.398861 4922 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/network-metrics-daemon-9kfr9" Nov 28 06:53:36 crc kubenswrapper[4922]: I1128 06:53:36.398912 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 06:53:36 crc kubenswrapper[4922]: E1128 06:53:36.399256 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-9kfr9" podUID="709beb43-ed88-4a0a-b384-0c463e469964" Nov 28 06:53:36 crc kubenswrapper[4922]: E1128 06:53:36.399403 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 06:53:36 crc kubenswrapper[4922]: E1128 06:53:36.399627 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 06:53:36 crc kubenswrapper[4922]: I1128 06:53:36.435976 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:36 crc kubenswrapper[4922]: I1128 06:53:36.436029 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:36 crc kubenswrapper[4922]: I1128 06:53:36.436046 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:36 crc kubenswrapper[4922]: I1128 06:53:36.436070 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:36 crc kubenswrapper[4922]: I1128 06:53:36.436088 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:36Z","lastTransitionTime":"2025-11-28T06:53:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 06:53:36 crc kubenswrapper[4922]: I1128 06:53:36.540146 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:36 crc kubenswrapper[4922]: I1128 06:53:36.540185 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:36 crc kubenswrapper[4922]: I1128 06:53:36.540199 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:36 crc kubenswrapper[4922]: I1128 06:53:36.540215 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:36 crc kubenswrapper[4922]: I1128 06:53:36.540247 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:36Z","lastTransitionTime":"2025-11-28T06:53:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:53:36 crc kubenswrapper[4922]: I1128 06:53:36.643581 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:36 crc kubenswrapper[4922]: I1128 06:53:36.643699 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:36 crc kubenswrapper[4922]: I1128 06:53:36.643721 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:36 crc kubenswrapper[4922]: I1128 06:53:36.643747 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:36 crc kubenswrapper[4922]: I1128 06:53:36.643766 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:36Z","lastTransitionTime":"2025-11-28T06:53:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:53:36 crc kubenswrapper[4922]: I1128 06:53:36.746955 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:36 crc kubenswrapper[4922]: I1128 06:53:36.747004 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:36 crc kubenswrapper[4922]: I1128 06:53:36.747021 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:36 crc kubenswrapper[4922]: I1128 06:53:36.747042 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:36 crc kubenswrapper[4922]: I1128 06:53:36.747059 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:36Z","lastTransitionTime":"2025-11-28T06:53:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 06:53:36 crc kubenswrapper[4922]: I1128 06:53:36.850158 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:36 crc kubenswrapper[4922]: I1128 06:53:36.850244 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:36 crc kubenswrapper[4922]: I1128 06:53:36.850262 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:36 crc kubenswrapper[4922]: I1128 06:53:36.850287 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:36 crc kubenswrapper[4922]: I1128 06:53:36.850304 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:36Z","lastTransitionTime":"2025-11-28T06:53:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:53:36 crc kubenswrapper[4922]: I1128 06:53:36.953725 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:36 crc kubenswrapper[4922]: I1128 06:53:36.953785 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:36 crc kubenswrapper[4922]: I1128 06:53:36.953803 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:36 crc kubenswrapper[4922]: I1128 06:53:36.953827 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:36 crc kubenswrapper[4922]: I1128 06:53:36.953844 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:36Z","lastTransitionTime":"2025-11-28T06:53:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:53:37 crc kubenswrapper[4922]: I1128 06:53:37.057125 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:37 crc kubenswrapper[4922]: I1128 06:53:37.057183 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:37 crc kubenswrapper[4922]: I1128 06:53:37.057199 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:37 crc kubenswrapper[4922]: I1128 06:53:37.057248 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:37 crc kubenswrapper[4922]: I1128 06:53:37.057267 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:37Z","lastTransitionTime":"2025-11-28T06:53:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 06:53:37 crc kubenswrapper[4922]: I1128 06:53:37.159742 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:37 crc kubenswrapper[4922]: I1128 06:53:37.160098 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:37 crc kubenswrapper[4922]: I1128 06:53:37.160337 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:37 crc kubenswrapper[4922]: I1128 06:53:37.160965 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:37 crc kubenswrapper[4922]: I1128 06:53:37.161012 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:37Z","lastTransitionTime":"2025-11-28T06:53:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:53:37 crc kubenswrapper[4922]: I1128 06:53:37.264378 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:37 crc kubenswrapper[4922]: I1128 06:53:37.264437 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:37 crc kubenswrapper[4922]: I1128 06:53:37.264454 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:37 crc kubenswrapper[4922]: I1128 06:53:37.264479 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:37 crc kubenswrapper[4922]: I1128 06:53:37.264495 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:37Z","lastTransitionTime":"2025-11-28T06:53:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:53:37 crc kubenswrapper[4922]: I1128 06:53:37.367905 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:37 crc kubenswrapper[4922]: I1128 06:53:37.367971 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:37 crc kubenswrapper[4922]: I1128 06:53:37.367985 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:37 crc kubenswrapper[4922]: I1128 06:53:37.368010 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:37 crc kubenswrapper[4922]: I1128 06:53:37.368028 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:37Z","lastTransitionTime":"2025-11-28T06:53:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 06:53:37 crc kubenswrapper[4922]: I1128 06:53:37.515884 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:37 crc kubenswrapper[4922]: I1128 06:53:37.515949 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:37 crc kubenswrapper[4922]: I1128 06:53:37.515966 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:37 crc kubenswrapper[4922]: I1128 06:53:37.515992 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:37 crc kubenswrapper[4922]: I1128 06:53:37.516013 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:37Z","lastTransitionTime":"2025-11-28T06:53:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:53:37 crc kubenswrapper[4922]: I1128 06:53:37.618758 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:37 crc kubenswrapper[4922]: I1128 06:53:37.618816 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:37 crc kubenswrapper[4922]: I1128 06:53:37.618832 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:37 crc kubenswrapper[4922]: I1128 06:53:37.618856 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:37 crc kubenswrapper[4922]: I1128 06:53:37.618872 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:37Z","lastTransitionTime":"2025-11-28T06:53:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:53:37 crc kubenswrapper[4922]: I1128 06:53:37.721935 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:37 crc kubenswrapper[4922]: I1128 06:53:37.721994 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:37 crc kubenswrapper[4922]: I1128 06:53:37.722012 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:37 crc kubenswrapper[4922]: I1128 06:53:37.722035 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:37 crc kubenswrapper[4922]: I1128 06:53:37.722051 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:37Z","lastTransitionTime":"2025-11-28T06:53:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 06:53:37 crc kubenswrapper[4922]: I1128 06:53:37.825495 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:37 crc kubenswrapper[4922]: I1128 06:53:37.825903 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:37 crc kubenswrapper[4922]: I1128 06:53:37.826045 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:37 crc kubenswrapper[4922]: I1128 06:53:37.826192 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:37 crc kubenswrapper[4922]: I1128 06:53:37.826731 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:37Z","lastTransitionTime":"2025-11-28T06:53:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:53:37 crc kubenswrapper[4922]: I1128 06:53:37.930072 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:37 crc kubenswrapper[4922]: I1128 06:53:37.930471 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:37 crc kubenswrapper[4922]: I1128 06:53:37.930605 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:37 crc kubenswrapper[4922]: I1128 06:53:37.930760 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:37 crc kubenswrapper[4922]: I1128 06:53:37.930902 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:37Z","lastTransitionTime":"2025-11-28T06:53:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:53:38 crc kubenswrapper[4922]: I1128 06:53:38.034999 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:38 crc kubenswrapper[4922]: I1128 06:53:38.035377 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:38 crc kubenswrapper[4922]: I1128 06:53:38.035933 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:38 crc kubenswrapper[4922]: I1128 06:53:38.036169 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:38 crc kubenswrapper[4922]: I1128 06:53:38.036414 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:38Z","lastTransitionTime":"2025-11-28T06:53:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 06:53:38 crc kubenswrapper[4922]: I1128 06:53:38.140545 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:38 crc kubenswrapper[4922]: I1128 06:53:38.140598 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:38 crc kubenswrapper[4922]: I1128 06:53:38.140607 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:38 crc kubenswrapper[4922]: I1128 06:53:38.140625 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:38 crc kubenswrapper[4922]: I1128 06:53:38.140638 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:38Z","lastTransitionTime":"2025-11-28T06:53:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:53:38 crc kubenswrapper[4922]: I1128 06:53:38.244185 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:38 crc kubenswrapper[4922]: I1128 06:53:38.244269 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:38 crc kubenswrapper[4922]: I1128 06:53:38.244287 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:38 crc kubenswrapper[4922]: I1128 06:53:38.244310 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:38 crc kubenswrapper[4922]: I1128 06:53:38.244327 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:38Z","lastTransitionTime":"2025-11-28T06:53:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:53:38 crc kubenswrapper[4922]: I1128 06:53:38.269522 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:38 crc kubenswrapper[4922]: I1128 06:53:38.269576 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:38 crc kubenswrapper[4922]: I1128 06:53:38.269595 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:38 crc kubenswrapper[4922]: I1128 06:53:38.269617 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:38 crc kubenswrapper[4922]: I1128 06:53:38.269634 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:38Z","lastTransitionTime":"2025-11-28T06:53:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 06:53:38 crc kubenswrapper[4922]: E1128 06:53:38.289381 4922 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T06:53:38Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:38Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T06:53:38Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:38Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T06:53:38Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:38Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T06:53:38Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:38Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"3ea1e6bb-61b7-453d-b9cb-290e4e3cdf54\\\",\\\"systemUUID\\\":\\\"a83ea3b2-2af6-4e19-83ef-b63bfe4faed4\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:38Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:38 crc kubenswrapper[4922]: I1128 06:53:38.294495 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:38 crc kubenswrapper[4922]: I1128 06:53:38.294580 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 28 06:53:38 crc kubenswrapper[4922]: I1128 06:53:38.294594 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:38 crc kubenswrapper[4922]: I1128 06:53:38.294623 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:38 crc kubenswrapper[4922]: I1128 06:53:38.294639 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:38Z","lastTransitionTime":"2025-11-28T06:53:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:53:38 crc kubenswrapper[4922]: E1128 06:53:38.311552 4922 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T06:53:38Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:38Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T06:53:38Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:38Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T06:53:38Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:38Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T06:53:38Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:38Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"3ea1e6bb-61b7-453d-b9cb-290e4e3cdf54\\\",\\\"systemUUID\\\":\\\"a83ea3b2-2af6-4e19-83ef-b63bfe4faed4\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:38Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:38 crc kubenswrapper[4922]: I1128 06:53:38.316705 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:38 crc kubenswrapper[4922]: I1128 06:53:38.316753 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 28 06:53:38 crc kubenswrapper[4922]: I1128 06:53:38.316772 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:38 crc kubenswrapper[4922]: I1128 06:53:38.316796 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:38 crc kubenswrapper[4922]: I1128 06:53:38.316814 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:38Z","lastTransitionTime":"2025-11-28T06:53:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:53:38 crc kubenswrapper[4922]: E1128 06:53:38.338395 4922 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T06:53:38Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:38Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T06:53:38Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:38Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T06:53:38Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:38Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T06:53:38Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:38Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"3ea1e6bb-61b7-453d-b9cb-290e4e3cdf54\\\",\\\"systemUUID\\\":\\\"a83ea3b2-2af6-4e19-83ef-b63bfe4faed4\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:38Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:38 crc kubenswrapper[4922]: I1128 06:53:38.344595 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:38 crc kubenswrapper[4922]: I1128 06:53:38.344793 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 28 06:53:38 crc kubenswrapper[4922]: I1128 06:53:38.344956 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:38 crc kubenswrapper[4922]: I1128 06:53:38.345103 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:38 crc kubenswrapper[4922]: I1128 06:53:38.345284 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:38Z","lastTransitionTime":"2025-11-28T06:53:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:53:38 crc kubenswrapper[4922]: E1128 06:53:38.363875 4922 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T06:53:38Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:38Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T06:53:38Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:38Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T06:53:38Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:38Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T06:53:38Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:38Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"3ea1e6bb-61b7-453d-b9cb-290e4e3cdf54\\\",\\\"systemUUID\\\":\\\"a83ea3b2-2af6-4e19-83ef-b63bfe4faed4\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:38Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:38 crc kubenswrapper[4922]: I1128 06:53:38.368981 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:38 crc kubenswrapper[4922]: I1128 06:53:38.369016 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 28 06:53:38 crc kubenswrapper[4922]: I1128 06:53:38.369029 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:38 crc kubenswrapper[4922]: I1128 06:53:38.369045 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:38 crc kubenswrapper[4922]: I1128 06:53:38.369056 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:38Z","lastTransitionTime":"2025-11-28T06:53:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:53:38 crc kubenswrapper[4922]: E1128 06:53:38.384979 4922 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T06:53:38Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:38Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T06:53:38Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:38Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T06:53:38Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:38Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T06:53:38Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:38Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"3ea1e6bb-61b7-453d-b9cb-290e4e3cdf54\\\",\\\"systemUUID\\\":\\\"a83ea3b2-2af6-4e19-83ef-b63bfe4faed4\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:38Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:38 crc kubenswrapper[4922]: E1128 06:53:38.385120 4922 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Nov 28 06:53:38 crc kubenswrapper[4922]: I1128 06:53:38.387122 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Nov 28 06:53:38 crc kubenswrapper[4922]: I1128 06:53:38.387159 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:38 crc kubenswrapper[4922]: I1128 06:53:38.387176 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:38 crc kubenswrapper[4922]: I1128 06:53:38.387204 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:38 crc kubenswrapper[4922]: I1128 06:53:38.387236 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:38Z","lastTransitionTime":"2025-11-28T06:53:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:53:38 crc kubenswrapper[4922]: I1128 06:53:38.397755 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 06:53:38 crc kubenswrapper[4922]: I1128 06:53:38.397777 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 06:53:38 crc kubenswrapper[4922]: E1128 06:53:38.397901 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 06:53:38 crc kubenswrapper[4922]: I1128 06:53:38.397901 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-9kfr9" Nov 28 06:53:38 crc kubenswrapper[4922]: I1128 06:53:38.397949 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 06:53:38 crc kubenswrapper[4922]: E1128 06:53:38.398141 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 06:53:38 crc kubenswrapper[4922]: E1128 06:53:38.398199 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-9kfr9" podUID="709beb43-ed88-4a0a-b384-0c463e469964" Nov 28 06:53:38 crc kubenswrapper[4922]: E1128 06:53:38.398347 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 06:53:38 crc kubenswrapper[4922]: I1128 06:53:38.490252 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:38 crc kubenswrapper[4922]: I1128 06:53:38.490326 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:38 crc kubenswrapper[4922]: I1128 06:53:38.490348 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:38 crc kubenswrapper[4922]: I1128 06:53:38.490381 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:38 crc kubenswrapper[4922]: I1128 06:53:38.490404 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:38Z","lastTransitionTime":"2025-11-28T06:53:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:53:38 crc kubenswrapper[4922]: I1128 06:53:38.593874 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:38 crc kubenswrapper[4922]: I1128 06:53:38.593967 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:38 crc kubenswrapper[4922]: I1128 06:53:38.593993 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:38 crc kubenswrapper[4922]: I1128 06:53:38.594029 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:38 crc kubenswrapper[4922]: I1128 06:53:38.594057 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:38Z","lastTransitionTime":"2025-11-28T06:53:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 06:53:38 crc kubenswrapper[4922]: I1128 06:53:38.697076 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:38 crc kubenswrapper[4922]: I1128 06:53:38.697146 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:38 crc kubenswrapper[4922]: I1128 06:53:38.697165 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:38 crc kubenswrapper[4922]: I1128 06:53:38.697192 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:38 crc kubenswrapper[4922]: I1128 06:53:38.697214 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:38Z","lastTransitionTime":"2025-11-28T06:53:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:53:38 crc kubenswrapper[4922]: I1128 06:53:38.800427 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:38 crc kubenswrapper[4922]: I1128 06:53:38.800500 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:38 crc kubenswrapper[4922]: I1128 06:53:38.800524 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:38 crc kubenswrapper[4922]: I1128 06:53:38.800557 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:38 crc kubenswrapper[4922]: I1128 06:53:38.800576 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:38Z","lastTransitionTime":"2025-11-28T06:53:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:53:38 crc kubenswrapper[4922]: I1128 06:53:38.904382 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:38 crc kubenswrapper[4922]: I1128 06:53:38.904433 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:38 crc kubenswrapper[4922]: I1128 06:53:38.904450 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:38 crc kubenswrapper[4922]: I1128 06:53:38.904474 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:38 crc kubenswrapper[4922]: I1128 06:53:38.904491 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:38Z","lastTransitionTime":"2025-11-28T06:53:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 06:53:39 crc kubenswrapper[4922]: I1128 06:53:39.009003 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:39 crc kubenswrapper[4922]: I1128 06:53:39.009069 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:39 crc kubenswrapper[4922]: I1128 06:53:39.009090 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:39 crc kubenswrapper[4922]: I1128 06:53:39.009120 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:39 crc kubenswrapper[4922]: I1128 06:53:39.009144 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:39Z","lastTransitionTime":"2025-11-28T06:53:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:53:39 crc kubenswrapper[4922]: I1128 06:53:39.111950 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:39 crc kubenswrapper[4922]: I1128 06:53:39.111993 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:39 crc kubenswrapper[4922]: I1128 06:53:39.112004 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:39 crc kubenswrapper[4922]: I1128 06:53:39.112024 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:39 crc kubenswrapper[4922]: I1128 06:53:39.112035 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:39Z","lastTransitionTime":"2025-11-28T06:53:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:53:39 crc kubenswrapper[4922]: I1128 06:53:39.215666 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:39 crc kubenswrapper[4922]: I1128 06:53:39.215738 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:39 crc kubenswrapper[4922]: I1128 06:53:39.215755 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:39 crc kubenswrapper[4922]: I1128 06:53:39.215781 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:39 crc kubenswrapper[4922]: I1128 06:53:39.215805 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:39Z","lastTransitionTime":"2025-11-28T06:53:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 06:53:39 crc kubenswrapper[4922]: I1128 06:53:39.319246 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:39 crc kubenswrapper[4922]: I1128 06:53:39.319575 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:39 crc kubenswrapper[4922]: I1128 06:53:39.319664 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:39 crc kubenswrapper[4922]: I1128 06:53:39.319759 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:39 crc kubenswrapper[4922]: I1128 06:53:39.319850 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:39Z","lastTransitionTime":"2025-11-28T06:53:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:53:39 crc kubenswrapper[4922]: I1128 06:53:39.422069 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:39 crc kubenswrapper[4922]: I1128 06:53:39.422117 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:39 crc kubenswrapper[4922]: I1128 06:53:39.422128 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:39 crc kubenswrapper[4922]: I1128 06:53:39.422146 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:39 crc kubenswrapper[4922]: I1128 06:53:39.422158 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:39Z","lastTransitionTime":"2025-11-28T06:53:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:53:39 crc kubenswrapper[4922]: I1128 06:53:39.526268 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:39 crc kubenswrapper[4922]: I1128 06:53:39.526408 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:39 crc kubenswrapper[4922]: I1128 06:53:39.526478 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:39 crc kubenswrapper[4922]: I1128 06:53:39.526516 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:39 crc kubenswrapper[4922]: I1128 06:53:39.526576 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:39Z","lastTransitionTime":"2025-11-28T06:53:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 06:53:39 crc kubenswrapper[4922]: I1128 06:53:39.629927 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:39 crc kubenswrapper[4922]: I1128 06:53:39.629971 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:39 crc kubenswrapper[4922]: I1128 06:53:39.629983 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:39 crc kubenswrapper[4922]: I1128 06:53:39.630000 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:39 crc kubenswrapper[4922]: I1128 06:53:39.630012 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:39Z","lastTransitionTime":"2025-11-28T06:53:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:53:39 crc kubenswrapper[4922]: I1128 06:53:39.733614 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:39 crc kubenswrapper[4922]: I1128 06:53:39.733661 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:39 crc kubenswrapper[4922]: I1128 06:53:39.733672 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:39 crc kubenswrapper[4922]: I1128 06:53:39.733690 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:39 crc kubenswrapper[4922]: I1128 06:53:39.733703 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:39Z","lastTransitionTime":"2025-11-28T06:53:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:53:39 crc kubenswrapper[4922]: I1128 06:53:39.836615 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:39 crc kubenswrapper[4922]: I1128 06:53:39.836677 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:39 crc kubenswrapper[4922]: I1128 06:53:39.836695 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:39 crc kubenswrapper[4922]: I1128 06:53:39.836719 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:39 crc kubenswrapper[4922]: I1128 06:53:39.836739 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:39Z","lastTransitionTime":"2025-11-28T06:53:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 06:53:39 crc kubenswrapper[4922]: I1128 06:53:39.939435 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:39 crc kubenswrapper[4922]: I1128 06:53:39.939505 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:39 crc kubenswrapper[4922]: I1128 06:53:39.939523 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:39 crc kubenswrapper[4922]: I1128 06:53:39.939547 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:39 crc kubenswrapper[4922]: I1128 06:53:39.939567 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:39Z","lastTransitionTime":"2025-11-28T06:53:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:53:40 crc kubenswrapper[4922]: I1128 06:53:40.042078 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:40 crc kubenswrapper[4922]: I1128 06:53:40.042130 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:40 crc kubenswrapper[4922]: I1128 06:53:40.042142 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:40 crc kubenswrapper[4922]: I1128 06:53:40.042162 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:40 crc kubenswrapper[4922]: I1128 06:53:40.042175 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:40Z","lastTransitionTime":"2025-11-28T06:53:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:53:40 crc kubenswrapper[4922]: I1128 06:53:40.144914 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:40 crc kubenswrapper[4922]: I1128 06:53:40.144971 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:40 crc kubenswrapper[4922]: I1128 06:53:40.144988 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:40 crc kubenswrapper[4922]: I1128 06:53:40.145012 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:40 crc kubenswrapper[4922]: I1128 06:53:40.145028 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:40Z","lastTransitionTime":"2025-11-28T06:53:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 06:53:40 crc kubenswrapper[4922]: I1128 06:53:40.247667 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:40 crc kubenswrapper[4922]: I1128 06:53:40.247743 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:40 crc kubenswrapper[4922]: I1128 06:53:40.247769 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:40 crc kubenswrapper[4922]: I1128 06:53:40.247801 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:40 crc kubenswrapper[4922]: I1128 06:53:40.247825 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:40Z","lastTransitionTime":"2025-11-28T06:53:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:53:40 crc kubenswrapper[4922]: I1128 06:53:40.350506 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:40 crc kubenswrapper[4922]: I1128 06:53:40.350551 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:40 crc kubenswrapper[4922]: I1128 06:53:40.350562 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:40 crc kubenswrapper[4922]: I1128 06:53:40.350580 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:40 crc kubenswrapper[4922]: I1128 06:53:40.350590 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:40Z","lastTransitionTime":"2025-11-28T06:53:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:53:40 crc kubenswrapper[4922]: I1128 06:53:40.398487 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 06:53:40 crc kubenswrapper[4922]: I1128 06:53:40.398526 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-9kfr9" Nov 28 06:53:40 crc kubenswrapper[4922]: I1128 06:53:40.398584 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 06:53:40 crc kubenswrapper[4922]: E1128 06:53:40.398610 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 06:53:40 crc kubenswrapper[4922]: E1128 06:53:40.398699 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-9kfr9" podUID="709beb43-ed88-4a0a-b384-0c463e469964" Nov 28 06:53:40 crc kubenswrapper[4922]: I1128 06:53:40.398537 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 06:53:40 crc kubenswrapper[4922]: E1128 06:53:40.398775 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 06:53:40 crc kubenswrapper[4922]: E1128 06:53:40.398831 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 06:53:40 crc kubenswrapper[4922]: I1128 06:53:40.399413 4922 scope.go:117] "RemoveContainer" containerID="3ead32a1ed979f2f568f830c0c57ee284644824f41055dda621deb734bd7dd1b" Nov 28 06:53:40 crc kubenswrapper[4922]: I1128 06:53:40.416711 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:40Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:40 crc kubenswrapper[4922]: I1128 06:53:40.440452 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-h8wk6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0498340a-5e95-42bf-a0a6-8ac89a6b8858\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://19e8a18b97500977a51210206bba89633ff9939e989bfe1a6e471f5a23c83520\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zf7vq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b903fc60349ec7d5004f23ec933dce13d646a2c343819495564005dfb4813314\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":tru
e,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zf7vq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T06:52:54Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-h8wk6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:40Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:40 crc kubenswrapper[4922]: I1128 06:53:40.452997 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:40 crc kubenswrapper[4922]: I1128 06:53:40.453035 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:40 crc kubenswrapper[4922]: I1128 06:53:40.453046 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:40 crc kubenswrapper[4922]: I1128 06:53:40.453098 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:40 crc kubenswrapper[4922]: I1128 06:53:40.453112 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:40Z","lastTransitionTime":"2025-11-28T06:53:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 06:53:40 crc kubenswrapper[4922]: I1128 06:53:40.467604 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"78d1b075-6126-476f-8318-483aeaa7b542\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d8727c0c5c76c4071561c95b14554063c01a2d7d826bfef19624a346f7079096\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c63926690deadbe1a0b9bd4228c1be8c9d959ae52dc13540e1d7da0ef6ce2b44\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://12e55656cf2a2cadbef853f458f347aabe75bd6b644a48a8c320144a0bbb77f8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/ku
bernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ff005273cedb4a3be19dafd07b2572733446d00c1aa3d89dec6dad79a9b37cb7\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://43f9db3464d315f5b596f15327b9e7cfc2935bb8f18492bcf4aa1b2d8e2e8951\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-28T06:52:53Z\\\",\\\"message\\\":\\\" to sync for client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file\\\\nI1128 06:52:53.805479 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\nI1128 06:52:53.805349 1 requestheader_controller.go:172] Starting RequestHeaderAuthRequestController\\\\nI1128 06:52:53.807455 1 shared_informer.go:313] Waiting for caches to sync for RequestHeaderAuthRequestController\\\\nI1128 06:52:53.805665 1 envvar.go:172] \\\\\\\"Feature gate default state\\\\\\\" feature=\\\\\\\"WatchListClient\\\\\\\" enabled=false\\\\nI1128 06:52:53.807562 1 envvar.go:172] \\\\\\\"Feature gate default state\\\\\\\" feature=\\\\\\\"InformerResourceVersion\\\\\\\" enabled=false\\\\nI1128 06:52:53.805922 1 tlsconfig.go:203] \\\\\\\"Loaded serving cert\\\\\\\" certName=\\\\\\\"serving-cert::/tmp/serving-cert-870734072/tls.crt::/tmp/serving-cert-870734072/tls.key\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"localhost\\\\\\\\\\\\\\\" [serving] validServingFor=[localhost] issuer=\\\\\\\\\\\\\\\"check-endpoints-signer@1764312757\\\\\\\\\\\\\\\" (2025-11-28 06:52:36 +0000 UTC to 2025-12-28 06:52:37 +0000 UTC (now=2025-11-28 06:52:53.805871396 +0000 UTC))\\\\\\\"\\\\nI1128 06:52:53.808356 1 named_certificates.go:53] \\\\\\\"Loaded SNI cert\\\\\\\" index=0 certName=\\\\\\\"self-signed loopback\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"apiserver-loopback-client@1764312768\\\\\\\\\\\\\\\" [serving] validServingFor=[apiserver-loopback-client] issuer=\\\\\\\\\\\\\\\"apiserver-loopback-client-ca@1764312768\\\\\\\\\\\\\\\" (2025-11-28 05:52:47 +0000 UTC to 2026-11-28 05:52:47 +0000 UTC (now=2025-11-28 06:52:53.808326937 +0000 UTC))\\\\\\\"\\\\nI1128 06:52:53.808434 1 secure_serving.go:213] Serving securely on [::]:17697\\\\nI1128 06:52:53.808491 1 genericapiserver.go:683] [graceful-termination] waiting for shutdown to be initiated\\\\nI1128 06:52:53.805962 1 dynamic_serving_content.go:135] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-870734072/tls.crt::/tmp/serving-cert-870734072/tls.key\\\\\\\"\\\\nI1128 06:52:53.808872 1 tlsconfig.go:243] \\\\\\\"Starting DynamicServingCertificateController\\\\\\\"\\\\nF1128 06:52:53.810397 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T06:52:37Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cd17aa15325fbe3a40b81cea5bfec137ef02a8dd2d5f8393be4d206a8360ba89\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:37Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9a1a9e0fb2169a4caf2986906a87573648458725e2571ec377704856d3c16b47\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9a1a9e0fb2169a4caf2986906a87573648458725e2571ec377704856d3c16b47\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T06:52:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T06:52:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T06:52:35Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:40Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:40 crc kubenswrapper[4922]: I1128 06:53:40.484373 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:57Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:57Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4fb859dbed8c4c25965a739a2bd823f90f867a3bb629206994858cd62ba597ff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:40Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:40 crc kubenswrapper[4922]: I1128 06:53:40.502993 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
Nov 28 06:53:40 crc kubenswrapper[4922]: I1128 06:53:40.532809 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-7gdxt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ac5c6b67-2037-400e-8e03-845b47d8ca67\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d0f038ffe6c8ec5ec836379b827ecb71967119efca3f2cd4ce5d3006fa96a153\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dtxdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2c1a545fef7aec25b21dd112c54eeb7b94306cafaf4732a50de9ff10a409c443\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dtxdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6e9942f8e8cdf6ffd04d46b29be5a53fcc8c4cb5d79f50d7747e2fe6eb744ffd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dtxdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e91ea6957087e19d43626438d68bf92a19a6f35325de570738354edd61da5bb0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dtxdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4f56366e2355663eb896c09a6710166b89f794a7f17cb317f6c4299e8b317782\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dtxdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://403e86b4b99e8bc85be77996288705f6ea8ff5da8e9b7dfb09749f4341065b55\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dtxdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3ead32a1ed979f2f568f830c0c57ee284644824f41055dda621deb734bd7dd1b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3ead32a1ed979f2f568f830c0c57ee284644824f41055dda621deb734bd7dd1b\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-28T06:53:25Z\\\",\\\"message\\\":\\\"perator/openshift-kube-scheduler-operator-5fdd9b5758-7wtcp: failed to check if pod openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-7wtcp is in primary UDN: could not find OVN pod annotation in map[]\\\\nI1128 06:53:25.477598 6343 controller.go:257] Controller udn-host-isolation-manager: error found while processing openshift-multus/network-metrics-daemon-9kfr9: failed to check if pod openshift-multus/network-metrics-daemon-9kfr9 is in primary UDN: could not find OVN pod annotation in map[cluster-autoscaler.kubernetes.io/enable-ds-eviction:false]\\\\nI1128 06:53:25.477616 6343 controller.go:257] Controller udn-host-isolation-manager: error found while processing openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-7tnsd: failed to check if pod openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-7tnsd is in primary UDN: could not find OVN pod annotation in map[openshift.io/required-scc:nonroot-v2 openshift.io/scc:nonroot-v2 seccomp.security.alpha.kubernetes.io/pod:runtime/default]\\\\nE1128 06:53:25.584881 6343 shared_informer.go:316] \\\\\\\"Unhandled Error\\\\\\\" err=\\\\\\\"unable to sync caches for ovn-lb-controller\\\\\\\" logger=\\\\\\\"UnhandledError\\\\\\\"\\\\nI1128 06:53:25.586083 6343 ovnkube.go:599] Stopped ovnkube\\\\nI1128 06:53:25.586127 6343 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nF1\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T06:53:04Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=ovnkube-controller pod=ovnkube-node-7gdxt_openshift-ovn-kubernetes(ac5c6b67-2037-400e-8e03-845b47d8ca67)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dtxdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d31c99f96b8af9c7b9c47c4ccd0b6485b23c9eacfe293667038042df8330d8a9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dtxdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3aaa3ffb5b99838c7969b3e241ad9d7a0bc4797fac3ec52280914abbb7ca1b24\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3aaa3ffb5b99838c7969b3e241ad9d7a0bc4797fac3ec52280914abbb7ca1b24\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T06:52:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dtxdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T06:52:54Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-7gdxt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:40Z is after 2025-08-24T17:21:41Z"
Nov 28 06:53:40 crc kubenswrapper[4922]: I1128 06:53:40.550830 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-n9b52" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"31348e3e-fe58-4426-98b7-bd9dd404283b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://07e060fc8cd65cfc5a3e2ba86edb408b32996c25dac44f6f70d2f91b837e9da1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:53:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vkhgr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://03d0e97f4ab12f9cccfefde0d5665e45e14db63343fd1273279d41a8e56b060f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:53:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vkhgr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T06:53:07Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-n9b52\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:40Z is after 2025-08-24T17:21:41Z"
\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://03d0e97f4ab12f9cccfefde0d5665e45e14db63343fd1273279d41a8e56b060f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:53:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vkhgr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T06:53:07Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-n9b52\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:40Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:40 crc kubenswrapper[4922]: I1128 06:53:40.555583 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:40 crc kubenswrapper[4922]: I1128 06:53:40.555625 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:40 crc kubenswrapper[4922]: I1128 06:53:40.555633 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:40 crc kubenswrapper[4922]: I1128 06:53:40.555648 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:40 crc kubenswrapper[4922]: I1128 06:53:40.555658 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:40Z","lastTransitionTime":"2025-11-28T06:53:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 06:53:40 crc kubenswrapper[4922]: I1128 06:53:40.566391 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-9kfr9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"709beb43-ed88-4a0a-b384-0c463e469964\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:09Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:09Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5p26f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5p26f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T06:53:09Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-9kfr9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:40Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:40 crc kubenswrapper[4922]: I1128 06:53:40.585458 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f54a7fc3-dff2-4919-91a9-7defe17b717e\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2b39d53d3c00be07ad6ed44fb37961669118efd3f0a953a39e05c90c9fc30319\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1c84fe4f54f9b9fc76f23f13aaee875e9d127be3dec3e3952893436fb9d94936\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bf8a543b9af64068e2164c1fb7fcf8117e36a3f1a6c6e40df2a63da8fea86612\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7ef4b355167e11c82fb8067373d0d1b4ec5551157e683043c98f9249082795d2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T06:52:35Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:40Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:40 crc kubenswrapper[4922]: I1128 06:53:40.609375 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:40Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:40 crc kubenswrapper[4922]: I1128 06:53:40.623816 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-w9zxj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e1f29751-83a6-4469-b733-50e654026f8c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://679d64be6eca63ee652a4cb6cc5432c6ce549d1de243bdfbde115af634e770e3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ghdft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T06:52:54Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-w9zxj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 
2025-11-28T06:53:40Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:40 crc kubenswrapper[4922]: I1128 06:53:40.643566 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-jgzjd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b05f16bb-1729-4fd8-883a-4fb960bf4cff\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://099001160eba2d5545fdd42a6fac2b9842549a64b5d3fe093c274e073a156e5a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fwd5b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":
\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T06:52:54Z\\\"}}\" for pod \"openshift-multus\"/\"multus-jgzjd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:40Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:40 crc kubenswrapper[4922]: I1128 06:53:40.657810 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:40 crc kubenswrapper[4922]: I1128 06:53:40.657834 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:40 crc kubenswrapper[4922]: I1128 06:53:40.657846 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:40 crc kubenswrapper[4922]: I1128 06:53:40.657862 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:40 crc kubenswrapper[4922]: I1128 06:53:40.657873 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:40Z","lastTransitionTime":"2025-11-28T06:53:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:53:40 crc kubenswrapper[4922]: I1128 06:53:40.661551 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-xm948" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"28b50482-ffec-406c-9ff9-9604bce5d5d5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8329d62934c99c8561a76ffc873ea44b49549e6be23697ea4003dcd42798f914\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:53:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-76zhj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ea8d65e8ee7eb92aa290261e8f51b41b46819bc513aaca5c3bbdf3aa90a4acf1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ea8d65e8ee7eb92aa290261e8f51b41b46819bc513aaca5c3bbdf3aa90a4acf1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T06:52:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-76zhj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b52be6274c308e05417b43b1a405e2156837ac5b1e751cf1bf07f1820e5072c1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b52be6274c308e05417b43b1a405e2156837ac5b1e751cf1bf07f1820e5072c1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T06:52:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T06:52:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-76zhj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2e49c9118ef8a51223ca7a5ed0e966458c25e01d5fe81bffeecf85e3c958177a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2e49c9118ef8a51223ca7a5ed0e966458c25e01d5fe81bffeecf85e3c958177a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T06:52:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T06:52:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-76zhj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://10808a5202288b64a0583c474df43f7ad4846b876c071e3797e9403416dbd4e5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://10808a5202288b64a0583c474df43f7ad4846b876c071e3797e9403416dbd4e5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T06:52:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T06:52:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-76zhj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f17f7a3f2eae1a08db3ab5d237156d3555583ed11bdf121c4138312913bc2ad1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f17f7a3f2eae1a08db3ab5d237156d3555583ed11bdf121c4138312913bc2ad1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T06:53:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T06:52:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-76zhj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8c558e4eb231b58b703d70d6da8454e7128ac7fc7138897bedcbdeb9e801a08b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8c558e4eb231b58b703d70d6da8454e7128ac7fc7138897bedcbdeb9e801a08b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T06:53:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T06:53:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-76zhj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T06:52:54Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-xm948\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:40Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:40 crc kubenswrapper[4922]: I1128 06:53:40.674365 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-z5d9x" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"21211ec8-3baf-4230-9cd8-c641f6bdc0e1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d42d2096b746c6f0a9210409101237a8b283b9baa914633aefc36971712ec634\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g4hdz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T06:52:57Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-z5d9x\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:40Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:40 crc kubenswrapper[4922]: I1128 06:53:40.689113 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"e3415f94-87c0-4a05-9d74-02ac020c4d35\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8db4ea0b3e2badcbe285f1758b150d335c8c1f4d92478a2de587cf9d345f3640\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a5b5d5b7a735c902e218bbb524cda2f64b167fcced9cc07a47bff7e59cb55115\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://900d4f306d3a3e4386262c1ee1031ad03503f6c262e9305c581397b3bf1a6a60\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://73c8b650a2a0ebc55d6531c14f5c6e5e4a7ffbac511aea8400c6632289aeb275\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://73c8b650a2a0ebc55d6531c14f5c6e5e4a7ffbac511aea8400c6632289aeb275\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T06:52:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T06:52:36Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T06:52:35Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:40Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:40 crc kubenswrapper[4922]: I1128 06:53:40.703603 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://60275d4709ecec957c37c52e762628422258288394fb23bb6190e98253977288\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d7be069e52fb9de73b7d4297a34eaab68e25c764ed60172e87146259d0d0b6ed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":t
rue,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:40Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:40 crc kubenswrapper[4922]: I1128 06:53:40.720672 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8de96806bba4cb76f53ccf39f1188732d96955671049b550589a836ed21e0616\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:40Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:40 crc kubenswrapper[4922]: I1128 06:53:40.761152 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:40 crc kubenswrapper[4922]: I1128 06:53:40.761191 
4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:40 crc kubenswrapper[4922]: I1128 06:53:40.761205 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:40 crc kubenswrapper[4922]: I1128 06:53:40.761303 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:40 crc kubenswrapper[4922]: I1128 06:53:40.761317 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:40Z","lastTransitionTime":"2025-11-28T06:53:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:53:40 crc kubenswrapper[4922]: I1128 06:53:40.857214 4922 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-7gdxt_ac5c6b67-2037-400e-8e03-845b47d8ca67/ovnkube-controller/1.log" Nov 28 06:53:40 crc kubenswrapper[4922]: I1128 06:53:40.859447 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-7gdxt" event={"ID":"ac5c6b67-2037-400e-8e03-845b47d8ca67","Type":"ContainerStarted","Data":"e48e9f3c53ca5775a9a467f9da512b7ee4d61cddcd9d7fc0765432f7a716ac96"} Nov 28 06:53:40 crc kubenswrapper[4922]: I1128 06:53:40.860582 4922 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-7gdxt" Nov 28 06:53:40 crc kubenswrapper[4922]: I1128 06:53:40.863129 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:40 crc kubenswrapper[4922]: I1128 06:53:40.863161 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:40 crc kubenswrapper[4922]: I1128 06:53:40.863175 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:40 crc kubenswrapper[4922]: I1128 06:53:40.863194 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:40 crc kubenswrapper[4922]: I1128 06:53:40.863209 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:40Z","lastTransitionTime":"2025-11-28T06:53:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 06:53:40 crc kubenswrapper[4922]: I1128 06:53:40.888896 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"78d1b075-6126-476f-8318-483aeaa7b542\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d8727c0c5c76c4071561c95b14554063c01a2d7d826bfef19624a346f7079096\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c63926690deadbe1a0b9bd4228c1be8c9d959ae52dc13540e1d7da0ef6ce2b44\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://12e55656cf2a2cadbef853f458f347aabe75bd6b644a48a8c320144a0bbb77f8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/ku
bernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ff005273cedb4a3be19dafd07b2572733446d00c1aa3d89dec6dad79a9b37cb7\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://43f9db3464d315f5b596f15327b9e7cfc2935bb8f18492bcf4aa1b2d8e2e8951\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-28T06:52:53Z\\\",\\\"message\\\":\\\" to sync for client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file\\\\nI1128 06:52:53.805479 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\nI1128 06:52:53.805349 1 requestheader_controller.go:172] Starting RequestHeaderAuthRequestController\\\\nI1128 06:52:53.807455 1 shared_informer.go:313] Waiting for caches to sync for RequestHeaderAuthRequestController\\\\nI1128 06:52:53.805665 1 envvar.go:172] \\\\\\\"Feature gate default state\\\\\\\" feature=\\\\\\\"WatchListClient\\\\\\\" enabled=false\\\\nI1128 06:52:53.807562 1 envvar.go:172] \\\\\\\"Feature gate default state\\\\\\\" feature=\\\\\\\"InformerResourceVersion\\\\\\\" enabled=false\\\\nI1128 06:52:53.805922 1 tlsconfig.go:203] \\\\\\\"Loaded serving cert\\\\\\\" certName=\\\\\\\"serving-cert::/tmp/serving-cert-870734072/tls.crt::/tmp/serving-cert-870734072/tls.key\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"localhost\\\\\\\\\\\\\\\" [serving] validServingFor=[localhost] issuer=\\\\\\\\\\\\\\\"check-endpoints-signer@1764312757\\\\\\\\\\\\\\\" (2025-11-28 06:52:36 +0000 UTC to 2025-12-28 06:52:37 +0000 UTC (now=2025-11-28 06:52:53.805871396 +0000 UTC))\\\\\\\"\\\\nI1128 06:52:53.808356 1 named_certificates.go:53] \\\\\\\"Loaded SNI cert\\\\\\\" index=0 certName=\\\\\\\"self-signed loopback\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"apiserver-loopback-client@1764312768\\\\\\\\\\\\\\\" [serving] validServingFor=[apiserver-loopback-client] issuer=\\\\\\\\\\\\\\\"apiserver-loopback-client-ca@1764312768\\\\\\\\\\\\\\\" (2025-11-28 05:52:47 +0000 UTC to 2026-11-28 05:52:47 +0000 UTC (now=2025-11-28 06:52:53.808326937 +0000 UTC))\\\\\\\"\\\\nI1128 06:52:53.808434 1 secure_serving.go:213] Serving securely on [::]:17697\\\\nI1128 06:52:53.808491 1 genericapiserver.go:683] [graceful-termination] waiting for shutdown to be initiated\\\\nI1128 06:52:53.805962 1 dynamic_serving_content.go:135] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-870734072/tls.crt::/tmp/serving-cert-870734072/tls.key\\\\\\\"\\\\nI1128 06:52:53.808872 1 tlsconfig.go:243] \\\\\\\"Starting DynamicServingCertificateController\\\\\\\"\\\\nF1128 06:52:53.810397 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T06:52:37Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cd17aa15325fbe3a40b81cea5bfec137ef02a8dd2d5f8393be4d206a8360ba89\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:37Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9a1a9e0fb2169a4caf2986906a87573648458725e2571ec377704856d3c16b47\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9a1a9e0fb2169a4caf2986906a87573648458725e2571ec377704856d3c16b47\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T06:52:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T06:52:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T06:52:35Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:40Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:40 crc kubenswrapper[4922]: I1128 06:53:40.903410 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:57Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:57Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4fb859dbed8c4c25965a739a2bd823f90f867a3bb629206994858cd62ba597ff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:40Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:40 crc kubenswrapper[4922]: I1128 06:53:40.918361 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:40Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:40 crc kubenswrapper[4922]: I1128 06:53:40.936096 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-7gdxt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ac5c6b67-2037-400e-8e03-845b47d8ca67\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d0f038ffe6c8ec5ec836379b827ecb71967119efca3f2cd4ce5d3006fa96a153\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dtxdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2c1a545fef7aec25b21dd112c54eeb7b94306cafaf4732a50de9ff10a409c443\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dtxdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6e9942f8e8cdf6ffd04d46b29be5a53fcc8c4cb5d79f50d7747e2fe6eb744ffd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dtxdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e91ea6957087e19d43626438d68bf92a19a6f35325de570738354edd61da5bb0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dtxdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4f56366e2355663eb896c09a6710166b89f794a7f17cb317f6c4299e8b317782\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dtxdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://403e86b4b99e8bc85be77996288705f6ea8ff5da8e9b7dfb09749f4341065b55\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dtxdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e48e9f3c53ca5775a9a467f9da512b7ee4d61cdd
cd9d7fc0765432f7a716ac96\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3ead32a1ed979f2f568f830c0c57ee284644824f41055dda621deb734bd7dd1b\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-28T06:53:25Z\\\",\\\"message\\\":\\\"perator/openshift-kube-scheduler-operator-5fdd9b5758-7wtcp: failed to check if pod openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-7wtcp is in primary UDN: could not find OVN pod annotation in map[]\\\\nI1128 06:53:25.477598 6343 controller.go:257] Controller udn-host-isolation-manager: error found while processing openshift-multus/network-metrics-daemon-9kfr9: failed to check if pod openshift-multus/network-metrics-daemon-9kfr9 is in primary UDN: could not find OVN pod annotation in map[cluster-autoscaler.kubernetes.io/enable-ds-eviction:false]\\\\nI1128 06:53:25.477616 6343 controller.go:257] Controller udn-host-isolation-manager: error found while processing openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-7tnsd: failed to check if pod openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-7tnsd is in primary UDN: could not find OVN pod annotation in map[openshift.io/required-scc:nonroot-v2 openshift.io/scc:nonroot-v2 seccomp.security.alpha.kubernetes.io/pod:runtime/default]\\\\nE1128 06:53:25.584881 6343 shared_informer.go:316] \\\\\\\"Unhandled Error\\\\\\\" err=\\\\\\\"unable to sync caches for ovn-lb-controller\\\\\\\" logger=\\\\\\\"UnhandledError\\\\\\\"\\\\nI1128 06:53:25.586083 6343 ovnkube.go:599] Stopped ovnkube\\\\nI1128 06:53:25.586127 6343 metrics.go:553] Stopping metrics server at address 
\\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nF1\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T06:53:04Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:53:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dtxdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d31c99f96b8af9c7b9c47c4ccd0b6485b23c9eacfe293667038042df8330d8a9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dtxdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\
"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3aaa3ffb5b99838c7969b3e241ad9d7a0bc4797fac3ec52280914abbb7ca1b24\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3aaa3ffb5b99838c7969b3e241ad9d7a0bc4797fac3ec52280914abbb7ca1b24\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T06:52:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dtxdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T06:52:54Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-7gdxt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:40Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:40 crc kubenswrapper[4922]: I1128 06:53:40.948839 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-n9b52" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"31348e3e-fe58-4426-98b7-bd9dd404283b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://07e060fc8cd65cfc5a3e2ba86edb408b32996c25dac44f6f70d2f91b837e9da1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:53:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vkhgr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://03d0e97f4ab12f9cccfefde0d5665e45e14db63343fd1273279d41a8e56b060f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:53:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vkhgr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T06:53:07Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-n9b52\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:40Z is after 2025-08-24T17:21:41Z" Nov 28 
06:53:40 crc kubenswrapper[4922]: I1128 06:53:40.955795 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/709beb43-ed88-4a0a-b384-0c463e469964-metrics-certs\") pod \"network-metrics-daemon-9kfr9\" (UID: \"709beb43-ed88-4a0a-b384-0c463e469964\") " pod="openshift-multus/network-metrics-daemon-9kfr9" Nov 28 06:53:40 crc kubenswrapper[4922]: E1128 06:53:40.955896 4922 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Nov 28 06:53:40 crc kubenswrapper[4922]: E1128 06:53:40.955931 4922 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/709beb43-ed88-4a0a-b384-0c463e469964-metrics-certs podName:709beb43-ed88-4a0a-b384-0c463e469964 nodeName:}" failed. No retries permitted until 2025-11-28 06:54:12.955920616 +0000 UTC m=+97.876316198 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/709beb43-ed88-4a0a-b384-0c463e469964-metrics-certs") pod "network-metrics-daemon-9kfr9" (UID: "709beb43-ed88-4a0a-b384-0c463e469964") : object "openshift-multus"/"metrics-daemon-secret" not registered Nov 28 06:53:40 crc kubenswrapper[4922]: I1128 06:53:40.965640 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f54a7fc3-dff2-4919-91a9-7defe17b717e\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2b39d53d3c00be07ad6ed44fb37961669118efd3f0a953a39e05c90c9fc30319\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1c84fe4f54f9b9fc76f23f13aaee875e9d127be3dec3e3952893436fb9d94936\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791
fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bf8a543b9af64068e2164c1fb7fcf8117e36a3f1a6c6e40df2a63da8fea86612\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7ef4b355167e11c82fb8067373d0d1b4ec5551157e683043c98f9249082795d2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T06:52:35Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:40Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:40 crc kubenswrapper[4922]: I1128 06:53:40.966476 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:40 crc kubenswrapper[4922]: I1128 06:53:40.966522 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:40 crc kubenswrapper[4922]: I1128 06:53:40.966541 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:40 crc kubenswrapper[4922]: I1128 06:53:40.966561 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:40 crc 
kubenswrapper[4922]: I1128 06:53:40.966576 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:40Z","lastTransitionTime":"2025-11-28T06:53:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:53:41 crc kubenswrapper[4922]: I1128 06:53:41.024095 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:40Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:41 crc kubenswrapper[4922]: I1128 06:53:41.034933 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-w9zxj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e1f29751-83a6-4469-b733-50e654026f8c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://679d64be6eca63ee652a4cb6cc5432c6ce549d1de243bdfbde115af634e770e3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ghdft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T06:52:54Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-w9zxj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 
2025-11-28T06:53:41Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:41 crc kubenswrapper[4922]: I1128 06:53:41.047784 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-jgzjd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b05f16bb-1729-4fd8-883a-4fb960bf4cff\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://099001160eba2d5545fdd42a6fac2b9842549a64b5d3fe093c274e073a156e5a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fwd5b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":
\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T06:52:54Z\\\"}}\" for pod \"openshift-multus\"/\"multus-jgzjd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:41Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:41 crc kubenswrapper[4922]: I1128 06:53:41.059717 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-xm948" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"28b50482-ffec-406c-9ff9-9604bce5d5d5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8329d62934c99c8561a76ffc873ea44b49549e6be23697ea4003dcd42798f914\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:53:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-76zhj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ea8d65e8ee7eb92aa290261e8f51b41b46819bc513aaca5c3bbdf3aa90a4acf1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ea8d65e8ee7eb92aa290261e8f51b41b46819bc513aaca5c3bbdf3aa90a4acf1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T06:52:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"
name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-76zhj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b52be6274c308e05417b43b1a405e2156837ac5b1e751cf1bf07f1820e5072c1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b52be6274c308e05417b43b1a405e2156837ac5b1e751cf1bf07f1820e5072c1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T06:52:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T06:52:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-76zhj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2e49c9118ef8a51223ca7a5ed0e966458c25e01d5fe81bffeecf85e3c958177a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2e49c9118ef8a51223ca7a5ed0e966458c25e01d5fe81bffeecf85e3c958177a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T06:52:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T06:52:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-76zhj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://10808a5202288b64a0583c474df43f7ad4846b876c071e3797e9403416dbd4e5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",
\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://10808a5202288b64a0583c474df43f7ad4846b876c071e3797e9403416dbd4e5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T06:52:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T06:52:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-76zhj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f17f7a3f2eae1a08db3ab5d237156d3555583ed11bdf121c4138312913bc2ad1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f17f7a3f2eae1a08db3ab5d237156d3555583ed11bdf121c4138312913bc2ad1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T06:53:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T06:52:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-76zhj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8c558e4eb231b58b703d70d6da8454e7128ac7fc7138897bedcbdeb9e801a08b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8c558e4eb231b58b703d70d6da8454e7128ac7fc7138897bedcbdeb9e801a08b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T06:53:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T06:53:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-76zhj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T06:52:54Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-xm948\": 
Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:41Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:41 crc kubenswrapper[4922]: I1128 06:53:41.068560 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:41 crc kubenswrapper[4922]: I1128 06:53:41.068604 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:41 crc kubenswrapper[4922]: I1128 06:53:41.068616 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:41 crc kubenswrapper[4922]: I1128 06:53:41.068634 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:41 crc kubenswrapper[4922]: I1128 06:53:41.068646 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:41Z","lastTransitionTime":"2025-11-28T06:53:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:53:41 crc kubenswrapper[4922]: I1128 06:53:41.071362 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-z5d9x" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"21211ec8-3baf-4230-9cd8-c641f6bdc0e1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d42d2096b746c6f0a9210409101237a8b283b9baa914633aefc36971712ec634\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g4hdz\\\",\\\"readOnly\\\":true,\\\"
recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T06:52:57Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-z5d9x\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:41Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:41 crc kubenswrapper[4922]: I1128 06:53:41.082426 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-9kfr9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"709beb43-ed88-4a0a-b384-0c463e469964\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:09Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:09Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5p26f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5p26f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T06:53:09Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-9kfr9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:41Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:41 crc kubenswrapper[4922]: I1128 06:53:41.093885 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"e3415f94-87c0-4a05-9d74-02ac020c4d35\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8db4ea0b3e2badcbe285f1758b150d335c8c1f4d92478a2de587cf9d345f3640\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a5b5d5b7a735c902e218bbb524cda2f64b167fcced9cc07a47bff7e59cb55115\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://900d4f306d3a3e4386262c1ee1031ad03503f6c262e9305c581397b3bf1a6a60\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://73c8b650a2a0ebc55d6531c14f5c6e5e4a7ffbac511aea8400c6632289aeb275\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://73c8b650a2a0ebc55d6531c14f5c6e5e4a7ffbac511aea8400c6632289aeb275\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T06:52:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T06:52:36Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T06:52:35Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:41Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:41 crc kubenswrapper[4922]: I1128 06:53:41.114153 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://60275d4709ecec957c37c52e762628422258288394fb23bb6190e98253977288\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d7be069e52fb9de73b7d4297a34eaab68e25c764ed60172e87146259d0d0b6ed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":t
rue,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:41Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:41 crc kubenswrapper[4922]: I1128 06:53:41.128525 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8de96806bba4cb76f53ccf39f1188732d96955671049b550589a836ed21e0616\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:41Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:41 crc kubenswrapper[4922]: I1128 06:53:41.139173 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:41Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:41 crc kubenswrapper[4922]: I1128 06:53:41.148655 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-h8wk6" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0498340a-5e95-42bf-a0a6-8ac89a6b8858\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://19e8a18b97500977a51210206bba89633ff9939e989bfe1a6e471f5a23c83520\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zf7vq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b903fc60349ec7d5004f23ec933dce13d646a2c343819495564005dfb4813314\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zf7vq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T06:52:54Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-h8wk6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:41Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:41 crc kubenswrapper[4922]: I1128 06:53:41.170268 4922 kubelet_node_status.go:724] 
"Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:41 crc kubenswrapper[4922]: I1128 06:53:41.170394 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:41 crc kubenswrapper[4922]: I1128 06:53:41.170464 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:41 crc kubenswrapper[4922]: I1128 06:53:41.170540 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:41 crc kubenswrapper[4922]: I1128 06:53:41.170596 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:41Z","lastTransitionTime":"2025-11-28T06:53:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:53:41 crc kubenswrapper[4922]: I1128 06:53:41.272781 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:41 crc kubenswrapper[4922]: I1128 06:53:41.272817 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:41 crc kubenswrapper[4922]: I1128 06:53:41.272828 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:41 crc kubenswrapper[4922]: I1128 06:53:41.272845 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:41 crc kubenswrapper[4922]: I1128 06:53:41.272858 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:41Z","lastTransitionTime":"2025-11-28T06:53:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:53:41 crc kubenswrapper[4922]: I1128 06:53:41.375770 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:41 crc kubenswrapper[4922]: I1128 06:53:41.375805 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:41 crc kubenswrapper[4922]: I1128 06:53:41.375819 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:41 crc kubenswrapper[4922]: I1128 06:53:41.375837 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:41 crc kubenswrapper[4922]: I1128 06:53:41.375849 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:41Z","lastTransitionTime":"2025-11-28T06:53:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 06:53:41 crc kubenswrapper[4922]: I1128 06:53:41.478046 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:41 crc kubenswrapper[4922]: I1128 06:53:41.478104 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:41 crc kubenswrapper[4922]: I1128 06:53:41.478123 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:41 crc kubenswrapper[4922]: I1128 06:53:41.478144 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:41 crc kubenswrapper[4922]: I1128 06:53:41.478160 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:41Z","lastTransitionTime":"2025-11-28T06:53:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:53:41 crc kubenswrapper[4922]: I1128 06:53:41.581354 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:41 crc kubenswrapper[4922]: I1128 06:53:41.581400 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:41 crc kubenswrapper[4922]: I1128 06:53:41.581417 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:41 crc kubenswrapper[4922]: I1128 06:53:41.581435 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:41 crc kubenswrapper[4922]: I1128 06:53:41.581447 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:41Z","lastTransitionTime":"2025-11-28T06:53:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:53:41 crc kubenswrapper[4922]: I1128 06:53:41.684347 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:41 crc kubenswrapper[4922]: I1128 06:53:41.684396 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:41 crc kubenswrapper[4922]: I1128 06:53:41.684410 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:41 crc kubenswrapper[4922]: I1128 06:53:41.684428 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:41 crc kubenswrapper[4922]: I1128 06:53:41.684439 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:41Z","lastTransitionTime":"2025-11-28T06:53:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 06:53:41 crc kubenswrapper[4922]: I1128 06:53:41.786620 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:41 crc kubenswrapper[4922]: I1128 06:53:41.787100 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:41 crc kubenswrapper[4922]: I1128 06:53:41.787125 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:41 crc kubenswrapper[4922]: I1128 06:53:41.787665 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:41 crc kubenswrapper[4922]: I1128 06:53:41.787702 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:41Z","lastTransitionTime":"2025-11-28T06:53:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:53:41 crc kubenswrapper[4922]: I1128 06:53:41.864495 4922 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-7gdxt_ac5c6b67-2037-400e-8e03-845b47d8ca67/ovnkube-controller/2.log" Nov 28 06:53:41 crc kubenswrapper[4922]: I1128 06:53:41.865409 4922 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-7gdxt_ac5c6b67-2037-400e-8e03-845b47d8ca67/ovnkube-controller/1.log" Nov 28 06:53:41 crc kubenswrapper[4922]: I1128 06:53:41.868909 4922 generic.go:334] "Generic (PLEG): container finished" podID="ac5c6b67-2037-400e-8e03-845b47d8ca67" containerID="e48e9f3c53ca5775a9a467f9da512b7ee4d61cddcd9d7fc0765432f7a716ac96" exitCode=1 Nov 28 06:53:41 crc kubenswrapper[4922]: I1128 06:53:41.868987 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-7gdxt" event={"ID":"ac5c6b67-2037-400e-8e03-845b47d8ca67","Type":"ContainerDied","Data":"e48e9f3c53ca5775a9a467f9da512b7ee4d61cddcd9d7fc0765432f7a716ac96"} Nov 28 06:53:41 crc kubenswrapper[4922]: I1128 06:53:41.869072 4922 scope.go:117] "RemoveContainer" containerID="3ead32a1ed979f2f568f830c0c57ee284644824f41055dda621deb734bd7dd1b" Nov 28 06:53:41 crc kubenswrapper[4922]: I1128 06:53:41.870112 4922 scope.go:117] "RemoveContainer" containerID="e48e9f3c53ca5775a9a467f9da512b7ee4d61cddcd9d7fc0765432f7a716ac96" Nov 28 06:53:41 crc kubenswrapper[4922]: E1128 06:53:41.870400 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 20s restarting failed container=ovnkube-controller pod=ovnkube-node-7gdxt_openshift-ovn-kubernetes(ac5c6b67-2037-400e-8e03-845b47d8ca67)\"" pod="openshift-ovn-kubernetes/ovnkube-node-7gdxt" podUID="ac5c6b67-2037-400e-8e03-845b47d8ca67" Nov 28 06:53:41 crc kubenswrapper[4922]: I1128 06:53:41.870943 4922 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-jgzjd_b05f16bb-1729-4fd8-883a-4fb960bf4cff/kube-multus/0.log" Nov 28 06:53:41 crc kubenswrapper[4922]: I1128 06:53:41.871003 4922 generic.go:334] "Generic (PLEG): container finished" podID="b05f16bb-1729-4fd8-883a-4fb960bf4cff" containerID="099001160eba2d5545fdd42a6fac2b9842549a64b5d3fe093c274e073a156e5a" exitCode=1 Nov 28 06:53:41 
crc kubenswrapper[4922]: I1128 06:53:41.871048 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-jgzjd" event={"ID":"b05f16bb-1729-4fd8-883a-4fb960bf4cff","Type":"ContainerDied","Data":"099001160eba2d5545fdd42a6fac2b9842549a64b5d3fe093c274e073a156e5a"} Nov 28 06:53:41 crc kubenswrapper[4922]: I1128 06:53:41.871842 4922 scope.go:117] "RemoveContainer" containerID="099001160eba2d5545fdd42a6fac2b9842549a64b5d3fe093c274e073a156e5a" Nov 28 06:53:41 crc kubenswrapper[4922]: I1128 06:53:41.891840 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:41 crc kubenswrapper[4922]: I1128 06:53:41.891908 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:41 crc kubenswrapper[4922]: I1128 06:53:41.891933 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:41 crc kubenswrapper[4922]: I1128 06:53:41.891964 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:41 crc kubenswrapper[4922]: I1128 06:53:41.891992 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:41Z","lastTransitionTime":"2025-11-28T06:53:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:53:41 crc kubenswrapper[4922]: I1128 06:53:41.892482 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:41Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:41 crc kubenswrapper[4922]: I1128 06:53:41.913085 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-7gdxt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ac5c6b67-2037-400e-8e03-845b47d8ca67\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d0f038ffe6c8ec5ec836379b827ecb71967119efca3f2cd4ce5d3006fa96a153\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dtxdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2c1a545fef7aec25b21dd112c54eeb7b94306cafaf4732a50de9ff10a409c443\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dtxdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6e9942f8e8cdf6ffd04d46b29be5a53fcc8c4cb5d79f50d7747e2fe6eb744ffd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dtxdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e91ea6957087e19d43626438d68bf92a19a6f35325de570738354edd61da5bb0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dtxdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4f56366e2355663eb896c09a6710166b89f794a7f17cb317f6c4299e8b317782\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dtxdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://403e86b4b99e8bc85be77996288705f6ea8ff5da8e9b7dfb09749f4341065b55\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dtxdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e48e9f3c53ca5775a9a467f9da512b7ee4d61cdd
cd9d7fc0765432f7a716ac96\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3ead32a1ed979f2f568f830c0c57ee284644824f41055dda621deb734bd7dd1b\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-28T06:53:25Z\\\",\\\"message\\\":\\\"perator/openshift-kube-scheduler-operator-5fdd9b5758-7wtcp: failed to check if pod openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-7wtcp is in primary UDN: could not find OVN pod annotation in map[]\\\\nI1128 06:53:25.477598 6343 controller.go:257] Controller udn-host-isolation-manager: error found while processing openshift-multus/network-metrics-daemon-9kfr9: failed to check if pod openshift-multus/network-metrics-daemon-9kfr9 is in primary UDN: could not find OVN pod annotation in map[cluster-autoscaler.kubernetes.io/enable-ds-eviction:false]\\\\nI1128 06:53:25.477616 6343 controller.go:257] Controller udn-host-isolation-manager: error found while processing openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-7tnsd: failed to check if pod openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-7tnsd is in primary UDN: could not find OVN pod annotation in map[openshift.io/required-scc:nonroot-v2 openshift.io/scc:nonroot-v2 seccomp.security.alpha.kubernetes.io/pod:runtime/default]\\\\nE1128 06:53:25.584881 6343 shared_informer.go:316] \\\\\\\"Unhandled Error\\\\\\\" err=\\\\\\\"unable to sync caches for ovn-lb-controller\\\\\\\" logger=\\\\\\\"UnhandledError\\\\\\\"\\\\nI1128 06:53:25.586083 6343 ovnkube.go:599] Stopped ovnkube\\\\nI1128 06:53:25.586127 6343 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nF1\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T06:53:04Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e48e9f3c53ca5775a9a467f9da512b7ee4d61cddcd9d7fc0765432f7a716ac96\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-28T06:53:41Z\\\",\\\"message\\\":\\\"[0a:58:0a:d9:00:3b 10.217.0.59]} options:{GoMap:map[iface-id-ver:9d751cbb-f2e2-430d-9754-c882a5e924a5 requested-chassis:crc]} port_security:{GoSet:[0a:58:0a:d9:00:3b 10.217.0.59]}] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {960d98b2-dc64-4e93-a4b6-9b19847af71e}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI1128 06:53:41.329271 6771 ovn.go:134] Ensuring zone local for Pod openshift-kube-scheduler/openshift-kube-scheduler-crc in node crc\\\\nI1128 06:53:41.329277 6771 obj_retry.go:386] Retry successful for *v1.Pod openshift-kube-scheduler/openshift-kube-scheduler-crc after 0 failed attempt(s)\\\\nI1128 06:53:41.329282 6771 default_network_controller.go:776] Recording success event on pod openshift-kube-scheduler/openshift-kube-scheduler-crc\\\\nF1128 06:53:41.329275 6771 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller 
initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: fa\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T06:53:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dtxdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d31c99f96b8af9c7b9c47c4ccd0b6485b23c9eacfe293667038042df8330d8a9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dtxdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"ho
stIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3aaa3ffb5b99838c7969b3e241ad9d7a0bc4797fac3ec52280914abbb7ca1b24\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3aaa3ffb5b99838c7969b3e241ad9d7a0bc4797fac3ec52280914abbb7ca1b24\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T06:52:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dtxdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T06:52:54Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-7gdxt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:41Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:41 crc kubenswrapper[4922]: I1128 06:53:41.927956 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-n9b52" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"31348e3e-fe58-4426-98b7-bd9dd404283b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://07e060fc8cd65cfc5a3e2ba86edb408b32996c25dac44f6f70d2f91b837e9da1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:53:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vkhgr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://03d0e97f4ab12f9cccfefde0d5665e45e14db63343fd1273279d41a8e56b060f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:53:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vkhgr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T06:53:07Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-n9b52\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:41Z is after 2025-08-24T17:21:41Z" Nov 28 
06:53:41 crc kubenswrapper[4922]: I1128 06:53:41.942142 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"78d1b075-6126-476f-8318-483aeaa7b542\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d8727c0c5c76c4071561c95b14554063c01a2d7d826bfef19624a346f7079096\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c63926690deadbe1a0b9bd4228c1be8c9d959ae52dc13540e1d7da0ef6ce2b44\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://12e55656cf2a2cadbef853f458f347aabe75bd6b644a48a8c320144a0bbb77f8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\
\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ff005273cedb4a3be19dafd07b2572733446d00c1aa3d89dec6dad79a9b37cb7\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://43f9db3464d315f5b596f15327b9e7cfc2935bb8f18492bcf4aa1b2d8e2e8951\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-28T06:52:53Z\\\",\\\"message\\\":\\\" to sync for client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file\\\\nI1128 06:52:53.805479 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\nI1128 06:52:53.805349 1 requestheader_controller.go:172] Starting RequestHeaderAuthRequestController\\\\nI1128 06:52:53.807455 1 shared_informer.go:313] Waiting for caches to sync for RequestHeaderAuthRequestController\\\\nI1128 06:52:53.805665 1 envvar.go:172] \\\\\\\"Feature gate default state\\\\\\\" feature=\\\\\\\"WatchListClient\\\\\\\" enabled=false\\\\nI1128 06:52:53.807562 1 envvar.go:172] \\\\\\\"Feature gate default state\\\\\\\" feature=\\\\\\\"InformerResourceVersion\\\\\\\" enabled=false\\\\nI1128 06:52:53.805922 1 tlsconfig.go:203] \\\\\\\"Loaded serving cert\\\\\\\" certName=\\\\\\\"serving-cert::/tmp/serving-cert-870734072/tls.crt::/tmp/serving-cert-870734072/tls.key\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"localhost\\\\\\\\\\\\\\\" [serving] validServingFor=[localhost] issuer=\\\\\\\\\\\\\\\"check-endpoints-signer@1764312757\\\\\\\\\\\\\\\" (2025-11-28 06:52:36 +0000 UTC to 2025-12-28 06:52:37 +0000 UTC (now=2025-11-28 06:52:53.805871396 +0000 UTC))\\\\\\\"\\\\nI1128 06:52:53.808356 1 named_certificates.go:53] \\\\\\\"Loaded SNI cert\\\\\\\" index=0 certName=\\\\\\\"self-signed loopback\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"apiserver-loopback-client@1764312768\\\\\\\\\\\\\\\" [serving] validServingFor=[apiserver-loopback-client] issuer=\\\\\\\\\\\\\\\"apiserver-loopback-client-ca@1764312768\\\\\\\\\\\\\\\" (2025-11-28 05:52:47 +0000 UTC to 2026-11-28 05:52:47 +0000 UTC (now=2025-11-28 06:52:53.808326937 +0000 UTC))\\\\\\\"\\\\nI1128 06:52:53.808434 1 secure_serving.go:213] Serving securely on [::]:17697\\\\nI1128 06:52:53.808491 1 genericapiserver.go:683] [graceful-termination] waiting for shutdown to be initiated\\\\nI1128 06:52:53.805962 1 dynamic_serving_content.go:135] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-870734072/tls.crt::/tmp/serving-cert-870734072/tls.key\\\\\\\"\\\\nI1128 06:52:53.808872 1 tlsconfig.go:243] \\\\\\\"Starting DynamicServingCertificateController\\\\\\\"\\\\nF1128 06:52:53.810397 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T06:52:37Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cd17aa15325fbe3a40b81cea5bfec137ef02a8dd2d5f8393be4d206a8360ba89\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:37Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9a1a9e0fb2169a4caf2986906a87573648458725e2571ec377704856d3c16b47\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9a1a9e0fb2169a4caf2986906a87573648458725e2571ec377704856d3c16b47\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T06:52:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T06:52:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T06:52:35Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:41Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:41 crc kubenswrapper[4922]: I1128 06:53:41.961793 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:57Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:57Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4fb859dbed8c4c25965a739a2bd823f90f867a3bb629206994858cd62ba597ff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:41Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:41 crc kubenswrapper[4922]: I1128 06:53:41.988076 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-jgzjd" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b05f16bb-1729-4fd8-883a-4fb960bf4cff\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://099001160eba2d5545fdd42a6fac2b9842549a64b5d3fe093c274e073a156e5a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fwd5b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T06:52:54Z\\\"}}\" for pod \"openshift-multus\"/\"multus-jgzjd\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:41Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:41 crc kubenswrapper[4922]: I1128 06:53:41.996099 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:41 crc kubenswrapper[4922]: I1128 06:53:41.996161 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:41 crc kubenswrapper[4922]: I1128 06:53:41.996179 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:41 crc kubenswrapper[4922]: I1128 06:53:41.996205 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:41 crc kubenswrapper[4922]: I1128 06:53:41.996249 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:41Z","lastTransitionTime":"2025-11-28T06:53:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:53:42 crc kubenswrapper[4922]: I1128 06:53:42.012725 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-xm948" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"28b50482-ffec-406c-9ff9-9604bce5d5d5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8329d62934c99c8561a76ffc873ea44b49549e6be23697ea4003dcd42798f914\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:53:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-76zhj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerSta
tuses\\\":[{\\\"containerID\\\":\\\"cri-o://ea8d65e8ee7eb92aa290261e8f51b41b46819bc513aaca5c3bbdf3aa90a4acf1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ea8d65e8ee7eb92aa290261e8f51b41b46819bc513aaca5c3bbdf3aa90a4acf1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T06:52:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-76zhj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b52be6274c308e05417b43b1a405e2156837ac5b1e751cf1bf07f1820e5072c1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b52be6274c308e05417b43b1a405e2156837ac5b1e751cf1bf07f1820e5072c1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T06:52:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T06:52:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-76zhj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2e49c9118ef8a51223ca7a5ed0e966458c25e01d5fe81bffeecf85e3c958177a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2e49c9118ef8a51223ca7a5ed0e966458c25e01d5fe81bffeecf85e3c958177a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T06:52:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T06:
52:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-76zhj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://10808a5202288b64a0583c474df43f7ad4846b876c071e3797e9403416dbd4e5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://10808a5202288b64a0583c474df43f7ad4846b876c071e3797e9403416dbd4e5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T06:52:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T06:52:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-76zhj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f17f7a3f2eae1a08db3ab5d237156d3555583ed11bdf121c4138312913bc2ad1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f17f7a3f2eae1a08db3ab5d237156d3555583ed11bdf121c4138312913bc2ad1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T06:53:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T06:52:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-76zhj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8c558e4eb231b58b703d70d6da8454e7128ac7fc7138897bedcbdeb9e801a08b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-c
ni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8c558e4eb231b58b703d70d6da8454e7128ac7fc7138897bedcbdeb9e801a08b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T06:53:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T06:53:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-76zhj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T06:52:54Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-xm948\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:42Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:42 crc kubenswrapper[4922]: I1128 06:53:42.026283 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-z5d9x" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"21211ec8-3baf-4230-9cd8-c641f6bdc0e1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d42d2096b746c6f0a9210409101237a8b283b9baa914633aefc36971712ec634\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g4hdz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\
"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T06:52:57Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-z5d9x\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:42Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:42 crc kubenswrapper[4922]: I1128 06:53:42.043406 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-9kfr9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"709beb43-ed88-4a0a-b384-0c463e469964\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:09Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:09Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5p26f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5p26f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T06:53:09Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-9kfr9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:42Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:42 crc kubenswrapper[4922]: I1128 06:53:42.068531 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f54a7fc3-dff2-4919-91a9-7defe17b717e\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2b39d53d3c00be07ad6ed44fb37961669118efd3f0a953a39e05c90c9fc30319\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1c84fe4f54f9b9fc76f23f13aaee875e9d127be3dec3e3952893436fb9d94936\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bf8a543b9af64068e2164c1fb7fcf8117e36a3f1a6c6e40df2a63da8fea86612\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025
-11-28T06:52:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7ef4b355167e11c82fb8067373d0d1b4ec5551157e683043c98f9249082795d2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T06:52:35Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:42Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:42 crc kubenswrapper[4922]: I1128 06:53:42.084670 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:42Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:42 crc kubenswrapper[4922]: I1128 06:53:42.096622 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-w9zxj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e1f29751-83a6-4469-b733-50e654026f8c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://679d64be6eca63ee652a4cb6cc5432c6ce549d1de243bdfbde115af634e770e3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ghdft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T06:52:54Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-w9zxj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 
2025-11-28T06:53:42Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:42 crc kubenswrapper[4922]: I1128 06:53:42.098125 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:42 crc kubenswrapper[4922]: I1128 06:53:42.098161 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:42 crc kubenswrapper[4922]: I1128 06:53:42.098175 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:42 crc kubenswrapper[4922]: I1128 06:53:42.098197 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:42 crc kubenswrapper[4922]: I1128 06:53:42.098208 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:42Z","lastTransitionTime":"2025-11-28T06:53:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:53:42 crc kubenswrapper[4922]: I1128 06:53:42.114605 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e3415f94-87c0-4a05-9d74-02ac020c4d35\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8db4ea0b3e2badcbe285f1758b150d335c8c1f4d92478a2de587cf9d345f3640\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a5b5d5b7a735c902e218bbb524cda2f64b167fcced9cc07a47bff7e59cb55115\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\"
:\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://900d4f306d3a3e4386262c1ee1031ad03503f6c262e9305c581397b3bf1a6a60\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://73c8b650a2a0ebc55d6531c14f5c6e5e4a7ffbac511aea8400c6632289aeb275\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://73c8b650a2a0ebc55d6531c14f5c6e5e4a7ffbac511aea8400c6632289aeb275\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T06:52:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T06:52:36Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T06:52:35Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:42Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:42 crc kubenswrapper[4922]: I1128 06:53:42.127163 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://60275d4709ecec957c37c52e762628422258288394fb23bb6190e98253977288\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d7be069e52fb9de73b7d4297a34eaab68e25c764ed60172e87146259d0d0b6ed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:42Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:42 crc kubenswrapper[4922]: I1128 06:53:42.144771 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8de96806bba4cb76f53ccf39f1188732d96955671049b550589a836ed21e0616\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:42Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:42 crc kubenswrapper[4922]: I1128 06:53:42.163756 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located 
when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:42Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:42 crc kubenswrapper[4922]: I1128 06:53:42.179588 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-h8wk6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0498340a-5e95-42bf-a0a6-8ac89a6b8858\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://19e8a18b97500977a51210206bba89633ff9939e989bfe1a6e471f5a23c83520\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zf7vq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b903fc60349ec7d5004f23ec933dce13d646a2c343819495564005dfb4813314\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-da
emon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zf7vq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T06:52:54Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-h8wk6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:42Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:42 crc kubenswrapper[4922]: I1128 06:53:42.198164 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:42Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:42 crc kubenswrapper[4922]: I1128 06:53:42.200870 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:42 crc kubenswrapper[4922]: I1128 06:53:42.200923 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:42 crc kubenswrapper[4922]: I1128 06:53:42.200935 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:42 crc kubenswrapper[4922]: I1128 06:53:42.200952 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:42 crc kubenswrapper[4922]: I1128 06:53:42.200966 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:42Z","lastTransitionTime":"2025-11-28T06:53:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 06:53:42 crc kubenswrapper[4922]: I1128 06:53:42.216565 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-h8wk6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0498340a-5e95-42bf-a0a6-8ac89a6b8858\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://19e8a18b97500977a51210206bba89633ff9939e989bfe1a6e471f5a23c83520\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zf7vq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b903fc60349ec7d5004f23ec933dce13d646a2c343819495564005dfb4813314\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zf7vq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T06:52:54Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-h8wk6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:42Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:42 crc kubenswrapper[4922]: I1128 06:53:42.231201 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-n9b52" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"31348e3e-fe58-4426-98b7-bd9dd404283b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://07e060fc8cd65cfc5a3e2ba86edb408b32996c25dac44f6f70d2f91b837e9da1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:53:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vkhgr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://03d0e97f4ab12f9cccfefde0d5665e45e14db63343fd1273279d41a8e56b060f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:53:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vkhgr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T06:53:
07Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-n9b52\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:42Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:42 crc kubenswrapper[4922]: I1128 06:53:42.250835 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"78d1b075-6126-476f-8318-483aeaa7b542\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d8727c0c5c76c4071561c95b14554063c01a2d7d826bfef19624a346f7079096\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c63926690deadbe1a0b9bd4228c1be8c9d959ae52dc13540e1d7da0ef6ce2b44\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://12e55656cf2a2cadbef853f458f347aabe75bd6b644a48a8c320144a0bbb77f8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7
462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ff005273cedb4a3be19dafd07b2572733446d00c1aa3d89dec6dad79a9b37cb7\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://43f9db3464d315f5b596f15327b9e7cfc2935bb8f18492bcf4aa1b2d8e2e8951\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-28T06:52:53Z\\\",\\\"message\\\":\\\" to sync for client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file\\\\nI1128 06:52:53.805479 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\nI1128 06:52:53.805349 1 requestheader_controller.go:172] Starting RequestHeaderAuthRequestController\\\\nI1128 06:52:53.807455 1 shared_informer.go:313] Waiting for caches to sync for RequestHeaderAuthRequestController\\\\nI1128 06:52:53.805665 1 envvar.go:172] \\\\\\\"Feature gate default state\\\\\\\" feature=\\\\\\\"WatchListClient\\\\\\\" enabled=false\\\\nI1128 06:52:53.807562 1 envvar.go:172] \\\\\\\"Feature gate default state\\\\\\\" feature=\\\\\\\"InformerResourceVersion\\\\\\\" enabled=false\\\\nI1128 06:52:53.805922 1 tlsconfig.go:203] \\\\\\\"Loaded serving cert\\\\\\\" certName=\\\\\\\"serving-cert::/tmp/serving-cert-870734072/tls.crt::/tmp/serving-cert-870734072/tls.key\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"localhost\\\\\\\\\\\\\\\" [serving] validServingFor=[localhost] issuer=\\\\\\\\\\\\\\\"check-endpoints-signer@1764312757\\\\\\\\\\\\\\\" (2025-11-28 06:52:36 +0000 UTC to 2025-12-28 06:52:37 +0000 UTC (now=2025-11-28 06:52:53.805871396 +0000 UTC))\\\\\\\"\\\\nI1128 06:52:53.808356 1 named_certificates.go:53] \\\\\\\"Loaded SNI cert\\\\\\\" index=0 certName=\\\\\\\"self-signed loopback\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"apiserver-loopback-client@1764312768\\\\\\\\\\\\\\\" [serving] validServingFor=[apiserver-loopback-client] issuer=\\\\\\\\\\\\\\\"apiserver-loopback-client-ca@1764312768\\\\\\\\\\\\\\\" (2025-11-28 05:52:47 +0000 UTC to 2026-11-28 05:52:47 +0000 UTC (now=2025-11-28 06:52:53.808326937 +0000 UTC))\\\\\\\"\\\\nI1128 06:52:53.808434 1 secure_serving.go:213] Serving securely on [::]:17697\\\\nI1128 06:52:53.808491 1 genericapiserver.go:683] [graceful-termination] waiting for shutdown to be initiated\\\\nI1128 06:52:53.805962 1 dynamic_serving_content.go:135] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-870734072/tls.crt::/tmp/serving-cert-870734072/tls.key\\\\\\\"\\\\nI1128 06:52:53.808872 1 tlsconfig.go:243] \\\\\\\"Starting DynamicServingCertificateController\\\\\\\"\\\\nF1128 06:52:53.810397 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T06:52:37Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cd17aa15325fbe3a40b81cea5bfec137ef02a8dd2d5f8393be4d206a8360ba89\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:37Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9a1a9e0fb2169a4caf2986906a87573648458725e2571ec377704856d3c16b47\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9a1a9e0fb2169a4caf2986906a87573648458725e2571ec377704856d3c16b47\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T06:52:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T06:52:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T06:52:35Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:42Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:42 crc kubenswrapper[4922]: I1128 06:53:42.264248 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:57Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:57Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4fb859dbed8c4c25965a739a2bd823f90f867a3bb629206994858cd62ba597ff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:42Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:42 crc kubenswrapper[4922]: I1128 06:53:42.280161 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:42Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:42 crc kubenswrapper[4922]: I1128 06:53:42.303619 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:42 crc kubenswrapper[4922]: I1128 06:53:42.303679 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:42 crc kubenswrapper[4922]: I1128 06:53:42.303697 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:42 crc kubenswrapper[4922]: I1128 06:53:42.303724 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:42 crc kubenswrapper[4922]: I1128 06:53:42.303742 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:42Z","lastTransitionTime":"2025-11-28T06:53:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 06:53:42 crc kubenswrapper[4922]: I1128 06:53:42.309360 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-7gdxt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ac5c6b67-2037-400e-8e03-845b47d8ca67\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d0f038ffe6c8ec5ec836379b827ecb71967119efca3f2cd4ce5d3006fa96a153\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dtxdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2c1a545fef7aec25b21dd112c54eeb7b94306cafaf4732a50de9ff10a409c443\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dtxdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\
":\\\"cri-o://6e9942f8e8cdf6ffd04d46b29be5a53fcc8c4cb5d79f50d7747e2fe6eb744ffd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dtxdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e91ea6957087e19d43626438d68bf92a19a6f35325de570738354edd61da5bb0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dtxdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4f56366e2355663eb896c09a6710166b89f794a7f17cb317f6c4299e8b317782\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dtxdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://403e86b4b99e8bc85be77996288705f6ea8ff5da8e9b7dfb09749f4341065b55\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.i
o/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dtxdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e48e9f3c53ca5775a9a467f9da512b7ee4d61cddcd9d7fc0765432f7a716ac96\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3ead32a1ed979f2f568f830c0c57ee284644824f41055dda621deb734bd7dd1b\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-28T06:53:25Z\\\",\\\"message\\\":\\\"perator/openshift-kube-scheduler-operator-5fdd9b5758-7wtcp: failed to check if pod openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-7wtcp is in primary UDN: could not find OVN pod annotation in map[]\\\\nI1128 06:53:25.477598 6343 controller.go:257] Controller udn-host-isolation-manager: error found while processing openshift-multus/network-metrics-daemon-9kfr9: failed to check if pod openshift-multus/network-metrics-daemon-9kfr9 is in primary UDN: could not find OVN pod annotation in map[cluster-autoscaler.kubernetes.io/enable-ds-eviction:false]\\\\nI1128 06:53:25.477616 6343 controller.go:257] Controller udn-host-isolation-manager: error found while processing openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-7tnsd: failed to check if pod openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-7tnsd is in primary UDN: could not find OVN pod annotation in map[openshift.io/required-scc:nonroot-v2 openshift.io/scc:nonroot-v2 seccomp.security.alpha.kubernetes.io/pod:runtime/default]\\\\nE1128 06:53:25.584881 6343 shared_informer.go:316] \\\\\\\"Unhandled Error\\\\\\\" err=\\\\\\\"unable to sync caches for ovn-lb-controller\\\\\\\" logger=\\\\\\\"UnhandledError\\\\\\\"\\\\nI1128 06:53:25.586083 6343 ovnkube.go:599] Stopped ovnkube\\\\nI1128 06:53:25.586127 6343 metrics.go:553] Stopping metrics server at address 
\\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nF1\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T06:53:04Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e48e9f3c53ca5775a9a467f9da512b7ee4d61cddcd9d7fc0765432f7a716ac96\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-28T06:53:41Z\\\",\\\"message\\\":\\\"[0a:58:0a:d9:00:3b 10.217.0.59]} options:{GoMap:map[iface-id-ver:9d751cbb-f2e2-430d-9754-c882a5e924a5 requested-chassis:crc]} port_security:{GoSet:[0a:58:0a:d9:00:3b 10.217.0.59]}] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {960d98b2-dc64-4e93-a4b6-9b19847af71e}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI1128 06:53:41.329271 6771 ovn.go:134] Ensuring zone local for Pod openshift-kube-scheduler/openshift-kube-scheduler-crc in node crc\\\\nI1128 06:53:41.329277 6771 obj_retry.go:386] Retry successful for *v1.Pod openshift-kube-scheduler/openshift-kube-scheduler-crc after 0 failed attempt(s)\\\\nI1128 06:53:41.329282 6771 default_network_controller.go:776] Recording success event on pod openshift-kube-scheduler/openshift-kube-scheduler-crc\\\\nF1128 06:53:41.329275 6771 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: 
fa\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T06:53:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dtxdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d31c99f96b8af9c7b9c47c4ccd0b6485b23c9eacfe293667038042df8330d8a9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dtxdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3aaa3ffb5b99838c7969b3e241ad9d7a0bc4797fac3ec52280914abbb7ca1b24\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d20
99482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3aaa3ffb5b99838c7969b3e241ad9d7a0bc4797fac3ec52280914abbb7ca1b24\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T06:52:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dtxdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T06:52:54Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-7gdxt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:42Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:42 crc kubenswrapper[4922]: I1128 06:53:42.323374 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-z5d9x" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"21211ec8-3baf-4230-9cd8-c641f6bdc0e1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d42d2096b746c6f0a9210409101237a8b283b9baa914633aefc36971712ec634\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g4hdz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.12
6.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T06:52:57Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-z5d9x\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:42Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:42 crc kubenswrapper[4922]: I1128 06:53:42.333444 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-9kfr9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"709beb43-ed88-4a0a-b384-0c463e469964\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:09Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:09Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5p26f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5p26f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T06:53:09Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-9kfr9\": 
Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:42Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:42 crc kubenswrapper[4922]: I1128 06:53:42.350433 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f54a7fc3-dff2-4919-91a9-7defe17b717e\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2b39d53d3c00be07ad6ed44fb37961669118efd3f0a953a39e05c90c9fc30319\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1c84fe4f54f9b9fc76f23f13aaee875e9d127be3dec3e3952893436fb9d94936\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bf8a543b9af64068e2164c1fb7fcf8117e36a3f1a6c6e40df2a63da8fea86612\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",
\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7ef4b355167e11c82fb8067373d0d1b4ec5551157e683043c98f9249082795d2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T06:52:35Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:42Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:42 crc kubenswrapper[4922]: I1128 06:53:42.366025 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:42Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:42 crc kubenswrapper[4922]: I1128 06:53:42.380690 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-w9zxj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e1f29751-83a6-4469-b733-50e654026f8c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://679d64be6eca63ee652a4cb6cc5432c6ce549d1de243bdfbde115af634e770e3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ghdft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T06:52:54Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-w9zxj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 
2025-11-28T06:53:42Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:42 crc kubenswrapper[4922]: I1128 06:53:42.394520 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-jgzjd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b05f16bb-1729-4fd8-883a-4fb960bf4cff\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:41Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:41Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://099001160eba2d5545fdd42a6fac2b9842549a64b5d3fe093c274e073a156e5a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://099001160eba2d5545fdd42a6fac2b9842549a64b5d3fe093c274e073a156e5a\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-28T06:53:41Z\\\",\\\"message\\\":\\\"2025-11-28T06:52:55+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_23e15381-4c29-4769-a2b0-29fb61b368c2\\\\n2025-11-28T06:52:55+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_23e15381-4c29-4769-a2b0-29fb61b368c2 to /host/opt/cni/bin/\\\\n2025-11-28T06:52:55Z [verbose] multus-daemon started\\\\n2025-11-28T06:52:55Z [verbose] Readiness Indicator file check\\\\n2025-11-28T06:53:40Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T06:52:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fwd5b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T06:52:54Z\\\"}}\" for pod \"openshift-multus\"/\"multus-jgzjd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:42Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:42 crc kubenswrapper[4922]: I1128 06:53:42.397828 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 06:53:42 crc kubenswrapper[4922]: I1128 06:53:42.397905 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 06:53:42 crc kubenswrapper[4922]: I1128 06:53:42.397932 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 06:53:42 crc kubenswrapper[4922]: E1128 06:53:42.398168 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 06:53:42 crc kubenswrapper[4922]: I1128 06:53:42.398205 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-9kfr9" Nov 28 06:53:42 crc kubenswrapper[4922]: E1128 06:53:42.398331 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 06:53:42 crc kubenswrapper[4922]: E1128 06:53:42.398416 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-9kfr9" podUID="709beb43-ed88-4a0a-b384-0c463e469964" Nov 28 06:53:42 crc kubenswrapper[4922]: E1128 06:53:42.398556 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 06:53:42 crc kubenswrapper[4922]: I1128 06:53:42.405891 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:42 crc kubenswrapper[4922]: I1128 06:53:42.405942 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:42 crc kubenswrapper[4922]: I1128 06:53:42.405959 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:42 crc kubenswrapper[4922]: I1128 06:53:42.406027 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:42 crc kubenswrapper[4922]: I1128 06:53:42.406048 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:42Z","lastTransitionTime":"2025-11-28T06:53:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 06:53:42 crc kubenswrapper[4922]: I1128 06:53:42.416876 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-xm948" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"28b50482-ffec-406c-9ff9-9604bce5d5d5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8329d62934c99c8561a76ffc873ea44b49549e6be23697ea4003dcd42798f914\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:53:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-76zhj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ea8d65e8ee7eb92aa290261e8f51b41b46819bc513aaca5c3bbdf3aa90a4acf1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ea8d65e8ee7eb92aa290261e8f51b41b46819bc513aaca5c3bbdf3aa90a4acf1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T06:52:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-76zhj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b52be6274c308e05417b43b1a405e2156837ac5b1e751cf1bf07f1820e5072c1\
\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b52be6274c308e05417b43b1a405e2156837ac5b1e751cf1bf07f1820e5072c1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T06:52:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T06:52:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-76zhj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2e49c9118ef8a51223ca7a5ed0e966458c25e01d5fe81bffeecf85e3c958177a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2e49c9118ef8a51223ca7a5ed0e966458c25e01d5fe81bffeecf85e3c958177a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T06:52:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T06:52:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-76zhj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://10808a5202288b64a0583c474df43f7ad4846b876c071e3797e9403416dbd4e5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://10808a5202288b64a0583c474df43f7ad4846b876c071e3797e9403416dbd4e5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T06:52:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T06:52:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"
mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-76zhj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f17f7a3f2eae1a08db3ab5d237156d3555583ed11bdf121c4138312913bc2ad1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f17f7a3f2eae1a08db3ab5d237156d3555583ed11bdf121c4138312913bc2ad1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T06:53:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T06:52:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-76zhj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8c558e4eb231b58b703d70d6da8454e7128ac7fc7138897bedcbdeb9e801a08b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8c558e4eb231b58b703d70d6da8454e7128ac7fc7138897bedcbdeb9e801a08b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T06:53:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T06:53:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-76zhj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T06:52:54Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-xm948\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:42Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:42 crc kubenswrapper[4922]: I1128 06:53:42.430028 4922 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e3415f94-87c0-4a05-9d74-02ac020c4d35\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8db4ea0b3e2badcbe285f1758b150d335c8c1f4d92478a2de587cf9d345f3640\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a5b5d5b7a735c902e218bbb524cda2f64b167fcced9cc07a47bff7e59cb55115\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://900d4f306d3a3e4386262c1ee1031ad03503f6c262e9305c581397b3bf1a6a60\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\
\\"cri-o://73c8b650a2a0ebc55d6531c14f5c6e5e4a7ffbac511aea8400c6632289aeb275\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://73c8b650a2a0ebc55d6531c14f5c6e5e4a7ffbac511aea8400c6632289aeb275\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T06:52:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T06:52:36Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T06:52:35Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:42Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:42 crc kubenswrapper[4922]: I1128 06:53:42.441793 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://60275d4709ecec957c37c52e762628422258288394fb23bb6190e98253977288\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d7be069e52fb9de73b7d4297a34eaab68e25c764ed60172e87146259d0d0b6ed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b
17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:42Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:42 crc kubenswrapper[4922]: I1128 06:53:42.464589 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8de96806bba4cb76f53ccf39f1188732d96955671049b550589a836ed21e0616\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:42Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:42 crc kubenswrapper[4922]: I1128 06:53:42.508748 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Nov 28 06:53:42 crc kubenswrapper[4922]: I1128 06:53:42.508814 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:42 crc kubenswrapper[4922]: I1128 06:53:42.508833 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:42 crc kubenswrapper[4922]: I1128 06:53:42.508861 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:42 crc kubenswrapper[4922]: I1128 06:53:42.508879 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:42Z","lastTransitionTime":"2025-11-28T06:53:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:53:42 crc kubenswrapper[4922]: I1128 06:53:42.611546 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:42 crc kubenswrapper[4922]: I1128 06:53:42.611590 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:42 crc kubenswrapper[4922]: I1128 06:53:42.611601 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:42 crc kubenswrapper[4922]: I1128 06:53:42.611618 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:42 crc kubenswrapper[4922]: I1128 06:53:42.611630 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:42Z","lastTransitionTime":"2025-11-28T06:53:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:53:42 crc kubenswrapper[4922]: I1128 06:53:42.714440 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:42 crc kubenswrapper[4922]: I1128 06:53:42.714523 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:42 crc kubenswrapper[4922]: I1128 06:53:42.714542 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:42 crc kubenswrapper[4922]: I1128 06:53:42.714566 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:42 crc kubenswrapper[4922]: I1128 06:53:42.714584 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:42Z","lastTransitionTime":"2025-11-28T06:53:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 06:53:42 crc kubenswrapper[4922]: I1128 06:53:42.818027 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:42 crc kubenswrapper[4922]: I1128 06:53:42.818068 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:42 crc kubenswrapper[4922]: I1128 06:53:42.818077 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:42 crc kubenswrapper[4922]: I1128 06:53:42.818091 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:42 crc kubenswrapper[4922]: I1128 06:53:42.818100 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:42Z","lastTransitionTime":"2025-11-28T06:53:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:53:42 crc kubenswrapper[4922]: I1128 06:53:42.877294 4922 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-jgzjd_b05f16bb-1729-4fd8-883a-4fb960bf4cff/kube-multus/0.log" Nov 28 06:53:42 crc kubenswrapper[4922]: I1128 06:53:42.877493 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-jgzjd" event={"ID":"b05f16bb-1729-4fd8-883a-4fb960bf4cff","Type":"ContainerStarted","Data":"18ece98b3e5c71e3aa7b48051d91a0684f108688cd8a31e00422d5d0b047a76e"} Nov 28 06:53:42 crc kubenswrapper[4922]: I1128 06:53:42.880091 4922 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-7gdxt_ac5c6b67-2037-400e-8e03-845b47d8ca67/ovnkube-controller/2.log" Nov 28 06:53:42 crc kubenswrapper[4922]: I1128 06:53:42.885284 4922 scope.go:117] "RemoveContainer" containerID="e48e9f3c53ca5775a9a467f9da512b7ee4d61cddcd9d7fc0765432f7a716ac96" Nov 28 06:53:42 crc kubenswrapper[4922]: E1128 06:53:42.885566 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 20s restarting failed container=ovnkube-controller pod=ovnkube-node-7gdxt_openshift-ovn-kubernetes(ac5c6b67-2037-400e-8e03-845b47d8ca67)\"" pod="openshift-ovn-kubernetes/ovnkube-node-7gdxt" podUID="ac5c6b67-2037-400e-8e03-845b47d8ca67" Nov 28 06:53:42 crc kubenswrapper[4922]: I1128 06:53:42.894825 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:57Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:57Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4fb859dbed8c4c25965a739a2bd823f90f867a3bb629206994858cd62ba597ff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:42Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:42 crc kubenswrapper[4922]: I1128 06:53:42.914800 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:42Z is after 2025-08-24T17:21:41Z"
Nov 28 06:53:42 crc kubenswrapper[4922]: I1128 06:53:42.920700 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 06:53:42 crc kubenswrapper[4922]: I1128 06:53:42.920762 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 06:53:42 crc kubenswrapper[4922]: I1128 06:53:42.920778 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 06:53:42 crc kubenswrapper[4922]: I1128 06:53:42.920838 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 06:53:42 crc kubenswrapper[4922]: I1128 06:53:42.920855 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:42Z","lastTransitionTime":"2025-11-28T06:53:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 06:53:42 crc kubenswrapper[4922]: I1128 06:53:42.945364 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-7gdxt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ac5c6b67-2037-400e-8e03-845b47d8ca67\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d0f038ffe6c8ec5ec836379b827ecb71967119efca3f2cd4ce5d3006fa96a153\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dtxdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2c1a545fef7aec25b21dd112c54eeb7b94306cafaf4732a50de9ff10a409c443\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dtxdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\
":\\\"cri-o://6e9942f8e8cdf6ffd04d46b29be5a53fcc8c4cb5d79f50d7747e2fe6eb744ffd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dtxdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e91ea6957087e19d43626438d68bf92a19a6f35325de570738354edd61da5bb0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dtxdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4f56366e2355663eb896c09a6710166b89f794a7f17cb317f6c4299e8b317782\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dtxdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://403e86b4b99e8bc85be77996288705f6ea8ff5da8e9b7dfb09749f4341065b55\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.i
o/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dtxdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e48e9f3c53ca5775a9a467f9da512b7ee4d61cddcd9d7fc0765432f7a716ac96\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3ead32a1ed979f2f568f830c0c57ee284644824f41055dda621deb734bd7dd1b\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-28T06:53:25Z\\\",\\\"message\\\":\\\"perator/openshift-kube-scheduler-operator-5fdd9b5758-7wtcp: failed to check if pod openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-7wtcp is in primary UDN: could not find OVN pod annotation in map[]\\\\nI1128 06:53:25.477598 6343 controller.go:257] Controller udn-host-isolation-manager: error found while processing openshift-multus/network-metrics-daemon-9kfr9: failed to check if pod openshift-multus/network-metrics-daemon-9kfr9 is in primary UDN: could not find OVN pod annotation in map[cluster-autoscaler.kubernetes.io/enable-ds-eviction:false]\\\\nI1128 06:53:25.477616 6343 controller.go:257] Controller udn-host-isolation-manager: error found while processing openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-7tnsd: failed to check if pod openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-7tnsd is in primary UDN: could not find OVN pod annotation in map[openshift.io/required-scc:nonroot-v2 openshift.io/scc:nonroot-v2 seccomp.security.alpha.kubernetes.io/pod:runtime/default]\\\\nE1128 06:53:25.584881 6343 shared_informer.go:316] \\\\\\\"Unhandled Error\\\\\\\" err=\\\\\\\"unable to sync caches for ovn-lb-controller\\\\\\\" logger=\\\\\\\"UnhandledError\\\\\\\"\\\\nI1128 06:53:25.586083 6343 ovnkube.go:599] Stopped ovnkube\\\\nI1128 06:53:25.586127 6343 metrics.go:553] Stopping metrics server at address 
\\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nF1\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T06:53:04Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e48e9f3c53ca5775a9a467f9da512b7ee4d61cddcd9d7fc0765432f7a716ac96\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-28T06:53:41Z\\\",\\\"message\\\":\\\"[0a:58:0a:d9:00:3b 10.217.0.59]} options:{GoMap:map[iface-id-ver:9d751cbb-f2e2-430d-9754-c882a5e924a5 requested-chassis:crc]} port_security:{GoSet:[0a:58:0a:d9:00:3b 10.217.0.59]}] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {960d98b2-dc64-4e93-a4b6-9b19847af71e}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI1128 06:53:41.329271 6771 ovn.go:134] Ensuring zone local for Pod openshift-kube-scheduler/openshift-kube-scheduler-crc in node crc\\\\nI1128 06:53:41.329277 6771 obj_retry.go:386] Retry successful for *v1.Pod openshift-kube-scheduler/openshift-kube-scheduler-crc after 0 failed attempt(s)\\\\nI1128 06:53:41.329282 6771 default_network_controller.go:776] Recording success event on pod openshift-kube-scheduler/openshift-kube-scheduler-crc\\\\nF1128 06:53:41.329275 6771 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: 
fa\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T06:53:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dtxdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d31c99f96b8af9c7b9c47c4ccd0b6485b23c9eacfe293667038042df8330d8a9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dtxdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3aaa3ffb5b99838c7969b3e241ad9d7a0bc4797fac3ec52280914abbb7ca1b24\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d20
99482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3aaa3ffb5b99838c7969b3e241ad9d7a0bc4797fac3ec52280914abbb7ca1b24\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T06:52:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dtxdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T06:52:54Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-7gdxt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:42Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:42 crc kubenswrapper[4922]: I1128 06:53:42.963350 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-n9b52" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"31348e3e-fe58-4426-98b7-bd9dd404283b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://07e060fc8cd65cfc5a3e2ba86edb408b32996c25dac44f6f70d2f91b837e9da1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:53:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vkhgr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnl
y\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://03d0e97f4ab12f9cccfefde0d5665e45e14db63343fd1273279d41a8e56b060f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:53:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vkhgr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T06:53:07Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-n9b52\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:42Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:42 crc kubenswrapper[4922]: I1128 06:53:42.982792 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"78d1b075-6126-476f-8318-483aeaa7b542\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d8727c0c5c76c4071561c95b14554063c01a2d7d826bfef19624a346f7079096\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c63926690deadbe1a0b9bd4228c1be8c9d959ae52dc13540e1d7da0ef6ce2b44\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://12e55656cf2a2cadbef853f458f347aabe75bd6b644a48a8c320144a0bbb77f8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ff005273cedb4a3be19dafd07b2572733446d00c1aa3d89dec6dad79a9b37cb7\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://43f9db3464d315f5b596f15327b9e7cfc2935bb8f18492bcf4aa1b2d8e2e8951\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-28T06:52:53Z\\\",\\\"message\\\":\\\" to sync for client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file\\\\nI1128 06:52:53.805479 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\nI1128 06:52:53.805349 1 requestheader_controller.go:172] Starting RequestHeaderAuthRequestController\\\\nI1128 06:52:53.807455 1 shared_informer.go:313] Waiting for caches to sync for RequestHeaderAuthRequestController\\\\nI1128 06:52:53.805665 1 envvar.go:172] \\\\\\\"Feature gate default state\\\\\\\" feature=\\\\\\\"WatchListClient\\\\\\\" enabled=false\\\\nI1128 06:52:53.807562 1 envvar.go:172] \\\\\\\"Feature gate default state\\\\\\\" feature=\\\\\\\"InformerResourceVersion\\\\\\\" enabled=false\\\\nI1128 06:52:53.805922 1 tlsconfig.go:203] \\\\\\\"Loaded serving cert\\\\\\\" certName=\\\\\\\"serving-cert::/tmp/serving-cert-870734072/tls.crt::/tmp/serving-cert-870734072/tls.key\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"localhost\\\\\\\\\\\\\\\" [serving] validServingFor=[localhost] issuer=\\\\\\\\\\\\\\\"check-endpoints-signer@1764312757\\\\\\\\\\\\\\\" (2025-11-28 06:52:36 +0000 UTC to 2025-12-28 06:52:37 +0000 UTC (now=2025-11-28 06:52:53.805871396 +0000 UTC))\\\\\\\"\\\\nI1128 06:52:53.808356 1 named_certificates.go:53] \\\\\\\"Loaded SNI cert\\\\\\\" index=0 certName=\\\\\\\"self-signed loopback\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"apiserver-loopback-client@1764312768\\\\\\\\\\\\\\\" [serving] validServingFor=[apiserver-loopback-client] issuer=\\\\\\\\\\\\\\\"apiserver-loopback-client-ca@1764312768\\\\\\\\\\\\\\\" (2025-11-28 05:52:47 +0000 UTC to 2026-11-28 05:52:47 +0000 UTC (now=2025-11-28 06:52:53.808326937 +0000 UTC))\\\\\\\"\\\\nI1128 06:52:53.808434 1 secure_serving.go:213] Serving securely on [::]:17697\\\\nI1128 06:52:53.808491 1 genericapiserver.go:683] [graceful-termination] waiting for shutdown to be initiated\\\\nI1128 06:52:53.805962 1 dynamic_serving_content.go:135] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-870734072/tls.crt::/tmp/serving-cert-870734072/tls.key\\\\\\\"\\\\nI1128 06:52:53.808872 1 tlsconfig.go:243] \\\\\\\"Starting DynamicServingCertificateController\\\\\\\"\\\\nF1128 06:52:53.810397 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T06:52:37Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cd17aa15325fbe3a40b81cea5bfec137ef02a8dd2d5f8393be4d206a8360ba89\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:37Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9a1a9e0fb2169a4caf2986906a87573648458725e2571ec377704856d3c16b47\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9a1a9e0fb2169a4caf2986906a87573648458725e2571ec377704856d3c16b47\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T06:52:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T06:52:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T06:52:35Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:42Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:43 crc kubenswrapper[4922]: I1128 06:53:43.006087 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:43Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:43 crc kubenswrapper[4922]: I1128 06:53:43.019572 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-w9zxj" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"e1f29751-83a6-4469-b733-50e654026f8c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://679d64be6eca63ee652a4cb6cc5432c6ce549d1de243bdfbde115af634e770e3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ghdft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T06:52:54Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-w9zxj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:43Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:43 crc kubenswrapper[4922]: I1128 06:53:43.024457 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:43 crc kubenswrapper[4922]: I1128 06:53:43.024560 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:43 crc kubenswrapper[4922]: I1128 06:53:43.024589 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:43 crc kubenswrapper[4922]: I1128 06:53:43.024633 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:43 crc kubenswrapper[4922]: I1128 06:53:43.024658 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:43Z","lastTransitionTime":"2025-11-28T06:53:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: 
no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:53:43 crc kubenswrapper[4922]: I1128 06:53:43.041612 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-jgzjd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b05f16bb-1729-4fd8-883a-4fb960bf4cff\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://18ece98b3e5c71e3aa7b48051d91a0684f108688cd8a31e00422d5d0b047a76e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://099001160eba2d5545fdd42a6fac2b9842549a64b5d3fe093c274e073a156e5a\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-28T06:53:41Z\\\",\\\"message\\\":\\\"2025-11-28T06:52:55+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_23e15381-4c29-4769-a2b0-29fb61b368c2\\\\n2025-11-28T06:52:55+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_23e15381-4c29-4769-a2b0-29fb61b368c2 to /host/opt/cni/bin/\\\\n2025-11-28T06:52:55Z [verbose] multus-daemon started\\\\n2025-11-28T06:52:55Z [verbose] Readiness Indicator file check\\\\n2025-11-28T06:53:40Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T06:52:55Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:53:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fwd5b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T06:52:54Z\\\"}}\" for pod \"openshift-multus\"/\"multus-jgzjd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:43Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:43 crc kubenswrapper[4922]: I1128 06:53:43.067813 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-xm948" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"28b50482-ffec-406c-9ff9-9604bce5d5d5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8329d62934c99c8561a76ffc873ea44b49549e6be23697ea4003dcd42798f914\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:53:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-76zhj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ea8d65e8ee7eb92aa290261e8f51b41b46819bc513aaca5c3bbdf3aa90a4acf1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ea8d65e8ee7eb92aa290261e8f51b41b46819bc513aaca5c3bbdf3aa90a4acf1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T06:52:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-76zhj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b52be6274c308e05417b43b1a405e2156837ac5b1e751cf1bf07f1820e5072c1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b52be6274c308e05417b43b1a405e2156837ac5b1e751cf1bf07f1820e5072c1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T06:52:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T06:52:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-76zhj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2e49c9118ef8a51223ca7a5ed0e966458c25e01d5fe81bffeecf85e3c958177a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2e49c9118ef8a51223ca7a5ed0e966458c25e01d5fe81bffeecf85e3c958177a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T06:52:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T06:52:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-76zhj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://10808a5202288b64a0583c474df43f7ad4846b876c071e3797e9403416dbd4e5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://10808a5202288b64a0583c474df43f7ad4846b876c071e3797e9403416dbd4e5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T06:52:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T06:52:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-76zhj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f17f7a3f2eae1a08db3ab5d237156d3555583ed11bdf121c4138312913bc2ad1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f17f7a3f2eae1a08db3ab5d237156d3555583ed11bdf121c4138312913bc2ad1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T06:53:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T06:52:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-76zhj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8c558e4eb231b58b703d70d6da8454e7128ac7fc7138897bedcbdeb9e801a08b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8c558e4eb231b58b703d70d6da8454e7128ac7fc7138897bedcbdeb9e801a08b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T06:53:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T06:53:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-76zhj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T06:52:54Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-xm948\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:43Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:43 crc kubenswrapper[4922]: I1128 06:53:43.086133 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-z5d9x" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"21211ec8-3baf-4230-9cd8-c641f6bdc0e1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d42d2096b746c6f0a9210409101237a8b283b9baa914633aefc36971712ec634\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g4hdz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T06:52:57Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-z5d9x\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:43Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:43 crc kubenswrapper[4922]: I1128 06:53:43.103793 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-9kfr9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"709beb43-ed88-4a0a-b384-0c463e469964\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:09Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:09Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5p26f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5p26f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T06:53:09Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-9kfr9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:43Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:43 crc kubenswrapper[4922]: I1128 06:53:43.127507 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:43 crc kubenswrapper[4922]: I1128 06:53:43.127555 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:43 crc kubenswrapper[4922]: I1128 06:53:43.127572 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:43 crc kubenswrapper[4922]: I1128 06:53:43.127597 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:43 crc kubenswrapper[4922]: I1128 06:53:43.127615 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:43Z","lastTransitionTime":"2025-11-28T06:53:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 06:53:43 crc kubenswrapper[4922]: I1128 06:53:43.128609 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f54a7fc3-dff2-4919-91a9-7defe17b717e\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2b39d53d3c00be07ad6ed44fb37961669118efd3f0a953a39e05c90c9fc30319\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1c84fe4f54f9b9fc76f23f13aaee875e9d127be3dec3e3952893436fb9d94936\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bf8a543b9af64068e2164c1fb7fcf8117e36a3f1a6c6e40df2a63da8fea86612\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath
\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7ef4b355167e11c82fb8067373d0d1b4ec5551157e683043c98f9249082795d2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T06:52:35Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:43Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:43 crc kubenswrapper[4922]: I1128 06:53:43.151728 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://60275d4709ecec957c37c52e762628422258288394fb23bb6190e98253977288\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d7be069e52fb9de73b7d4297a34eaab68e25c764ed60172e87146259d0d0b6ed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:43Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:43 crc kubenswrapper[4922]: I1128 06:53:43.170631 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8de96806bba4cb76f53ccf39f1188732d96955671049b550589a836ed21e0616\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:43Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:43 crc kubenswrapper[4922]: I1128 06:53:43.192130 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"e3415f94-87c0-4a05-9d74-02ac020c4d35\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8db4ea0b3e2badcbe285f1758b150d335c8c1f4d92478a2de587cf9d345f3640\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a5b5d5b7a735c902e218bbb524cda2f64b167fcced9cc07a47bff7e59cb55115\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://900d4f306d3a3e4386262c1ee1031ad03503f6c262e9305c581397b3bf1a6a60\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://73c8b650a2a0ebc55d6531c14f5c6e5e4a7ffbac511aea8400c6632289aeb275\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://73c8b650a2a0ebc55d6531c14f5c6e5e4a7ffbac511aea8400c6632289aeb275\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T06:52:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T06:52:36Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T06:52:35Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:43Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:43 crc kubenswrapper[4922]: I1128 06:53:43.210626 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:43Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:43 crc kubenswrapper[4922]: I1128 06:53:43.230102 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-h8wk6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0498340a-5e95-42bf-a0a6-8ac89a6b8858\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://19e8a18b97500977a51210206bba89633ff9939e989bfe1a6e471f5a23c83520\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zf7vq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b903fc60349ec7d5004f23ec933dce13d646a2c343819495564005dfb4813314\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":tru
e,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zf7vq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T06:52:54Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-h8wk6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:43Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:43 crc kubenswrapper[4922]: I1128 06:53:43.230426 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:43 crc kubenswrapper[4922]: I1128 06:53:43.230487 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:43 crc kubenswrapper[4922]: I1128 06:53:43.230507 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:43 crc kubenswrapper[4922]: I1128 06:53:43.230534 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:43 crc kubenswrapper[4922]: I1128 06:53:43.230554 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:43Z","lastTransitionTime":"2025-11-28T06:53:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 06:53:43 crc kubenswrapper[4922]: I1128 06:53:43.255107 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-7gdxt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ac5c6b67-2037-400e-8e03-845b47d8ca67\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d0f038ffe6c8ec5ec836379b827ecb71967119efca3f2cd4ce5d3006fa96a153\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dtxdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2c1a545fef7aec25b21dd112c54eeb7b94306cafaf4732a50de9ff10a409c443\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dtxdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\
":\\\"cri-o://6e9942f8e8cdf6ffd04d46b29be5a53fcc8c4cb5d79f50d7747e2fe6eb744ffd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dtxdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e91ea6957087e19d43626438d68bf92a19a6f35325de570738354edd61da5bb0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dtxdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4f56366e2355663eb896c09a6710166b89f794a7f17cb317f6c4299e8b317782\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dtxdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://403e86b4b99e8bc85be77996288705f6ea8ff5da8e9b7dfb09749f4341065b55\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.i
o/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dtxdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e48e9f3c53ca5775a9a467f9da512b7ee4d61cddcd9d7fc0765432f7a716ac96\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e48e9f3c53ca5775a9a467f9da512b7ee4d61cddcd9d7fc0765432f7a716ac96\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-28T06:53:41Z\\\",\\\"message\\\":\\\"[0a:58:0a:d9:00:3b 10.217.0.59]} options:{GoMap:map[iface-id-ver:9d751cbb-f2e2-430d-9754-c882a5e924a5 requested-chassis:crc]} port_security:{GoSet:[0a:58:0a:d9:00:3b 10.217.0.59]}] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {960d98b2-dc64-4e93-a4b6-9b19847af71e}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI1128 06:53:41.329271 6771 ovn.go:134] Ensuring zone local for Pod openshift-kube-scheduler/openshift-kube-scheduler-crc in node crc\\\\nI1128 06:53:41.329277 6771 obj_retry.go:386] Retry successful for *v1.Pod openshift-kube-scheduler/openshift-kube-scheduler-crc after 0 failed attempt(s)\\\\nI1128 06:53:41.329282 6771 default_network_controller.go:776] Recording success event on pod openshift-kube-scheduler/openshift-kube-scheduler-crc\\\\nF1128 06:53:41.329275 6771 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: fa\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T06:53:40Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed 
container=ovnkube-controller pod=ovnkube-node-7gdxt_openshift-ovn-kubernetes(ac5c6b67-2037-400e-8e03-845b47d8ca67)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dtxdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d31c99f96b8af9c7b9c47c4ccd0b6485b23c9eacfe293667038042df8330d8a9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dtxdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3aaa3ffb5b99838c7969b3e241ad9d7a0bc4797fac3ec52280914abbb7ca1b24\\\",\\\"image\\\":\
\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3aaa3ffb5b99838c7969b3e241ad9d7a0bc4797fac3ec52280914abbb7ca1b24\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T06:52:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dtxdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T06:52:54Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-7gdxt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:43Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:43 crc kubenswrapper[4922]: I1128 06:53:43.271503 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-n9b52" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"31348e3e-fe58-4426-98b7-bd9dd404283b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://07e060fc8cd65cfc5a3e2ba86edb408b32996c25dac44f6f70d2f91b837e9da1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:53:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\
\\":\\\"kube-api-access-vkhgr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://03d0e97f4ab12f9cccfefde0d5665e45e14db63343fd1273279d41a8e56b060f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:53:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vkhgr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T06:53:07Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-n9b52\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:43Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:43 crc kubenswrapper[4922]: I1128 06:53:43.291965 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"78d1b075-6126-476f-8318-483aeaa7b542\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d8727c0c5c76c4071561c95b14554063c01a2d7d826bfef19624a346f7079096\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c63926690deadbe1a0b9bd4228c1be8c9d959ae52dc13540e1d7da0ef6ce2b44\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://12e55656cf2a2cadbef853f458f347aabe75bd6b644a48a8c320144a0bbb77f8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ff005273cedb4a3be19dafd07b2572733446d00c1aa3d89dec6dad79a9b37cb7\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://43f9db3464d315f5b596f15327b9e7cfc2935bb8f18492bcf4aa1b2d8e2e8951\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-28T06:52:53Z\\\",\\\"message\\\":\\\" to sync for client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file\\\\nI1128 06:52:53.805479 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\nI1128 06:52:53.805349 1 requestheader_controller.go:172] Starting RequestHeaderAuthRequestController\\\\nI1128 06:52:53.807455 1 shared_informer.go:313] Waiting for caches to sync for RequestHeaderAuthRequestController\\\\nI1128 06:52:53.805665 1 envvar.go:172] \\\\\\\"Feature gate default state\\\\\\\" feature=\\\\\\\"WatchListClient\\\\\\\" enabled=false\\\\nI1128 06:52:53.807562 1 envvar.go:172] \\\\\\\"Feature gate default state\\\\\\\" feature=\\\\\\\"InformerResourceVersion\\\\\\\" enabled=false\\\\nI1128 06:52:53.805922 1 tlsconfig.go:203] \\\\\\\"Loaded serving cert\\\\\\\" certName=\\\\\\\"serving-cert::/tmp/serving-cert-870734072/tls.crt::/tmp/serving-cert-870734072/tls.key\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"localhost\\\\\\\\\\\\\\\" [serving] validServingFor=[localhost] issuer=\\\\\\\\\\\\\\\"check-endpoints-signer@1764312757\\\\\\\\\\\\\\\" (2025-11-28 06:52:36 +0000 UTC to 2025-12-28 06:52:37 +0000 UTC (now=2025-11-28 06:52:53.805871396 +0000 UTC))\\\\\\\"\\\\nI1128 06:52:53.808356 1 named_certificates.go:53] \\\\\\\"Loaded SNI cert\\\\\\\" index=0 certName=\\\\\\\"self-signed loopback\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"apiserver-loopback-client@1764312768\\\\\\\\\\\\\\\" [serving] validServingFor=[apiserver-loopback-client] issuer=\\\\\\\\\\\\\\\"apiserver-loopback-client-ca@1764312768\\\\\\\\\\\\\\\" (2025-11-28 05:52:47 +0000 UTC to 2026-11-28 05:52:47 +0000 UTC (now=2025-11-28 06:52:53.808326937 +0000 UTC))\\\\\\\"\\\\nI1128 06:52:53.808434 1 secure_serving.go:213] Serving securely on [::]:17697\\\\nI1128 06:52:53.808491 1 genericapiserver.go:683] [graceful-termination] waiting for shutdown to be initiated\\\\nI1128 06:52:53.805962 1 dynamic_serving_content.go:135] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-870734072/tls.crt::/tmp/serving-cert-870734072/tls.key\\\\\\\"\\\\nI1128 06:52:53.808872 1 tlsconfig.go:243] \\\\\\\"Starting DynamicServingCertificateController\\\\\\\"\\\\nF1128 06:52:53.810397 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T06:52:37Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cd17aa15325fbe3a40b81cea5bfec137ef02a8dd2d5f8393be4d206a8360ba89\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:37Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9a1a9e0fb2169a4caf2986906a87573648458725e2571ec377704856d3c16b47\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9a1a9e0fb2169a4caf2986906a87573648458725e2571ec377704856d3c16b47\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T06:52:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T06:52:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T06:52:35Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:43Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:43 crc kubenswrapper[4922]: I1128 06:53:43.309253 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:57Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:57Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4fb859dbed8c4c25965a739a2bd823f90f867a3bb629206994858cd62ba597ff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:43Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:43 crc kubenswrapper[4922]: I1128 06:53:43.328213 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:43Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:43 crc kubenswrapper[4922]: I1128 06:53:43.334170 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:43 crc kubenswrapper[4922]: I1128 06:53:43.334250 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:43 crc kubenswrapper[4922]: I1128 06:53:43.334269 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:43 crc kubenswrapper[4922]: I1128 06:53:43.334293 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:43 crc kubenswrapper[4922]: I1128 06:53:43.334313 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:43Z","lastTransitionTime":"2025-11-28T06:53:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 06:53:43 crc kubenswrapper[4922]: I1128 06:53:43.346275 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-xm948" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"28b50482-ffec-406c-9ff9-9604bce5d5d5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8329d62934c99c8561a76ffc873ea44b49549e6be23697ea4003dcd42798f914\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:53:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-76zhj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ea8d65e8ee7eb92aa290261e8f51b41b46819bc513aaca5c3bbdf3aa90a4acf1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ea8d65e8ee7eb92aa290261e8f51b41b46819bc513aaca5c3bbdf3aa90a4acf1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T06:52:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-76zhj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b52be6274c308e05417b43b1a405e2156837ac5b1e751cf1bf07f1820e5072c1\
\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b52be6274c308e05417b43b1a405e2156837ac5b1e751cf1bf07f1820e5072c1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T06:52:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T06:52:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-76zhj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2e49c9118ef8a51223ca7a5ed0e966458c25e01d5fe81bffeecf85e3c958177a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2e49c9118ef8a51223ca7a5ed0e966458c25e01d5fe81bffeecf85e3c958177a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T06:52:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T06:52:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-76zhj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://10808a5202288b64a0583c474df43f7ad4846b876c071e3797e9403416dbd4e5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://10808a5202288b64a0583c474df43f7ad4846b876c071e3797e9403416dbd4e5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T06:52:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T06:52:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"
mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-76zhj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f17f7a3f2eae1a08db3ab5d237156d3555583ed11bdf121c4138312913bc2ad1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f17f7a3f2eae1a08db3ab5d237156d3555583ed11bdf121c4138312913bc2ad1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T06:53:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T06:52:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-76zhj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8c558e4eb231b58b703d70d6da8454e7128ac7fc7138897bedcbdeb9e801a08b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8c558e4eb231b58b703d70d6da8454e7128ac7fc7138897bedcbdeb9e801a08b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T06:53:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T06:53:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-76zhj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T06:52:54Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-xm948\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:43Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:43 crc kubenswrapper[4922]: I1128 06:53:43.358469 4922 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-image-registry/node-ca-z5d9x" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"21211ec8-3baf-4230-9cd8-c641f6bdc0e1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d42d2096b746c6f0a9210409101237a8b283b9baa914633aefc36971712ec634\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g4hdz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T06:52:57Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-z5d9x\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:43Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:43 crc kubenswrapper[4922]: I1128 06:53:43.372644 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-9kfr9" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"709beb43-ed88-4a0a-b384-0c463e469964\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:09Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:09Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5p26f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5p26f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T06:53:09Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-9kfr9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:43Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:43 crc kubenswrapper[4922]: I1128 06:53:43.397563 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f54a7fc3-dff2-4919-91a9-7defe17b717e\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2b39d53d3c00be07ad6ed44fb37961669118efd3f0a953a39e05c90c9fc30319\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1c84fe4f54f9b9fc76f23f13aaee875e9d127be3dec3e3952893436fb9d94936\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bf8a543b9af64068e2164c1fb7fcf8117e36a3f1a6c6e40df2a63da8fea86612\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7ef4b355167e11c82fb8067373d0d1b4ec5551157e683043c98f9249082795d2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T06:52:35Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:43Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:43 crc kubenswrapper[4922]: I1128 06:53:43.412770 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:43Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:43 crc kubenswrapper[4922]: I1128 06:53:43.428952 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-w9zxj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e1f29751-83a6-4469-b733-50e654026f8c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://679d64be6eca63ee652a4cb6cc5432c6ce549d1de243bdfbde115af634e770e3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ghdft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T06:52:54Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-w9zxj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 
2025-11-28T06:53:43Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:43 crc kubenswrapper[4922]: I1128 06:53:43.438751 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:43 crc kubenswrapper[4922]: I1128 06:53:43.438809 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:43 crc kubenswrapper[4922]: I1128 06:53:43.438827 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:43 crc kubenswrapper[4922]: I1128 06:53:43.438852 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:43 crc kubenswrapper[4922]: I1128 06:53:43.438867 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:43Z","lastTransitionTime":"2025-11-28T06:53:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:53:43 crc kubenswrapper[4922]: I1128 06:53:43.451288 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-jgzjd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b05f16bb-1729-4fd8-883a-4fb960bf4cff\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://18ece98b3e5c71e3aa7b48051d91a0684f108688cd8a31e00422d5d0b047a76e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://099001160eba2d5545fdd42a6fac2b9842549a64b5d3fe093c274e073a156e5a\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-28T06:53:41Z\\\",\\\"message\\\":\\\"2025-11-28T06:52:55+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_23e15381-4c29-4769-a2b0-29fb61b368c2\\\\n2025-11-28T06:52:55+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_23e15381-4c29-4769-a2b0-29fb61b368c2 to /host/opt/cni/bin/\\\\n2025-11-28T06:52:55Z [verbose] multus-daemon started\\\\n2025-11-28T06:52:55Z [verbose] Readiness Indicator file check\\\\n2025-11-28T06:53:40Z [error] have you checked that your default network is ready? 
still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T06:52:55Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:53:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fwd5b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T06:52:54Z\\\"}}\" for pod \"openshift-multus\"/\"multus-jgzjd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:43Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:43 crc kubenswrapper[4922]: I1128 06:53:43.463248 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"e3415f94-87c0-4a05-9d74-02ac020c4d35\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8db4ea0b3e2badcbe285f1758b150d335c8c1f4d92478a2de587cf9d345f3640\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a5b5d5b7a735c902e218bbb524cda2f64b167fcced9cc07a47bff7e59cb55115\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://900d4f306d3a3e4386262c1ee1031ad03503f6c262e9305c581397b3bf1a6a60\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://73c8b650a2a0ebc55d6531c14f5c6e5e4a7ffbac511aea8400c6632289aeb275\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://73c8b650a2a0ebc55d6531c14f5c6e5e4a7ffbac511aea8400c6632289aeb275\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T06:52:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T06:52:36Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T06:52:35Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:43Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:43 crc kubenswrapper[4922]: I1128 06:53:43.476821 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://60275d4709ecec957c37c52e762628422258288394fb23bb6190e98253977288\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d7be069e52fb9de73b7d4297a34eaab68e25c764ed60172e87146259d0d0b6ed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":t
rue,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:43Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:43 crc kubenswrapper[4922]: I1128 06:53:43.493969 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8de96806bba4cb76f53ccf39f1188732d96955671049b550589a836ed21e0616\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:43Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:43 crc kubenswrapper[4922]: I1128 06:53:43.507161 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:43Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:43 crc kubenswrapper[4922]: I1128 06:53:43.517095 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-h8wk6" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0498340a-5e95-42bf-a0a6-8ac89a6b8858\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://19e8a18b97500977a51210206bba89633ff9939e989bfe1a6e471f5a23c83520\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zf7vq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b903fc60349ec7d5004f23ec933dce13d646a2c343819495564005dfb4813314\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zf7vq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T06:52:54Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-h8wk6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:43Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:43 crc kubenswrapper[4922]: I1128 06:53:43.541699 4922 kubelet_node_status.go:724] 
"Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:43 crc kubenswrapper[4922]: I1128 06:53:43.541745 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:43 crc kubenswrapper[4922]: I1128 06:53:43.541758 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:43 crc kubenswrapper[4922]: I1128 06:53:43.541777 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:43 crc kubenswrapper[4922]: I1128 06:53:43.541790 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:43Z","lastTransitionTime":"2025-11-28T06:53:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:53:43 crc kubenswrapper[4922]: I1128 06:53:43.644494 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:43 crc kubenswrapper[4922]: I1128 06:53:43.644560 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:43 crc kubenswrapper[4922]: I1128 06:53:43.644579 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:43 crc kubenswrapper[4922]: I1128 06:53:43.644608 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:43 crc kubenswrapper[4922]: I1128 06:53:43.644628 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:43Z","lastTransitionTime":"2025-11-28T06:53:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:53:43 crc kubenswrapper[4922]: I1128 06:53:43.746815 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:43 crc kubenswrapper[4922]: I1128 06:53:43.746857 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:43 crc kubenswrapper[4922]: I1128 06:53:43.746868 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:43 crc kubenswrapper[4922]: I1128 06:53:43.746884 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:43 crc kubenswrapper[4922]: I1128 06:53:43.746896 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:43Z","lastTransitionTime":"2025-11-28T06:53:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 06:53:43 crc kubenswrapper[4922]: I1128 06:53:43.849551 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:43 crc kubenswrapper[4922]: I1128 06:53:43.849599 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:43 crc kubenswrapper[4922]: I1128 06:53:43.849614 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:43 crc kubenswrapper[4922]: I1128 06:53:43.849632 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:43 crc kubenswrapper[4922]: I1128 06:53:43.849644 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:43Z","lastTransitionTime":"2025-11-28T06:53:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:53:43 crc kubenswrapper[4922]: I1128 06:53:43.952309 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:43 crc kubenswrapper[4922]: I1128 06:53:43.952341 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:43 crc kubenswrapper[4922]: I1128 06:53:43.952350 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:43 crc kubenswrapper[4922]: I1128 06:53:43.952360 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:43 crc kubenswrapper[4922]: I1128 06:53:43.952367 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:43Z","lastTransitionTime":"2025-11-28T06:53:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:53:44 crc kubenswrapper[4922]: I1128 06:53:44.055699 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:44 crc kubenswrapper[4922]: I1128 06:53:44.055769 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:44 crc kubenswrapper[4922]: I1128 06:53:44.055791 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:44 crc kubenswrapper[4922]: I1128 06:53:44.055821 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:44 crc kubenswrapper[4922]: I1128 06:53:44.055841 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:44Z","lastTransitionTime":"2025-11-28T06:53:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 06:53:44 crc kubenswrapper[4922]: I1128 06:53:44.157977 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:44 crc kubenswrapper[4922]: I1128 06:53:44.158029 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:44 crc kubenswrapper[4922]: I1128 06:53:44.158037 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:44 crc kubenswrapper[4922]: I1128 06:53:44.158052 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:44 crc kubenswrapper[4922]: I1128 06:53:44.158062 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:44Z","lastTransitionTime":"2025-11-28T06:53:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:53:44 crc kubenswrapper[4922]: I1128 06:53:44.260964 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:44 crc kubenswrapper[4922]: I1128 06:53:44.261006 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:44 crc kubenswrapper[4922]: I1128 06:53:44.261016 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:44 crc kubenswrapper[4922]: I1128 06:53:44.261036 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:44 crc kubenswrapper[4922]: I1128 06:53:44.261062 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:44Z","lastTransitionTime":"2025-11-28T06:53:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:53:44 crc kubenswrapper[4922]: I1128 06:53:44.365002 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:44 crc kubenswrapper[4922]: I1128 06:53:44.365050 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:44 crc kubenswrapper[4922]: I1128 06:53:44.365061 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:44 crc kubenswrapper[4922]: I1128 06:53:44.365103 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:44 crc kubenswrapper[4922]: I1128 06:53:44.365116 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:44Z","lastTransitionTime":"2025-11-28T06:53:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
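[annotation] The recurring KubeletNotReady condition comes from the container runtime reporting NetworkReady=false until a loadable CNI network config appears in its conf dir. A simplified, stdlib-only approximation of that directory check, assuming libcni's usual .conf/.conflist/.json extensions (a sketch, not the runtime's actual implementation):

// cnicheck.go: approximates the readiness test behind "no CNI
// configuration file in /etc/kubernetes/cni/net.d/".
package main

import (
	"fmt"
	"os"
	"path/filepath"
)

func hasCNIConfig(dir string) (bool, error) {
	entries, err := os.ReadDir(dir)
	if err != nil {
		return false, err
	}
	for _, e := range entries {
		switch filepath.Ext(e.Name()) {
		case ".conf", ".conflist", ".json": // extensions libcni considers
			return true, nil
		}
	}
	return false, nil
}

func main() {
	ok, err := hasCNIConfig("/etc/kubernetes/cni/net.d")
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	if !ok {
		fmt.Println("NetworkReady=false: no CNI configuration file found")
		return
	}
	fmt.Println("NetworkReady=true")
}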
Has your network provider started?"} Nov 28 06:53:44 crc kubenswrapper[4922]: I1128 06:53:44.398105 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 06:53:44 crc kubenswrapper[4922]: I1128 06:53:44.398165 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 06:53:44 crc kubenswrapper[4922]: E1128 06:53:44.398237 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 06:53:44 crc kubenswrapper[4922]: I1128 06:53:44.398295 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-9kfr9" Nov 28 06:53:44 crc kubenswrapper[4922]: I1128 06:53:44.398109 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 06:53:44 crc kubenswrapper[4922]: E1128 06:53:44.398461 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-9kfr9" podUID="709beb43-ed88-4a0a-b384-0c463e469964" Nov 28 06:53:44 crc kubenswrapper[4922]: E1128 06:53:44.398640 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 06:53:44 crc kubenswrapper[4922]: E1128 06:53:44.398811 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 06:53:44 crc kubenswrapper[4922]: I1128 06:53:44.467727 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:44 crc kubenswrapper[4922]: I1128 06:53:44.467792 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:44 crc kubenswrapper[4922]: I1128 06:53:44.467810 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:44 crc kubenswrapper[4922]: I1128 06:53:44.467833 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:44 crc kubenswrapper[4922]: I1128 06:53:44.467849 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:44Z","lastTransitionTime":"2025-11-28T06:53:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:53:44 crc kubenswrapper[4922]: I1128 06:53:44.570246 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:44 crc kubenswrapper[4922]: I1128 06:53:44.570278 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:44 crc kubenswrapper[4922]: I1128 06:53:44.570286 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:44 crc kubenswrapper[4922]: I1128 06:53:44.570299 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:44 crc kubenswrapper[4922]: I1128 06:53:44.570307 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:44Z","lastTransitionTime":"2025-11-28T06:53:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 06:53:44 crc kubenswrapper[4922]: I1128 06:53:44.673247 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:44 crc kubenswrapper[4922]: I1128 06:53:44.673299 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:44 crc kubenswrapper[4922]: I1128 06:53:44.673316 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:44 crc kubenswrapper[4922]: I1128 06:53:44.673338 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:44 crc kubenswrapper[4922]: I1128 06:53:44.673353 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:44Z","lastTransitionTime":"2025-11-28T06:53:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:53:44 crc kubenswrapper[4922]: I1128 06:53:44.775841 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:44 crc kubenswrapper[4922]: I1128 06:53:44.775877 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:44 crc kubenswrapper[4922]: I1128 06:53:44.775888 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:44 crc kubenswrapper[4922]: I1128 06:53:44.775904 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:44 crc kubenswrapper[4922]: I1128 06:53:44.775915 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:44Z","lastTransitionTime":"2025-11-28T06:53:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:53:44 crc kubenswrapper[4922]: I1128 06:53:44.878852 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:44 crc kubenswrapper[4922]: I1128 06:53:44.878909 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:44 crc kubenswrapper[4922]: I1128 06:53:44.878926 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:44 crc kubenswrapper[4922]: I1128 06:53:44.878950 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:44 crc kubenswrapper[4922]: I1128 06:53:44.878968 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:44Z","lastTransitionTime":"2025-11-28T06:53:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 06:53:44 crc kubenswrapper[4922]: I1128 06:53:44.981939 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:44 crc kubenswrapper[4922]: I1128 06:53:44.981996 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:44 crc kubenswrapper[4922]: I1128 06:53:44.982006 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:44 crc kubenswrapper[4922]: I1128 06:53:44.982021 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:44 crc kubenswrapper[4922]: I1128 06:53:44.982031 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:44Z","lastTransitionTime":"2025-11-28T06:53:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:53:45 crc kubenswrapper[4922]: I1128 06:53:45.085448 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:45 crc kubenswrapper[4922]: I1128 06:53:45.085487 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:45 crc kubenswrapper[4922]: I1128 06:53:45.085500 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:45 crc kubenswrapper[4922]: I1128 06:53:45.085516 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:45 crc kubenswrapper[4922]: I1128 06:53:45.085525 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:45Z","lastTransitionTime":"2025-11-28T06:53:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:53:45 crc kubenswrapper[4922]: I1128 06:53:45.188549 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:45 crc kubenswrapper[4922]: I1128 06:53:45.188594 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:45 crc kubenswrapper[4922]: I1128 06:53:45.188603 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:45 crc kubenswrapper[4922]: I1128 06:53:45.188617 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:45 crc kubenswrapper[4922]: I1128 06:53:45.188629 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:45Z","lastTransitionTime":"2025-11-28T06:53:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 06:53:45 crc kubenswrapper[4922]: I1128 06:53:45.291162 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:45 crc kubenswrapper[4922]: I1128 06:53:45.291207 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:45 crc kubenswrapper[4922]: I1128 06:53:45.291234 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:45 crc kubenswrapper[4922]: I1128 06:53:45.291252 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:45 crc kubenswrapper[4922]: I1128 06:53:45.291266 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:45Z","lastTransitionTime":"2025-11-28T06:53:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:53:45 crc kubenswrapper[4922]: I1128 06:53:45.394083 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:45 crc kubenswrapper[4922]: I1128 06:53:45.394393 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:45 crc kubenswrapper[4922]: I1128 06:53:45.394428 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:45 crc kubenswrapper[4922]: I1128 06:53:45.394535 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:45 crc kubenswrapper[4922]: I1128 06:53:45.394608 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:45Z","lastTransitionTime":"2025-11-28T06:53:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
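[annotation] The patch bodies in the "failed to patch status" entries are Go-quoted, so every inner quote is backslash-escaped (and doubly so where the patch sits inside a quoted err field). One way to recover readable JSON is strconv.Unquote followed by json.Indent; the sample string below is a trimmed stand-in for a full patch body, not a literal copy:

// patchdecode.go: unescapes a Go-quoted status patch and pretty-prints it.
package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"strconv"
)

func main() {
	// Trimmed stand-in for one of the escaped patch bodies in the log.
	logged := `"{\"metadata\":{\"uid\":\"e1f29751-83a6-4469-b733-50e654026f8c\"},\"status\":{\"phase\":\"Running\"}}"`
	raw, err := strconv.Unquote(logged)
	if err != nil {
		fmt.Println("unquote:", err)
		return
	}
	var pretty bytes.Buffer
	if err := json.Indent(&pretty, []byte(raw), "", "  "); err != nil {
		fmt.Println("indent:", err)
		return
	}
	fmt.Println(pretty.String())
}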
Has your network provider started?"} Nov 28 06:53:45 crc kubenswrapper[4922]: I1128 06:53:45.413096 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-w9zxj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e1f29751-83a6-4469-b733-50e654026f8c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://679d64be6eca63ee652a4cb6cc5432c6ce549d1de243bdfbde115af634e770e3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ghdft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T06:52:54Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-w9zxj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:45Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:45 crc kubenswrapper[4922]: I1128 06:53:45.433849 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-jgzjd" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b05f16bb-1729-4fd8-883a-4fb960bf4cff\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://18ece98b3e5c71e3aa7b48051d91a0684f108688cd8a31e00422d5d0b047a76e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://099001160eba2d5545fdd42a6fac2b9842549a64b5d3fe093c274e073a156e5a\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-28T06:53:41Z\\\",\\\"message\\\":\\\"2025-11-28T06:52:55+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_23e15381-4c29-4769-a2b0-29fb61b368c2\\\\n2025-11-28T06:52:55+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_23e15381-4c29-4769-a2b0-29fb61b368c2 to /host/opt/cni/bin/\\\\n2025-11-28T06:52:55Z [verbose] multus-daemon started\\\\n2025-11-28T06:52:55Z [verbose] Readiness Indicator file check\\\\n2025-11-28T06:53:40Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T06:52:55Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:53:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fwd5b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T06:52:54Z\\\"}}\" for pod \"openshift-multus\"/\"multus-jgzjd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:45Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:45 crc kubenswrapper[4922]: I1128 06:53:45.455506 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-xm948" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"28b50482-ffec-406c-9ff9-9604bce5d5d5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8329d62934c99c8561a76ffc873ea44b49549e6be23697ea4003dcd42798f914\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:53:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-76zhj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ea8d65e8ee7eb92aa290261e8f51b41b46819bc513aaca5c3bbdf3aa90a4acf1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ea8d65e8ee7eb92aa290261e8f51b41b46819bc513aaca5c3bbdf3aa90a4acf1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T06:52:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-76zhj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b52be6274c308e05417b43b1a405e2156837ac5b1e751cf1bf07f1820e5072c1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b52be6274c308e05417b43b1a405e2156837ac5b1e751cf1bf07f1820e5072c1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T06:52:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T06:52:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-76zhj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2e49c9118ef8a51223ca7a5ed0e966458c25e01d5fe81bffeecf85e3c958177a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2e49c9118ef8a51223ca7a5ed0e966458c25e01d5fe81bffeecf85e3c958177a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T06:52:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T06:52:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-76zhj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://10808a5202288b64a0583c474df43f7ad4846b876c071e3797e9403416dbd4e5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://10808a5202288b64a0583c474df43f7ad4846b876c071e3797e9403416dbd4e5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T06:52:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T06:52:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-76zhj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f17f7a3f2eae1a08db3ab5d237156d3555583ed11bdf121c4138312913bc2ad1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f17f7a3f2eae1a08db3ab5d237156d3555583ed11bdf121c4138312913bc2ad1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T06:53:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T06:52:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-76zhj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8c558e4eb231b58b703d70d6da8454e7128ac7fc7138897bedcbdeb9e801a08b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8c558e4eb231b58b703d70d6da8454e7128ac7fc7138897bedcbdeb9e801a08b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T06:53:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T06:53:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-76zhj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T06:52:54Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-xm948\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:45Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:45 crc kubenswrapper[4922]: I1128 06:53:45.469592 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-z5d9x" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"21211ec8-3baf-4230-9cd8-c641f6bdc0e1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d42d2096b746c6f0a9210409101237a8b283b9baa914633aefc36971712ec634\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g4hdz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T06:52:57Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-z5d9x\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:45Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:45 crc kubenswrapper[4922]: I1128 06:53:45.490003 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-9kfr9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"709beb43-ed88-4a0a-b384-0c463e469964\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:09Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:09Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5p26f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5p26f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T06:53:09Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-9kfr9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:45Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:45 crc kubenswrapper[4922]: I1128 06:53:45.496285 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:45 crc kubenswrapper[4922]: I1128 06:53:45.496380 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:45 crc kubenswrapper[4922]: I1128 06:53:45.496397 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:45 crc kubenswrapper[4922]: I1128 06:53:45.496415 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:45 crc kubenswrapper[4922]: I1128 06:53:45.496427 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:45Z","lastTransitionTime":"2025-11-28T06:53:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 06:53:45 crc kubenswrapper[4922]: I1128 06:53:45.510533 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f54a7fc3-dff2-4919-91a9-7defe17b717e\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2b39d53d3c00be07ad6ed44fb37961669118efd3f0a953a39e05c90c9fc30319\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1c84fe4f54f9b9fc76f23f13aaee875e9d127be3dec3e3952893436fb9d94936\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bf8a543b9af64068e2164c1fb7fcf8117e36a3f1a6c6e40df2a63da8fea86612\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath
\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7ef4b355167e11c82fb8067373d0d1b4ec5551157e683043c98f9249082795d2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T06:52:35Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:45Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:45 crc kubenswrapper[4922]: I1128 06:53:45.530267 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:45Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:45 crc kubenswrapper[4922]: I1128 06:53:45.550109 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8de96806bba4cb76f53ccf39f1188732d96955671049b550589a836ed21e0616\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:45Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:45 crc kubenswrapper[4922]: I1128 06:53:45.568065 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"e3415f94-87c0-4a05-9d74-02ac020c4d35\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8db4ea0b3e2badcbe285f1758b150d335c8c1f4d92478a2de587cf9d345f3640\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a5b5d5b7a735c902e218bbb524cda2f64b167fcced9cc07a47bff7e59cb55115\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://900d4f306d3a3e4386262c1ee1031ad03503f6c262e9305c581397b3bf1a6a60\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://73c8b650a2a0ebc55d6531c14f5c6e5e4a7ffbac511aea8400c6632289aeb275\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://73c8b650a2a0ebc55d6531c14f5c6e5e4a7ffbac511aea8400c6632289aeb275\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T06:52:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T06:52:36Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T06:52:35Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:45Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:45 crc kubenswrapper[4922]: I1128 06:53:45.586648 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://60275d4709ecec957c37c52e762628422258288394fb23bb6190e98253977288\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d7be069e52fb9de73b7d4297a34eaab68e25c764ed60172e87146259d0d0b6ed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":t
rue,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:45Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:45 crc kubenswrapper[4922]: I1128 06:53:45.599405 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:45 crc kubenswrapper[4922]: I1128 06:53:45.599433 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:45 crc kubenswrapper[4922]: I1128 06:53:45.599444 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:45 crc kubenswrapper[4922]: I1128 06:53:45.599463 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:45 crc kubenswrapper[4922]: I1128 06:53:45.599476 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:45Z","lastTransitionTime":"2025-11-28T06:53:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 06:53:45 crc kubenswrapper[4922]: I1128 06:53:45.604153 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-h8wk6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0498340a-5e95-42bf-a0a6-8ac89a6b8858\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://19e8a18b97500977a51210206bba89633ff9939e989bfe1a6e471f5a23c83520\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zf7vq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b903fc60349ec7d5004f23ec933dce13d646a2c343819495564005dfb4813314\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zf7vq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T06:52:54Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-h8wk6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:45Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:45 crc kubenswrapper[4922]: I1128 06:53:45.651092 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:45Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:45 crc kubenswrapper[4922]: I1128 06:53:45.663674 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:57Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:57Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4fb859dbed8c4c25965a739a2bd823f90f867a3bb629206994858cd62ba597ff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:45Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:45 crc kubenswrapper[4922]: I1128 06:53:45.684483 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:45Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:45 crc kubenswrapper[4922]: I1128 06:53:45.701241 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:45 crc kubenswrapper[4922]: I1128 06:53:45.701265 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:45 crc kubenswrapper[4922]: I1128 06:53:45.701273 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:45 crc kubenswrapper[4922]: I1128 06:53:45.701284 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:45 crc kubenswrapper[4922]: I1128 06:53:45.701293 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:45Z","lastTransitionTime":"2025-11-28T06:53:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 06:53:45 crc kubenswrapper[4922]: I1128 06:53:45.703938 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-7gdxt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ac5c6b67-2037-400e-8e03-845b47d8ca67\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d0f038ffe6c8ec5ec836379b827ecb71967119efca3f2cd4ce5d3006fa96a153\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dtxdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2c1a545fef7aec25b21dd112c54eeb7b94306cafaf4732a50de9ff10a409c443\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dtxdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\
":\\\"cri-o://6e9942f8e8cdf6ffd04d46b29be5a53fcc8c4cb5d79f50d7747e2fe6eb744ffd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dtxdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e91ea6957087e19d43626438d68bf92a19a6f35325de570738354edd61da5bb0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dtxdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4f56366e2355663eb896c09a6710166b89f794a7f17cb317f6c4299e8b317782\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dtxdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://403e86b4b99e8bc85be77996288705f6ea8ff5da8e9b7dfb09749f4341065b55\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.i
o/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dtxdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e48e9f3c53ca5775a9a467f9da512b7ee4d61cddcd9d7fc0765432f7a716ac96\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e48e9f3c53ca5775a9a467f9da512b7ee4d61cddcd9d7fc0765432f7a716ac96\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-28T06:53:41Z\\\",\\\"message\\\":\\\"[0a:58:0a:d9:00:3b 10.217.0.59]} options:{GoMap:map[iface-id-ver:9d751cbb-f2e2-430d-9754-c882a5e924a5 requested-chassis:crc]} port_security:{GoSet:[0a:58:0a:d9:00:3b 10.217.0.59]}] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {960d98b2-dc64-4e93-a4b6-9b19847af71e}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI1128 06:53:41.329271 6771 ovn.go:134] Ensuring zone local for Pod openshift-kube-scheduler/openshift-kube-scheduler-crc in node crc\\\\nI1128 06:53:41.329277 6771 obj_retry.go:386] Retry successful for *v1.Pod openshift-kube-scheduler/openshift-kube-scheduler-crc after 0 failed attempt(s)\\\\nI1128 06:53:41.329282 6771 default_network_controller.go:776] Recording success event on pod openshift-kube-scheduler/openshift-kube-scheduler-crc\\\\nF1128 06:53:41.329275 6771 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: fa\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T06:53:40Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed 
container=ovnkube-controller pod=ovnkube-node-7gdxt_openshift-ovn-kubernetes(ac5c6b67-2037-400e-8e03-845b47d8ca67)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dtxdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d31c99f96b8af9c7b9c47c4ccd0b6485b23c9eacfe293667038042df8330d8a9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dtxdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3aaa3ffb5b99838c7969b3e241ad9d7a0bc4797fac3ec52280914abbb7ca1b24\\\",\\\"image\\\":\
\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3aaa3ffb5b99838c7969b3e241ad9d7a0bc4797fac3ec52280914abbb7ca1b24\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T06:52:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dtxdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T06:52:54Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-7gdxt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:45Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:45 crc kubenswrapper[4922]: I1128 06:53:45.713872 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-n9b52" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"31348e3e-fe58-4426-98b7-bd9dd404283b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://07e060fc8cd65cfc5a3e2ba86edb408b32996c25dac44f6f70d2f91b837e9da1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:53:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\
\\":\\\"kube-api-access-vkhgr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://03d0e97f4ab12f9cccfefde0d5665e45e14db63343fd1273279d41a8e56b060f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:53:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vkhgr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T06:53:07Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-n9b52\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:45Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:45 crc kubenswrapper[4922]: I1128 06:53:45.725188 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"78d1b075-6126-476f-8318-483aeaa7b542\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d8727c0c5c76c4071561c95b14554063c01a2d7d826bfef19624a346f7079096\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c63926690deadbe1a0b9bd4228c1be8c9d959ae52dc13540e1d7da0ef6ce2b44\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://12e55656cf2a2cadbef853f458f347aabe75bd6b644a48a8c320144a0bbb77f8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ff005273cedb4a3be19dafd07b2572733446d00c1aa3d89dec6dad79a9b37cb7\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://43f9db3464d315f5b596f15327b9e7cfc2935bb8f18492bcf4aa1b2d8e2e8951\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-28T06:52:53Z\\\",\\\"message\\\":\\\" to sync for client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file\\\\nI1128 06:52:53.805479 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\nI1128 06:52:53.805349 1 requestheader_controller.go:172] Starting RequestHeaderAuthRequestController\\\\nI1128 06:52:53.807455 1 shared_informer.go:313] Waiting for caches to sync for RequestHeaderAuthRequestController\\\\nI1128 06:52:53.805665 1 envvar.go:172] \\\\\\\"Feature gate default state\\\\\\\" feature=\\\\\\\"WatchListClient\\\\\\\" enabled=false\\\\nI1128 06:52:53.807562 1 envvar.go:172] \\\\\\\"Feature gate default state\\\\\\\" feature=\\\\\\\"InformerResourceVersion\\\\\\\" enabled=false\\\\nI1128 06:52:53.805922 1 tlsconfig.go:203] \\\\\\\"Loaded serving cert\\\\\\\" certName=\\\\\\\"serving-cert::/tmp/serving-cert-870734072/tls.crt::/tmp/serving-cert-870734072/tls.key\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"localhost\\\\\\\\\\\\\\\" [serving] validServingFor=[localhost] issuer=\\\\\\\\\\\\\\\"check-endpoints-signer@1764312757\\\\\\\\\\\\\\\" (2025-11-28 06:52:36 +0000 UTC to 2025-12-28 06:52:37 +0000 UTC (now=2025-11-28 06:52:53.805871396 +0000 UTC))\\\\\\\"\\\\nI1128 06:52:53.808356 1 named_certificates.go:53] \\\\\\\"Loaded SNI cert\\\\\\\" index=0 certName=\\\\\\\"self-signed loopback\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"apiserver-loopback-client@1764312768\\\\\\\\\\\\\\\" [serving] validServingFor=[apiserver-loopback-client] issuer=\\\\\\\\\\\\\\\"apiserver-loopback-client-ca@1764312768\\\\\\\\\\\\\\\" (2025-11-28 05:52:47 +0000 UTC to 2026-11-28 05:52:47 +0000 UTC (now=2025-11-28 06:52:53.808326937 +0000 UTC))\\\\\\\"\\\\nI1128 06:52:53.808434 1 secure_serving.go:213] Serving securely on [::]:17697\\\\nI1128 06:52:53.808491 1 genericapiserver.go:683] [graceful-termination] waiting for shutdown to be initiated\\\\nI1128 06:52:53.805962 1 dynamic_serving_content.go:135] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-870734072/tls.crt::/tmp/serving-cert-870734072/tls.key\\\\\\\"\\\\nI1128 06:52:53.808872 1 tlsconfig.go:243] \\\\\\\"Starting DynamicServingCertificateController\\\\\\\"\\\\nF1128 06:52:53.810397 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T06:52:37Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cd17aa15325fbe3a40b81cea5bfec137ef02a8dd2d5f8393be4d206a8360ba89\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:37Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9a1a9e0fb2169a4caf2986906a87573648458725e2571ec377704856d3c16b47\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9a1a9e0fb2169a4caf2986906a87573648458725e2571ec377704856d3c16b47\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T06:52:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T06:52:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T06:52:35Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:45Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:45 crc kubenswrapper[4922]: I1128 06:53:45.803950 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:45 crc kubenswrapper[4922]: I1128 06:53:45.804016 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:45 crc kubenswrapper[4922]: I1128 06:53:45.804037 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:45 crc kubenswrapper[4922]: I1128 06:53:45.804062 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:45 crc kubenswrapper[4922]: I1128 06:53:45.804080 4922 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:45Z","lastTransitionTime":"2025-11-28T06:53:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:53:45 crc kubenswrapper[4922]: I1128 06:53:45.907269 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:45 crc kubenswrapper[4922]: I1128 06:53:45.907325 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:45 crc kubenswrapper[4922]: I1128 06:53:45.907342 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:45 crc kubenswrapper[4922]: I1128 06:53:45.907368 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:45 crc kubenswrapper[4922]: I1128 06:53:45.907393 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:45Z","lastTransitionTime":"2025-11-28T06:53:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:53:46 crc kubenswrapper[4922]: I1128 06:53:46.010024 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:46 crc kubenswrapper[4922]: I1128 06:53:46.010073 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:46 crc kubenswrapper[4922]: I1128 06:53:46.010083 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:46 crc kubenswrapper[4922]: I1128 06:53:46.010102 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:46 crc kubenswrapper[4922]: I1128 06:53:46.010114 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:46Z","lastTransitionTime":"2025-11-28T06:53:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 06:53:46 crc kubenswrapper[4922]: I1128 06:53:46.112787 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:46 crc kubenswrapper[4922]: I1128 06:53:46.112850 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:46 crc kubenswrapper[4922]: I1128 06:53:46.112867 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:46 crc kubenswrapper[4922]: I1128 06:53:46.112894 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:46 crc kubenswrapper[4922]: I1128 06:53:46.112916 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:46Z","lastTransitionTime":"2025-11-28T06:53:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:53:46 crc kubenswrapper[4922]: I1128 06:53:46.215189 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:46 crc kubenswrapper[4922]: I1128 06:53:46.215313 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:46 crc kubenswrapper[4922]: I1128 06:53:46.215332 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:46 crc kubenswrapper[4922]: I1128 06:53:46.215358 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:46 crc kubenswrapper[4922]: I1128 06:53:46.215376 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:46Z","lastTransitionTime":"2025-11-28T06:53:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:53:46 crc kubenswrapper[4922]: I1128 06:53:46.317468 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:46 crc kubenswrapper[4922]: I1128 06:53:46.317532 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:46 crc kubenswrapper[4922]: I1128 06:53:46.317551 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:46 crc kubenswrapper[4922]: I1128 06:53:46.317574 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:46 crc kubenswrapper[4922]: I1128 06:53:46.317593 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:46Z","lastTransitionTime":"2025-11-28T06:53:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 06:53:46 crc kubenswrapper[4922]: I1128 06:53:46.398109 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 06:53:46 crc kubenswrapper[4922]: I1128 06:53:46.398163 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-9kfr9" Nov 28 06:53:46 crc kubenswrapper[4922]: E1128 06:53:46.398237 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 06:53:46 crc kubenswrapper[4922]: E1128 06:53:46.398369 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-9kfr9" podUID="709beb43-ed88-4a0a-b384-0c463e469964" Nov 28 06:53:46 crc kubenswrapper[4922]: I1128 06:53:46.398393 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 06:53:46 crc kubenswrapper[4922]: E1128 06:53:46.398450 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 06:53:46 crc kubenswrapper[4922]: I1128 06:53:46.398488 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 06:53:46 crc kubenswrapper[4922]: E1128 06:53:46.398536 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 06:53:46 crc kubenswrapper[4922]: I1128 06:53:46.419635 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:46 crc kubenswrapper[4922]: I1128 06:53:46.419681 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:46 crc kubenswrapper[4922]: I1128 06:53:46.419698 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:46 crc kubenswrapper[4922]: I1128 06:53:46.419721 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:46 crc kubenswrapper[4922]: I1128 06:53:46.419738 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:46Z","lastTransitionTime":"2025-11-28T06:53:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:53:46 crc kubenswrapper[4922]: I1128 06:53:46.523276 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:46 crc kubenswrapper[4922]: I1128 06:53:46.523339 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:46 crc kubenswrapper[4922]: I1128 06:53:46.523360 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:46 crc kubenswrapper[4922]: I1128 06:53:46.523392 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:46 crc kubenswrapper[4922]: I1128 06:53:46.523413 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:46Z","lastTransitionTime":"2025-11-28T06:53:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 06:53:46 crc kubenswrapper[4922]: I1128 06:53:46.625562 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:46 crc kubenswrapper[4922]: I1128 06:53:46.625638 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:46 crc kubenswrapper[4922]: I1128 06:53:46.625661 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:46 crc kubenswrapper[4922]: I1128 06:53:46.625689 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:46 crc kubenswrapper[4922]: I1128 06:53:46.625713 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:46Z","lastTransitionTime":"2025-11-28T06:53:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:53:46 crc kubenswrapper[4922]: I1128 06:53:46.729120 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:46 crc kubenswrapper[4922]: I1128 06:53:46.729176 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:46 crc kubenswrapper[4922]: I1128 06:53:46.729194 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:46 crc kubenswrapper[4922]: I1128 06:53:46.729216 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:46 crc kubenswrapper[4922]: I1128 06:53:46.729262 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:46Z","lastTransitionTime":"2025-11-28T06:53:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:53:46 crc kubenswrapper[4922]: I1128 06:53:46.832264 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:46 crc kubenswrapper[4922]: I1128 06:53:46.832307 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:46 crc kubenswrapper[4922]: I1128 06:53:46.832316 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:46 crc kubenswrapper[4922]: I1128 06:53:46.832331 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:46 crc kubenswrapper[4922]: I1128 06:53:46.832340 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:46Z","lastTransitionTime":"2025-11-28T06:53:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 06:53:46 crc kubenswrapper[4922]: I1128 06:53:46.935729 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:46 crc kubenswrapper[4922]: I1128 06:53:46.935771 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:46 crc kubenswrapper[4922]: I1128 06:53:46.935782 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:46 crc kubenswrapper[4922]: I1128 06:53:46.935798 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:46 crc kubenswrapper[4922]: I1128 06:53:46.935809 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:46Z","lastTransitionTime":"2025-11-28T06:53:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:53:47 crc kubenswrapper[4922]: I1128 06:53:47.038371 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:47 crc kubenswrapper[4922]: I1128 06:53:47.038400 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:47 crc kubenswrapper[4922]: I1128 06:53:47.038409 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:47 crc kubenswrapper[4922]: I1128 06:53:47.038421 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:47 crc kubenswrapper[4922]: I1128 06:53:47.038430 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:47Z","lastTransitionTime":"2025-11-28T06:53:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:53:47 crc kubenswrapper[4922]: I1128 06:53:47.141644 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:47 crc kubenswrapper[4922]: I1128 06:53:47.141709 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:47 crc kubenswrapper[4922]: I1128 06:53:47.141733 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:47 crc kubenswrapper[4922]: I1128 06:53:47.141759 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:47 crc kubenswrapper[4922]: I1128 06:53:47.141777 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:47Z","lastTransitionTime":"2025-11-28T06:53:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 06:53:47 crc kubenswrapper[4922]: I1128 06:53:47.243746 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:47 crc kubenswrapper[4922]: I1128 06:53:47.243786 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:47 crc kubenswrapper[4922]: I1128 06:53:47.243797 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:47 crc kubenswrapper[4922]: I1128 06:53:47.243813 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:47 crc kubenswrapper[4922]: I1128 06:53:47.243825 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:47Z","lastTransitionTime":"2025-11-28T06:53:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:53:47 crc kubenswrapper[4922]: I1128 06:53:47.346720 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:47 crc kubenswrapper[4922]: I1128 06:53:47.346790 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:47 crc kubenswrapper[4922]: I1128 06:53:47.346814 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:47 crc kubenswrapper[4922]: I1128 06:53:47.346846 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:47 crc kubenswrapper[4922]: I1128 06:53:47.346868 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:47Z","lastTransitionTime":"2025-11-28T06:53:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 06:53:47 crc kubenswrapper[4922]: I1128 06:53:47.409868 4922 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/kube-rbac-proxy-crio-crc"] Nov 28 06:53:47 crc kubenswrapper[4922]: I1128 06:53:47.450364 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:47 crc kubenswrapper[4922]: I1128 06:53:47.450424 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:47 crc kubenswrapper[4922]: I1128 06:53:47.450442 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:47 crc kubenswrapper[4922]: I1128 06:53:47.450464 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:47 crc kubenswrapper[4922]: I1128 06:53:47.450480 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:47Z","lastTransitionTime":"2025-11-28T06:53:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:53:47 crc kubenswrapper[4922]: I1128 06:53:47.561176 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:47 crc kubenswrapper[4922]: I1128 06:53:47.561566 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:47 crc kubenswrapper[4922]: I1128 06:53:47.561585 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:47 crc kubenswrapper[4922]: I1128 06:53:47.561611 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:47 crc kubenswrapper[4922]: I1128 06:53:47.561628 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:47Z","lastTransitionTime":"2025-11-28T06:53:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 06:53:47 crc kubenswrapper[4922]: I1128 06:53:47.664400 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:47 crc kubenswrapper[4922]: I1128 06:53:47.664444 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:47 crc kubenswrapper[4922]: I1128 06:53:47.664457 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:47 crc kubenswrapper[4922]: I1128 06:53:47.664473 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:47 crc kubenswrapper[4922]: I1128 06:53:47.664484 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:47Z","lastTransitionTime":"2025-11-28T06:53:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:53:47 crc kubenswrapper[4922]: I1128 06:53:47.767508 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:47 crc kubenswrapper[4922]: I1128 06:53:47.767565 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:47 crc kubenswrapper[4922]: I1128 06:53:47.767583 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:47 crc kubenswrapper[4922]: I1128 06:53:47.767607 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:47 crc kubenswrapper[4922]: I1128 06:53:47.767624 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:47Z","lastTransitionTime":"2025-11-28T06:53:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:53:47 crc kubenswrapper[4922]: I1128 06:53:47.869752 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:47 crc kubenswrapper[4922]: I1128 06:53:47.869790 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:47 crc kubenswrapper[4922]: I1128 06:53:47.869800 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:47 crc kubenswrapper[4922]: I1128 06:53:47.869815 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:47 crc kubenswrapper[4922]: I1128 06:53:47.869827 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:47Z","lastTransitionTime":"2025-11-28T06:53:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 06:53:47 crc kubenswrapper[4922]: I1128 06:53:47.972265 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:47 crc kubenswrapper[4922]: I1128 06:53:47.972326 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:47 crc kubenswrapper[4922]: I1128 06:53:47.972348 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:47 crc kubenswrapper[4922]: I1128 06:53:47.972373 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:47 crc kubenswrapper[4922]: I1128 06:53:47.972389 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:47Z","lastTransitionTime":"2025-11-28T06:53:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:53:48 crc kubenswrapper[4922]: I1128 06:53:48.074295 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:48 crc kubenswrapper[4922]: I1128 06:53:48.074349 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:48 crc kubenswrapper[4922]: I1128 06:53:48.074360 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:48 crc kubenswrapper[4922]: I1128 06:53:48.074378 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:48 crc kubenswrapper[4922]: I1128 06:53:48.074390 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:48Z","lastTransitionTime":"2025-11-28T06:53:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:53:48 crc kubenswrapper[4922]: I1128 06:53:48.177759 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:48 crc kubenswrapper[4922]: I1128 06:53:48.177860 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:48 crc kubenswrapper[4922]: I1128 06:53:48.177885 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:48 crc kubenswrapper[4922]: I1128 06:53:48.177917 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:48 crc kubenswrapper[4922]: I1128 06:53:48.177941 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:48Z","lastTransitionTime":"2025-11-28T06:53:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 06:53:48 crc kubenswrapper[4922]: I1128 06:53:48.281676 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:48 crc kubenswrapper[4922]: I1128 06:53:48.281737 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:48 crc kubenswrapper[4922]: I1128 06:53:48.281753 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:48 crc kubenswrapper[4922]: I1128 06:53:48.281775 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:48 crc kubenswrapper[4922]: I1128 06:53:48.281789 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:48Z","lastTransitionTime":"2025-11-28T06:53:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:53:48 crc kubenswrapper[4922]: I1128 06:53:48.383946 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:48 crc kubenswrapper[4922]: I1128 06:53:48.384009 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:48 crc kubenswrapper[4922]: I1128 06:53:48.384027 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:48 crc kubenswrapper[4922]: I1128 06:53:48.384054 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:48 crc kubenswrapper[4922]: I1128 06:53:48.384077 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:48Z","lastTransitionTime":"2025-11-28T06:53:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:53:48 crc kubenswrapper[4922]: I1128 06:53:48.397501 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 06:53:48 crc kubenswrapper[4922]: I1128 06:53:48.397522 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 06:53:48 crc kubenswrapper[4922]: I1128 06:53:48.397584 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-9kfr9" Nov 28 06:53:48 crc kubenswrapper[4922]: E1128 06:53:48.397615 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 06:53:48 crc kubenswrapper[4922]: I1128 06:53:48.397699 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 06:53:48 crc kubenswrapper[4922]: E1128 06:53:48.398007 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-9kfr9" podUID="709beb43-ed88-4a0a-b384-0c463e469964" Nov 28 06:53:48 crc kubenswrapper[4922]: E1128 06:53:48.398521 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 06:53:48 crc kubenswrapper[4922]: E1128 06:53:48.398753 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 06:53:48 crc kubenswrapper[4922]: I1128 06:53:48.486646 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:48 crc kubenswrapper[4922]: I1128 06:53:48.486704 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:48 crc kubenswrapper[4922]: I1128 06:53:48.486723 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:48 crc kubenswrapper[4922]: I1128 06:53:48.486746 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:48 crc kubenswrapper[4922]: I1128 06:53:48.486765 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:48Z","lastTransitionTime":"2025-11-28T06:53:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 06:53:48 crc kubenswrapper[4922]: I1128 06:53:48.589674 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:48 crc kubenswrapper[4922]: I1128 06:53:48.589727 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:48 crc kubenswrapper[4922]: I1128 06:53:48.589744 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:48 crc kubenswrapper[4922]: I1128 06:53:48.589766 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:48 crc kubenswrapper[4922]: I1128 06:53:48.589784 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:48Z","lastTransitionTime":"2025-11-28T06:53:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:53:48 crc kubenswrapper[4922]: I1128 06:53:48.632034 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:48 crc kubenswrapper[4922]: I1128 06:53:48.632101 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:48 crc kubenswrapper[4922]: I1128 06:53:48.632122 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:48 crc kubenswrapper[4922]: I1128 06:53:48.632147 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:48 crc kubenswrapper[4922]: I1128 06:53:48.632166 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:48Z","lastTransitionTime":"2025-11-28T06:53:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 06:53:48 crc kubenswrapper[4922]: E1128 06:53:48.652998 4922 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T06:53:48Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:48Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T06:53:48Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:48Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T06:53:48Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:48Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T06:53:48Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:48Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"3ea1e6bb-61b7-453d-b9cb-290e4e3cdf54\\\",\\\"systemUUID\\\":\\\"a83ea3b2-2af6-4e19-83ef-b63bfe4faed4\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:48Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:48 crc kubenswrapper[4922]: I1128 06:53:48.658564 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:48 crc kubenswrapper[4922]: I1128 06:53:48.658621 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 28 06:53:48 crc kubenswrapper[4922]: I1128 06:53:48.658640 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:48 crc kubenswrapper[4922]: I1128 06:53:48.658662 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:48 crc kubenswrapper[4922]: I1128 06:53:48.658679 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:48Z","lastTransitionTime":"2025-11-28T06:53:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:53:48 crc kubenswrapper[4922]: E1128 06:53:48.684606 4922 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T06:53:48Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:48Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T06:53:48Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:48Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T06:53:48Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:48Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T06:53:48Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:48Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"3ea1e6bb-61b7-453d-b9cb-290e4e3cdf54\\\",\\\"systemUUID\\\":\\\"a83ea3b2-2af6-4e19-83ef-b63bfe4faed4\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:48Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:48 crc kubenswrapper[4922]: I1128 06:53:48.689747 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:48 crc kubenswrapper[4922]: I1128 06:53:48.689795 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 28 06:53:48 crc kubenswrapper[4922]: I1128 06:53:48.689809 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:48 crc kubenswrapper[4922]: I1128 06:53:48.689827 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:48 crc kubenswrapper[4922]: I1128 06:53:48.689839 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:48Z","lastTransitionTime":"2025-11-28T06:53:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:53:48 crc kubenswrapper[4922]: E1128 06:53:48.720671 4922 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T06:53:48Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:48Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T06:53:48Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:48Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T06:53:48Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:48Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T06:53:48Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:48Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
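The NotReady condition above repeats a single root cause: the container runtime reports NetworkReady=false because /etc/kubernetes/cni/net.d/ contains no CNI network configuration. A minimal Go sketch of that kind of check, assuming libcni's convention of loading *.conf, *.conflist and *.json files; the directory path is taken from the log message, and everything else here is illustrative, not kubelet or CRI-O source:

package main

import (
	"fmt"
	"os"
	"path/filepath"
)

// cniConfigFiles lists candidate CNI config files in dir, mirroring the
// extensions libcni conventionally loads. Illustrative only.
func cniConfigFiles(dir string) ([]string, error) {
	entries, err := os.ReadDir(dir)
	if err != nil {
		return nil, err
	}
	var files []string
	for _, e := range entries {
		switch filepath.Ext(e.Name()) {
		case ".conf", ".conflist", ".json":
			files = append(files, filepath.Join(dir, e.Name()))
		}
	}
	return files, nil
}

func main() {
	files, err := cniConfigFiles("/etc/kubernetes/cni/net.d")
	if err != nil || len(files) == 0 {
		// This is the state the kubelet surfaces as NetworkReady=false above.
		fmt.Println("no CNI configuration file in /etc/kubernetes/cni/net.d/")
		return
	}
	fmt.Println("CNI configs:", files)
}

Until the network provider writes a config file into that directory, every sync loop re-raises the same condition, which is why the entries below repeat with only the timestamps changing.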
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"3ea1e6bb-61b7-453d-b9cb-290e4e3cdf54\\\",\\\"systemUUID\\\":\\\"a83ea3b2-2af6-4e19-83ef-b63bfe4faed4\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:48Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:48 crc kubenswrapper[4922]: I1128 06:53:48.725590 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:48 crc kubenswrapper[4922]: I1128 06:53:48.725723 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
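Every status-patch attempt fails identically: the call to the node.network-node-identity.openshift.io webhook at https://127.0.0.1:9743 aborts during the TLS handshake because the serving certificate expired on 2025-08-24T17:21:41Z, long before the node's clock time of 2025-11-28T06:53:48Z. The error text is produced by Go's crypto/x509 verifier; the following self-contained sketch reproduces it. The certificate is generated on the spot, and only its validity window and the verification time are taken from the log; the CommonName is made up:

package main

import (
	"crypto/ecdsa"
	"crypto/elliptic"
	"crypto/rand"
	"crypto/x509"
	"crypto/x509/pkix"
	"fmt"
	"math/big"
	"time"
)

func main() {
	key, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
	if err != nil {
		panic(err)
	}
	// Self-signed certificate whose NotAfter matches the expiry in the log.
	tmpl := &x509.Certificate{
		SerialNumber:          big.NewInt(1),
		Subject:               pkix.Name{CommonName: "network-node-identity (illustrative)"},
		NotBefore:             time.Date(2024, 8, 24, 17, 21, 41, 0, time.UTC),
		NotAfter:              time.Date(2025, 8, 24, 17, 21, 41, 0, time.UTC),
		IsCA:                  true,
		BasicConstraintsValid: true,
		KeyUsage:              x509.KeyUsageCertSign | x509.KeyUsageDigitalSignature,
	}
	der, err := x509.CreateCertificate(rand.Reader, tmpl, tmpl, &key.PublicKey, key)
	if err != nil {
		panic(err)
	}
	cert, err := x509.ParseCertificate(der)
	if err != nil {
		panic(err)
	}

	roots := x509.NewCertPool()
	roots.AddCert(cert)

	// Verify as if "now" were the kubelet's clock in the log: 2025-11-28T06:53:48Z.
	_, err = cert.Verify(x509.VerifyOptions{
		Roots:       roots,
		CurrentTime: time.Date(2025, 11, 28, 6, 53, 48, 0, time.UTC),
	})
	fmt.Println(err)
	// Prints: x509: certificate has expired or is not yet valid:
	// current time 2025-11-28T06:53:48Z is after 2025-08-24T17:21:41Z
}

Because the failure is on the webhook's side, the kubelet cannot fix it by retrying; only rotating the webhook's serving certificate (or correcting a badly skewed clock) clears it.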
event="NodeHasNoDiskPressure" Nov 28 06:53:48 crc kubenswrapper[4922]: I1128 06:53:48.725797 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:48 crc kubenswrapper[4922]: I1128 06:53:48.725826 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:48 crc kubenswrapper[4922]: I1128 06:53:48.725849 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:48Z","lastTransitionTime":"2025-11-28T06:53:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:53:48 crc kubenswrapper[4922]: E1128 06:53:48.746806 4922 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T06:53:48Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:48Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T06:53:48Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:48Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T06:53:48Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:48Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T06:53:48Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:48Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"3ea1e6bb-61b7-453d-b9cb-290e4e3cdf54\\\",\\\"systemUUID\\\":\\\"a83ea3b2-2af6-4e19-83ef-b63bfe4faed4\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:48Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:48 crc kubenswrapper[4922]: I1128 06:53:48.751187 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:48 crc kubenswrapper[4922]: I1128 06:53:48.751273 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
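The kubelet does not retry the status patch indefinitely: each sync attempts the update a fixed number of times and then gives up with "update node status exceeds retry count", as the 06:53:48.771402 entry below shows. A sketch of a loop with that shape, assuming the small fixed budget that upstream kubelet's nodeStatusUpdateRetry constant has historically used (the constant name and both log strings are real; the surrounding wiring is illustrative):

package main

import (
	"errors"
	"fmt"
)

// Assumed value: upstream kubelet defines nodeStatusUpdateRetry = 5;
// treat the exact number here as an assumption, not a guarantee.
const nodeStatusUpdateRetry = 5

// tryUpdateNodeStatus stands in for the PATCH of the node status that the
// admission webhook rejects in the log above.
func tryUpdateNodeStatus() error {
	return errors.New(`failed calling webhook "node.network-node-identity.openshift.io": tls: failed to verify certificate: x509: certificate has expired or is not yet valid`)
}

func updateNodeStatus() error {
	for i := 0; i < nodeStatusUpdateRetry; i++ {
		if err := tryUpdateNodeStatus(); err != nil {
			fmt.Printf("Error updating node status, will retry: %v\n", err)
			continue
		}
		return nil
	}
	return fmt.Errorf("update node status exceeds retry count")
}

func main() {
	if err := updateNodeStatus(); err != nil {
		fmt.Println("Unable to update node status:", err)
	}
}

The exhausted budget only ends this sync's attempts; the next sync loop starts over, which is why the retry/give-up pattern recurs throughout the log.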
event="NodeHasNoDiskPressure" Nov 28 06:53:48 crc kubenswrapper[4922]: I1128 06:53:48.751295 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:48 crc kubenswrapper[4922]: I1128 06:53:48.751320 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:48 crc kubenswrapper[4922]: I1128 06:53:48.751338 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:48Z","lastTransitionTime":"2025-11-28T06:53:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:53:48 crc kubenswrapper[4922]: E1128 06:53:48.771110 4922 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T06:53:48Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:48Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T06:53:48Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:48Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T06:53:48Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:48Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T06:53:48Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:48Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"3ea1e6bb-61b7-453d-b9cb-290e4e3cdf54\\\",\\\"systemUUID\\\":\\\"a83ea3b2-2af6-4e19-83ef-b63bfe4faed4\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:48Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:48 crc kubenswrapper[4922]: E1128 06:53:48.771402 4922 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Nov 28 06:53:48 crc kubenswrapper[4922]: I1128 06:53:48.773147 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Nov 28 06:53:48 crc kubenswrapper[4922]: I1128 06:53:48.773210 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:48 crc kubenswrapper[4922]: I1128 06:53:48.773255 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:48 crc kubenswrapper[4922]: I1128 06:53:48.773283 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:48 crc kubenswrapper[4922]: I1128 06:53:48.773301 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:48Z","lastTransitionTime":"2025-11-28T06:53:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:53:48 crc kubenswrapper[4922]: I1128 06:53:48.876411 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:48 crc kubenswrapper[4922]: I1128 06:53:48.876483 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:48 crc kubenswrapper[4922]: I1128 06:53:48.876501 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:48 crc kubenswrapper[4922]: I1128 06:53:48.876527 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:48 crc kubenswrapper[4922]: I1128 06:53:48.876545 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:48Z","lastTransitionTime":"2025-11-28T06:53:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:53:48 crc kubenswrapper[4922]: I1128 06:53:48.979509 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:48 crc kubenswrapper[4922]: I1128 06:53:48.979591 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:48 crc kubenswrapper[4922]: I1128 06:53:48.979616 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:48 crc kubenswrapper[4922]: I1128 06:53:48.979655 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:48 crc kubenswrapper[4922]: I1128 06:53:48.979679 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:48Z","lastTransitionTime":"2025-11-28T06:53:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 06:53:49 crc kubenswrapper[4922]: I1128 06:53:49.082188 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:49 crc kubenswrapper[4922]: I1128 06:53:49.082280 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:49 crc kubenswrapper[4922]: I1128 06:53:49.082300 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:49 crc kubenswrapper[4922]: I1128 06:53:49.082327 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:49 crc kubenswrapper[4922]: I1128 06:53:49.082343 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:49Z","lastTransitionTime":"2025-11-28T06:53:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:53:49 crc kubenswrapper[4922]: I1128 06:53:49.185284 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:49 crc kubenswrapper[4922]: I1128 06:53:49.185353 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:49 crc kubenswrapper[4922]: I1128 06:53:49.185372 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:49 crc kubenswrapper[4922]: I1128 06:53:49.185399 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:49 crc kubenswrapper[4922]: I1128 06:53:49.185416 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:49Z","lastTransitionTime":"2025-11-28T06:53:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:53:49 crc kubenswrapper[4922]: I1128 06:53:49.288870 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:49 crc kubenswrapper[4922]: I1128 06:53:49.288929 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:49 crc kubenswrapper[4922]: I1128 06:53:49.288947 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:49 crc kubenswrapper[4922]: I1128 06:53:49.288974 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:49 crc kubenswrapper[4922]: I1128 06:53:49.288992 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:49Z","lastTransitionTime":"2025-11-28T06:53:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 06:53:49 crc kubenswrapper[4922]: I1128 06:53:49.396039 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:49 crc kubenswrapper[4922]: I1128 06:53:49.396099 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:49 crc kubenswrapper[4922]: I1128 06:53:49.396116 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:49 crc kubenswrapper[4922]: I1128 06:53:49.396141 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:49 crc kubenswrapper[4922]: I1128 06:53:49.396159 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:49Z","lastTransitionTime":"2025-11-28T06:53:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:53:49 crc kubenswrapper[4922]: I1128 06:53:49.499081 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:49 crc kubenswrapper[4922]: I1128 06:53:49.499146 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:49 crc kubenswrapper[4922]: I1128 06:53:49.499166 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:49 crc kubenswrapper[4922]: I1128 06:53:49.499191 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:49 crc kubenswrapper[4922]: I1128 06:53:49.499208 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:49Z","lastTransitionTime":"2025-11-28T06:53:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:53:49 crc kubenswrapper[4922]: I1128 06:53:49.602360 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:49 crc kubenswrapper[4922]: I1128 06:53:49.602419 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:49 crc kubenswrapper[4922]: I1128 06:53:49.602436 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:49 crc kubenswrapper[4922]: I1128 06:53:49.602460 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:49 crc kubenswrapper[4922]: I1128 06:53:49.602480 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:49Z","lastTransitionTime":"2025-11-28T06:53:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 06:53:49 crc kubenswrapper[4922]: I1128 06:53:49.705601 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:49 crc kubenswrapper[4922]: I1128 06:53:49.705662 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:49 crc kubenswrapper[4922]: I1128 06:53:49.705678 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:49 crc kubenswrapper[4922]: I1128 06:53:49.705704 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:49 crc kubenswrapper[4922]: I1128 06:53:49.705721 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:49Z","lastTransitionTime":"2025-11-28T06:53:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:53:49 crc kubenswrapper[4922]: I1128 06:53:49.808986 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:49 crc kubenswrapper[4922]: I1128 06:53:49.809046 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:49 crc kubenswrapper[4922]: I1128 06:53:49.809063 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:49 crc kubenswrapper[4922]: I1128 06:53:49.809086 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:49 crc kubenswrapper[4922]: I1128 06:53:49.809102 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:49Z","lastTransitionTime":"2025-11-28T06:53:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:53:49 crc kubenswrapper[4922]: I1128 06:53:49.912040 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:49 crc kubenswrapper[4922]: I1128 06:53:49.912107 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:49 crc kubenswrapper[4922]: I1128 06:53:49.912126 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:49 crc kubenswrapper[4922]: I1128 06:53:49.912152 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:49 crc kubenswrapper[4922]: I1128 06:53:49.912171 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:49Z","lastTransitionTime":"2025-11-28T06:53:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 06:53:50 crc kubenswrapper[4922]: I1128 06:53:50.015911 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:50 crc kubenswrapper[4922]: I1128 06:53:50.015968 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:50 crc kubenswrapper[4922]: I1128 06:53:50.015989 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:50 crc kubenswrapper[4922]: I1128 06:53:50.016013 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:50 crc kubenswrapper[4922]: I1128 06:53:50.016031 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:50Z","lastTransitionTime":"2025-11-28T06:53:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:53:50 crc kubenswrapper[4922]: I1128 06:53:50.119721 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:50 crc kubenswrapper[4922]: I1128 06:53:50.119780 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:50 crc kubenswrapper[4922]: I1128 06:53:50.119802 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:50 crc kubenswrapper[4922]: I1128 06:53:50.119833 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:50 crc kubenswrapper[4922]: I1128 06:53:50.119854 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:50Z","lastTransitionTime":"2025-11-28T06:53:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:53:50 crc kubenswrapper[4922]: I1128 06:53:50.223511 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:50 crc kubenswrapper[4922]: I1128 06:53:50.223573 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:50 crc kubenswrapper[4922]: I1128 06:53:50.223593 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:50 crc kubenswrapper[4922]: I1128 06:53:50.223619 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:50 crc kubenswrapper[4922]: I1128 06:53:50.223637 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:50Z","lastTransitionTime":"2025-11-28T06:53:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 06:53:50 crc kubenswrapper[4922]: I1128 06:53:50.326545 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:50 crc kubenswrapper[4922]: I1128 06:53:50.326631 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:50 crc kubenswrapper[4922]: I1128 06:53:50.326654 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:50 crc kubenswrapper[4922]: I1128 06:53:50.326695 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:50 crc kubenswrapper[4922]: I1128 06:53:50.326719 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:50Z","lastTransitionTime":"2025-11-28T06:53:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:53:50 crc kubenswrapper[4922]: I1128 06:53:50.397424 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 06:53:50 crc kubenswrapper[4922]: I1128 06:53:50.397451 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 06:53:50 crc kubenswrapper[4922]: E1128 06:53:50.397585 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 06:53:50 crc kubenswrapper[4922]: I1128 06:53:50.397618 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-9kfr9" Nov 28 06:53:50 crc kubenswrapper[4922]: I1128 06:53:50.397711 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 06:53:50 crc kubenswrapper[4922]: E1128 06:53:50.397934 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 06:53:50 crc kubenswrapper[4922]: E1128 06:53:50.398015 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-9kfr9" podUID="709beb43-ed88-4a0a-b384-0c463e469964" Nov 28 06:53:50 crc kubenswrapper[4922]: E1128 06:53:50.398113 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 06:53:50 crc kubenswrapper[4922]: I1128 06:53:50.430161 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:50 crc kubenswrapper[4922]: I1128 06:53:50.430322 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:50 crc kubenswrapper[4922]: I1128 06:53:50.430359 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:50 crc kubenswrapper[4922]: I1128 06:53:50.430386 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:50 crc kubenswrapper[4922]: I1128 06:53:50.430404 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:50Z","lastTransitionTime":"2025-11-28T06:53:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:53:50 crc kubenswrapper[4922]: I1128 06:53:50.533838 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:50 crc kubenswrapper[4922]: I1128 06:53:50.533904 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:50 crc kubenswrapper[4922]: I1128 06:53:50.533928 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:50 crc kubenswrapper[4922]: I1128 06:53:50.533957 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:50 crc kubenswrapper[4922]: I1128 06:53:50.533979 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:50Z","lastTransitionTime":"2025-11-28T06:53:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 06:53:50 crc kubenswrapper[4922]: I1128 06:53:50.638880 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:50 crc kubenswrapper[4922]: I1128 06:53:50.638932 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:50 crc kubenswrapper[4922]: I1128 06:53:50.638948 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:50 crc kubenswrapper[4922]: I1128 06:53:50.638972 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:50 crc kubenswrapper[4922]: I1128 06:53:50.638989 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:50Z","lastTransitionTime":"2025-11-28T06:53:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:53:50 crc kubenswrapper[4922]: I1128 06:53:50.741803 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:50 crc kubenswrapper[4922]: I1128 06:53:50.741864 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:50 crc kubenswrapper[4922]: I1128 06:53:50.741881 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:50 crc kubenswrapper[4922]: I1128 06:53:50.741905 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:50 crc kubenswrapper[4922]: I1128 06:53:50.741922 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:50Z","lastTransitionTime":"2025-11-28T06:53:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:53:50 crc kubenswrapper[4922]: I1128 06:53:50.848683 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:50 crc kubenswrapper[4922]: I1128 06:53:50.848780 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:50 crc kubenswrapper[4922]: I1128 06:53:50.848808 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:50 crc kubenswrapper[4922]: I1128 06:53:50.848858 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:50 crc kubenswrapper[4922]: I1128 06:53:50.848884 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:50Z","lastTransitionTime":"2025-11-28T06:53:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 06:53:50 crc kubenswrapper[4922]: I1128 06:53:50.952558 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:50 crc kubenswrapper[4922]: I1128 06:53:50.952630 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:50 crc kubenswrapper[4922]: I1128 06:53:50.952653 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:50 crc kubenswrapper[4922]: I1128 06:53:50.952682 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:50 crc kubenswrapper[4922]: I1128 06:53:50.952705 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:50Z","lastTransitionTime":"2025-11-28T06:53:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:53:51 crc kubenswrapper[4922]: I1128 06:53:51.055843 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:51 crc kubenswrapper[4922]: I1128 06:53:51.055902 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:51 crc kubenswrapper[4922]: I1128 06:53:51.055920 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:51 crc kubenswrapper[4922]: I1128 06:53:51.055943 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:51 crc kubenswrapper[4922]: I1128 06:53:51.055959 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:51Z","lastTransitionTime":"2025-11-28T06:53:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:53:51 crc kubenswrapper[4922]: I1128 06:53:51.159393 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:51 crc kubenswrapper[4922]: I1128 06:53:51.159448 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:51 crc kubenswrapper[4922]: I1128 06:53:51.159460 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:51 crc kubenswrapper[4922]: I1128 06:53:51.159478 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:51 crc kubenswrapper[4922]: I1128 06:53:51.159491 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:51Z","lastTransitionTime":"2025-11-28T06:53:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 06:53:51 crc kubenswrapper[4922]: I1128 06:53:51.263310 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:51 crc kubenswrapper[4922]: I1128 06:53:51.263383 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:51 crc kubenswrapper[4922]: I1128 06:53:51.263407 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:51 crc kubenswrapper[4922]: I1128 06:53:51.263437 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:51 crc kubenswrapper[4922]: I1128 06:53:51.263462 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:51Z","lastTransitionTime":"2025-11-28T06:53:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:53:51 crc kubenswrapper[4922]: I1128 06:53:51.366705 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:51 crc kubenswrapper[4922]: I1128 06:53:51.366769 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:51 crc kubenswrapper[4922]: I1128 06:53:51.366788 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:51 crc kubenswrapper[4922]: I1128 06:53:51.366831 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:51 crc kubenswrapper[4922]: I1128 06:53:51.366852 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:51Z","lastTransitionTime":"2025-11-28T06:53:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:53:51 crc kubenswrapper[4922]: I1128 06:53:51.469953 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:51 crc kubenswrapper[4922]: I1128 06:53:51.470015 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:51 crc kubenswrapper[4922]: I1128 06:53:51.470040 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:51 crc kubenswrapper[4922]: I1128 06:53:51.470072 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:51 crc kubenswrapper[4922]: I1128 06:53:51.470094 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:51Z","lastTransitionTime":"2025-11-28T06:53:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 06:53:51 crc kubenswrapper[4922]: I1128 06:53:51.577588 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:51 crc kubenswrapper[4922]: I1128 06:53:51.577671 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:51 crc kubenswrapper[4922]: I1128 06:53:51.577696 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:51 crc kubenswrapper[4922]: I1128 06:53:51.577721 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:51 crc kubenswrapper[4922]: I1128 06:53:51.577738 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:51Z","lastTransitionTime":"2025-11-28T06:53:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:53:51 crc kubenswrapper[4922]: I1128 06:53:51.680242 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:51 crc kubenswrapper[4922]: I1128 06:53:51.680303 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:51 crc kubenswrapper[4922]: I1128 06:53:51.680323 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:51 crc kubenswrapper[4922]: I1128 06:53:51.680347 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:51 crc kubenswrapper[4922]: I1128 06:53:51.680364 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:51Z","lastTransitionTime":"2025-11-28T06:53:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:53:51 crc kubenswrapper[4922]: I1128 06:53:51.783064 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:51 crc kubenswrapper[4922]: I1128 06:53:51.783125 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:51 crc kubenswrapper[4922]: I1128 06:53:51.783141 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:51 crc kubenswrapper[4922]: I1128 06:53:51.783167 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:51 crc kubenswrapper[4922]: I1128 06:53:51.783184 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:51Z","lastTransitionTime":"2025-11-28T06:53:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 06:53:51 crc kubenswrapper[4922]: I1128 06:53:51.886607 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:51 crc kubenswrapper[4922]: I1128 06:53:51.886696 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:51 crc kubenswrapper[4922]: I1128 06:53:51.886720 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:51 crc kubenswrapper[4922]: I1128 06:53:51.886755 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:51 crc kubenswrapper[4922]: I1128 06:53:51.886781 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:51Z","lastTransitionTime":"2025-11-28T06:53:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:53:51 crc kubenswrapper[4922]: I1128 06:53:51.990104 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:51 crc kubenswrapper[4922]: I1128 06:53:51.990167 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:51 crc kubenswrapper[4922]: I1128 06:53:51.990191 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:51 crc kubenswrapper[4922]: I1128 06:53:51.990257 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:51 crc kubenswrapper[4922]: I1128 06:53:51.990284 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:51Z","lastTransitionTime":"2025-11-28T06:53:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:53:52 crc kubenswrapper[4922]: I1128 06:53:52.092913 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:52 crc kubenswrapper[4922]: I1128 06:53:52.092974 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:52 crc kubenswrapper[4922]: I1128 06:53:52.092991 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:52 crc kubenswrapper[4922]: I1128 06:53:52.093017 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:52 crc kubenswrapper[4922]: I1128 06:53:52.093033 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:52Z","lastTransitionTime":"2025-11-28T06:53:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 06:53:52 crc kubenswrapper[4922]: I1128 06:53:52.197058 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:52 crc kubenswrapper[4922]: I1128 06:53:52.197140 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:52 crc kubenswrapper[4922]: I1128 06:53:52.197163 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:52 crc kubenswrapper[4922]: I1128 06:53:52.197195 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:52 crc kubenswrapper[4922]: I1128 06:53:52.197255 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:52Z","lastTransitionTime":"2025-11-28T06:53:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:53:52 crc kubenswrapper[4922]: I1128 06:53:52.300801 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:52 crc kubenswrapper[4922]: I1128 06:53:52.300874 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:52 crc kubenswrapper[4922]: I1128 06:53:52.300892 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:52 crc kubenswrapper[4922]: I1128 06:53:52.300917 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:52 crc kubenswrapper[4922]: I1128 06:53:52.300933 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:52Z","lastTransitionTime":"2025-11-28T06:53:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:53:52 crc kubenswrapper[4922]: I1128 06:53:52.397725 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 06:53:52 crc kubenswrapper[4922]: I1128 06:53:52.397805 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 06:53:52 crc kubenswrapper[4922]: I1128 06:53:52.397850 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 06:53:52 crc kubenswrapper[4922]: I1128 06:53:52.397875 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-9kfr9" Nov 28 06:53:52 crc kubenswrapper[4922]: E1128 06:53:52.397985 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 06:53:52 crc kubenswrapper[4922]: E1128 06:53:52.398135 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 06:53:52 crc kubenswrapper[4922]: E1128 06:53:52.398325 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 06:53:52 crc kubenswrapper[4922]: E1128 06:53:52.398433 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-9kfr9" podUID="709beb43-ed88-4a0a-b384-0c463e469964" Nov 28 06:53:52 crc kubenswrapper[4922]: I1128 06:53:52.404243 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:52 crc kubenswrapper[4922]: I1128 06:53:52.404273 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:52 crc kubenswrapper[4922]: I1128 06:53:52.404282 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:52 crc kubenswrapper[4922]: I1128 06:53:52.404294 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:52 crc kubenswrapper[4922]: I1128 06:53:52.404304 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:52Z","lastTransitionTime":"2025-11-28T06:53:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 06:53:52 crc kubenswrapper[4922]: I1128 06:53:52.507362 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:52 crc kubenswrapper[4922]: I1128 06:53:52.507445 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:52 crc kubenswrapper[4922]: I1128 06:53:52.507469 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:52 crc kubenswrapper[4922]: I1128 06:53:52.507499 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:52 crc kubenswrapper[4922]: I1128 06:53:52.507522 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:52Z","lastTransitionTime":"2025-11-28T06:53:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:53:52 crc kubenswrapper[4922]: I1128 06:53:52.610178 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:52 crc kubenswrapper[4922]: I1128 06:53:52.610253 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:52 crc kubenswrapper[4922]: I1128 06:53:52.610268 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:52 crc kubenswrapper[4922]: I1128 06:53:52.610286 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:52 crc kubenswrapper[4922]: I1128 06:53:52.610298 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:52Z","lastTransitionTime":"2025-11-28T06:53:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:53:52 crc kubenswrapper[4922]: I1128 06:53:52.713942 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:52 crc kubenswrapper[4922]: I1128 06:53:52.714000 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:52 crc kubenswrapper[4922]: I1128 06:53:52.714017 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:52 crc kubenswrapper[4922]: I1128 06:53:52.714042 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:52 crc kubenswrapper[4922]: I1128 06:53:52.714060 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:52Z","lastTransitionTime":"2025-11-28T06:53:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 06:53:52 crc kubenswrapper[4922]: I1128 06:53:52.816678 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:52 crc kubenswrapper[4922]: I1128 06:53:52.816752 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:52 crc kubenswrapper[4922]: I1128 06:53:52.816775 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:52 crc kubenswrapper[4922]: I1128 06:53:52.816806 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:52 crc kubenswrapper[4922]: I1128 06:53:52.816828 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:52Z","lastTransitionTime":"2025-11-28T06:53:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:53:52 crc kubenswrapper[4922]: I1128 06:53:52.920108 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:52 crc kubenswrapper[4922]: I1128 06:53:52.920168 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:52 crc kubenswrapper[4922]: I1128 06:53:52.920186 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:52 crc kubenswrapper[4922]: I1128 06:53:52.920211 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:52 crc kubenswrapper[4922]: I1128 06:53:52.920254 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:52Z","lastTransitionTime":"2025-11-28T06:53:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:53:53 crc kubenswrapper[4922]: I1128 06:53:53.022355 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:53 crc kubenswrapper[4922]: I1128 06:53:53.022394 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:53 crc kubenswrapper[4922]: I1128 06:53:53.022408 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:53 crc kubenswrapper[4922]: I1128 06:53:53.022424 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:53 crc kubenswrapper[4922]: I1128 06:53:53.022433 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:53Z","lastTransitionTime":"2025-11-28T06:53:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 06:53:53 crc kubenswrapper[4922]: I1128 06:53:53.131017 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:53 crc kubenswrapper[4922]: I1128 06:53:53.131119 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:53 crc kubenswrapper[4922]: I1128 06:53:53.131183 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:53 crc kubenswrapper[4922]: I1128 06:53:53.131271 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:53 crc kubenswrapper[4922]: I1128 06:53:53.131300 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:53Z","lastTransitionTime":"2025-11-28T06:53:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:53:53 crc kubenswrapper[4922]: I1128 06:53:53.235066 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:53 crc kubenswrapper[4922]: I1128 06:53:53.235131 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:53 crc kubenswrapper[4922]: I1128 06:53:53.235150 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:53 crc kubenswrapper[4922]: I1128 06:53:53.235255 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:53 crc kubenswrapper[4922]: I1128 06:53:53.235313 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:53Z","lastTransitionTime":"2025-11-28T06:53:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:53:53 crc kubenswrapper[4922]: I1128 06:53:53.338451 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:53 crc kubenswrapper[4922]: I1128 06:53:53.338507 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:53 crc kubenswrapper[4922]: I1128 06:53:53.338526 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:53 crc kubenswrapper[4922]: I1128 06:53:53.338549 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:53 crc kubenswrapper[4922]: I1128 06:53:53.338567 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:53Z","lastTransitionTime":"2025-11-28T06:53:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 06:53:53 crc kubenswrapper[4922]: I1128 06:53:53.441467 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:53 crc kubenswrapper[4922]: I1128 06:53:53.441522 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:53 crc kubenswrapper[4922]: I1128 06:53:53.441536 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:53 crc kubenswrapper[4922]: I1128 06:53:53.441555 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:53 crc kubenswrapper[4922]: I1128 06:53:53.441570 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:53Z","lastTransitionTime":"2025-11-28T06:53:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:53:53 crc kubenswrapper[4922]: I1128 06:53:53.544040 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:53 crc kubenswrapper[4922]: I1128 06:53:53.544096 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:53 crc kubenswrapper[4922]: I1128 06:53:53.544114 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:53 crc kubenswrapper[4922]: I1128 06:53:53.544140 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:53 crc kubenswrapper[4922]: I1128 06:53:53.544158 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:53Z","lastTransitionTime":"2025-11-28T06:53:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:53:53 crc kubenswrapper[4922]: I1128 06:53:53.647471 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:53 crc kubenswrapper[4922]: I1128 06:53:53.647538 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:53 crc kubenswrapper[4922]: I1128 06:53:53.647578 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:53 crc kubenswrapper[4922]: I1128 06:53:53.647604 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:53 crc kubenswrapper[4922]: I1128 06:53:53.647621 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:53Z","lastTransitionTime":"2025-11-28T06:53:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 06:53:53 crc kubenswrapper[4922]: I1128 06:53:53.750684 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:53 crc kubenswrapper[4922]: I1128 06:53:53.750769 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:53 crc kubenswrapper[4922]: I1128 06:53:53.750798 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:53 crc kubenswrapper[4922]: I1128 06:53:53.750867 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:53 crc kubenswrapper[4922]: I1128 06:53:53.750894 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:53Z","lastTransitionTime":"2025-11-28T06:53:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:53:53 crc kubenswrapper[4922]: I1128 06:53:53.854023 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:53 crc kubenswrapper[4922]: I1128 06:53:53.854086 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:53 crc kubenswrapper[4922]: I1128 06:53:53.854104 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:53 crc kubenswrapper[4922]: I1128 06:53:53.854127 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:53 crc kubenswrapper[4922]: I1128 06:53:53.854146 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:53Z","lastTransitionTime":"2025-11-28T06:53:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:53:53 crc kubenswrapper[4922]: I1128 06:53:53.957979 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:53 crc kubenswrapper[4922]: I1128 06:53:53.958032 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:53 crc kubenswrapper[4922]: I1128 06:53:53.958048 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:53 crc kubenswrapper[4922]: I1128 06:53:53.958072 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:53 crc kubenswrapper[4922]: I1128 06:53:53.958090 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:53Z","lastTransitionTime":"2025-11-28T06:53:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 06:53:54 crc kubenswrapper[4922]: I1128 06:53:54.061386 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:54 crc kubenswrapper[4922]: I1128 06:53:54.061451 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:54 crc kubenswrapper[4922]: I1128 06:53:54.061469 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:54 crc kubenswrapper[4922]: I1128 06:53:54.061494 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:54 crc kubenswrapper[4922]: I1128 06:53:54.061512 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:54Z","lastTransitionTime":"2025-11-28T06:53:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:53:54 crc kubenswrapper[4922]: I1128 06:53:54.165473 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:54 crc kubenswrapper[4922]: I1128 06:53:54.165591 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:54 crc kubenswrapper[4922]: I1128 06:53:54.165610 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:54 crc kubenswrapper[4922]: I1128 06:53:54.165641 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:54 crc kubenswrapper[4922]: I1128 06:53:54.165658 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:54Z","lastTransitionTime":"2025-11-28T06:53:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:53:54 crc kubenswrapper[4922]: I1128 06:53:54.268952 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:54 crc kubenswrapper[4922]: I1128 06:53:54.269013 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:54 crc kubenswrapper[4922]: I1128 06:53:54.269030 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:54 crc kubenswrapper[4922]: I1128 06:53:54.269054 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:54 crc kubenswrapper[4922]: I1128 06:53:54.269073 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:54Z","lastTransitionTime":"2025-11-28T06:53:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 06:53:54 crc kubenswrapper[4922]: I1128 06:53:54.372674 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:54 crc kubenswrapper[4922]: I1128 06:53:54.372730 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:54 crc kubenswrapper[4922]: I1128 06:53:54.372747 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:54 crc kubenswrapper[4922]: I1128 06:53:54.372772 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:54 crc kubenswrapper[4922]: I1128 06:53:54.372789 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:54Z","lastTransitionTime":"2025-11-28T06:53:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:53:54 crc kubenswrapper[4922]: I1128 06:53:54.397553 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 06:53:54 crc kubenswrapper[4922]: I1128 06:53:54.397613 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-9kfr9" Nov 28 06:53:54 crc kubenswrapper[4922]: E1128 06:53:54.397731 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 06:53:54 crc kubenswrapper[4922]: I1128 06:53:54.397634 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 06:53:54 crc kubenswrapper[4922]: I1128 06:53:54.397613 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 06:53:54 crc kubenswrapper[4922]: E1128 06:53:54.397872 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-9kfr9" podUID="709beb43-ed88-4a0a-b384-0c463e469964" Nov 28 06:53:54 crc kubenswrapper[4922]: E1128 06:53:54.398028 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 06:53:54 crc kubenswrapper[4922]: E1128 06:53:54.398171 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 06:53:54 crc kubenswrapper[4922]: I1128 06:53:54.475561 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:54 crc kubenswrapper[4922]: I1128 06:53:54.475659 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:54 crc kubenswrapper[4922]: I1128 06:53:54.475675 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:54 crc kubenswrapper[4922]: I1128 06:53:54.475712 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:54 crc kubenswrapper[4922]: I1128 06:53:54.475730 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:54Z","lastTransitionTime":"2025-11-28T06:53:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:53:54 crc kubenswrapper[4922]: I1128 06:53:54.579813 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:54 crc kubenswrapper[4922]: I1128 06:53:54.579909 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:54 crc kubenswrapper[4922]: I1128 06:53:54.579935 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:54 crc kubenswrapper[4922]: I1128 06:53:54.579967 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:54 crc kubenswrapper[4922]: I1128 06:53:54.579992 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:54Z","lastTransitionTime":"2025-11-28T06:53:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 06:53:54 crc kubenswrapper[4922]: I1128 06:53:54.682365 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:54 crc kubenswrapper[4922]: I1128 06:53:54.682453 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:54 crc kubenswrapper[4922]: I1128 06:53:54.682470 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:54 crc kubenswrapper[4922]: I1128 06:53:54.682492 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:54 crc kubenswrapper[4922]: I1128 06:53:54.682511 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:54Z","lastTransitionTime":"2025-11-28T06:53:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:53:54 crc kubenswrapper[4922]: I1128 06:53:54.786072 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:54 crc kubenswrapper[4922]: I1128 06:53:54.786152 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:54 crc kubenswrapper[4922]: I1128 06:53:54.786174 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:54 crc kubenswrapper[4922]: I1128 06:53:54.786202 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:54 crc kubenswrapper[4922]: I1128 06:53:54.786255 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:54Z","lastTransitionTime":"2025-11-28T06:53:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:53:54 crc kubenswrapper[4922]: I1128 06:53:54.889185 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:54 crc kubenswrapper[4922]: I1128 06:53:54.889288 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:54 crc kubenswrapper[4922]: I1128 06:53:54.889312 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:54 crc kubenswrapper[4922]: I1128 06:53:54.889339 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:54 crc kubenswrapper[4922]: I1128 06:53:54.889359 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:54Z","lastTransitionTime":"2025-11-28T06:53:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 06:53:54 crc kubenswrapper[4922]: I1128 06:53:54.992799 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:54 crc kubenswrapper[4922]: I1128 06:53:54.992864 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:54 crc kubenswrapper[4922]: I1128 06:53:54.992885 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:54 crc kubenswrapper[4922]: I1128 06:53:54.992914 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:54 crc kubenswrapper[4922]: I1128 06:53:54.992934 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:54Z","lastTransitionTime":"2025-11-28T06:53:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:53:55 crc kubenswrapper[4922]: I1128 06:53:55.096448 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:55 crc kubenswrapper[4922]: I1128 06:53:55.096589 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:55 crc kubenswrapper[4922]: I1128 06:53:55.096614 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:55 crc kubenswrapper[4922]: I1128 06:53:55.096637 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:55 crc kubenswrapper[4922]: I1128 06:53:55.096655 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:55Z","lastTransitionTime":"2025-11-28T06:53:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:53:55 crc kubenswrapper[4922]: I1128 06:53:55.200145 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:55 crc kubenswrapper[4922]: I1128 06:53:55.200207 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:55 crc kubenswrapper[4922]: I1128 06:53:55.200255 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:55 crc kubenswrapper[4922]: I1128 06:53:55.200278 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:55 crc kubenswrapper[4922]: I1128 06:53:55.200297 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:55Z","lastTransitionTime":"2025-11-28T06:53:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 06:53:55 crc kubenswrapper[4922]: I1128 06:53:55.303086 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:55 crc kubenswrapper[4922]: I1128 06:53:55.303171 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:55 crc kubenswrapper[4922]: I1128 06:53:55.303274 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:55 crc kubenswrapper[4922]: I1128 06:53:55.303308 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:55 crc kubenswrapper[4922]: I1128 06:53:55.303325 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:55Z","lastTransitionTime":"2025-11-28T06:53:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:53:55 crc kubenswrapper[4922]: I1128 06:53:55.407505 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:55 crc kubenswrapper[4922]: I1128 06:53:55.407583 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:55 crc kubenswrapper[4922]: I1128 06:53:55.407605 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:55 crc kubenswrapper[4922]: I1128 06:53:55.407636 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:55 crc kubenswrapper[4922]: I1128 06:53:55.407658 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:55Z","lastTransitionTime":"2025-11-28T06:53:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 06:53:55 crc kubenswrapper[4922]: I1128 06:53:55.424427 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:55Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:55 crc kubenswrapper[4922]: I1128 06:53:55.445040 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-h8wk6" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0498340a-5e95-42bf-a0a6-8ac89a6b8858\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://19e8a18b97500977a51210206bba89633ff9939e989bfe1a6e471f5a23c83520\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zf7vq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b903fc60349ec7d5004f23ec933dce13d646a2c343819495564005dfb4813314\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zf7vq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T06:52:54Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-h8wk6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:55Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:55 crc kubenswrapper[4922]: I1128 06:53:55.465747 4922 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:55Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:55 crc kubenswrapper[4922]: I1128 06:53:55.500161 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-7gdxt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ac5c6b67-2037-400e-8e03-845b47d8ca67\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d0f038ffe6c8ec5ec836379b827ecb71967119efca3f2cd4ce5d3006fa96a153\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dtxdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2c1a545fef7aec25b21dd112c54eeb7b94306cafaf4732a50de9ff10a409c443\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dtxdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6e9942f8e8cdf6ffd04d46b29be5a53fcc8c4cb5d79f50d7747e2fe6eb744ffd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name
\\\":\\\"kube-api-access-dtxdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e91ea6957087e19d43626438d68bf92a19a6f35325de570738354edd61da5bb0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dtxdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4f56366e2355663eb896c09a6710166b89f794a7f17cb317f6c4299e8b317782\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dtxdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://403e86b4b99e8bc85be77996288705f6ea8ff5da8e9b7dfb09749f4341065b55\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\
"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dtxdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e48e9f3c53ca5775a9a467f9da512b7ee4d61cddcd9d7fc0765432f7a716ac96\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e48e9f3c53ca5775a9a467f9da512b7ee4d61cddcd9d7fc0765432f7a716ac96\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-28T06:53:41Z\\\",\\\"message\\\":\\\"[0a:58:0a:d9:00:3b 10.217.0.59]} options:{GoMap:map[iface-id-ver:9d751cbb-f2e2-430d-9754-c882a5e924a5 requested-chassis:crc]} port_security:{GoSet:[0a:58:0a:d9:00:3b 10.217.0.59]}] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {960d98b2-dc64-4e93-a4b6-9b19847af71e}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI1128 06:53:41.329271 6771 ovn.go:134] Ensuring zone local for Pod openshift-kube-scheduler/openshift-kube-scheduler-crc in node crc\\\\nI1128 06:53:41.329277 6771 obj_retry.go:386] Retry successful for *v1.Pod openshift-kube-scheduler/openshift-kube-scheduler-crc after 0 failed attempt(s)\\\\nI1128 06:53:41.329282 6771 default_network_controller.go:776] Recording success event on pod openshift-kube-scheduler/openshift-kube-scheduler-crc\\\\nF1128 06:53:41.329275 6771 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: fa\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T06:53:40Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller 
pod=ovnkube-node-7gdxt_openshift-ovn-kubernetes(ac5c6b67-2037-400e-8e03-845b47d8ca67)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dtxdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d31c99f96b8af9c7b9c47c4ccd0b6485b23c9eacfe293667038042df8330d8a9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dtxdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3aaa3ffb5b99838c7969b3e241ad9d7a0bc4797fac3ec52280914abbb7ca1b24\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3aaa3ffb5b99838c7969b3e241ad9d7a0bc4797fac3ec52280914abbb7ca1b24\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T06:52:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dtxdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T06:52:54Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-7gdxt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:55Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:55 crc kubenswrapper[4922]: I1128 06:53:55.510357 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:55 crc kubenswrapper[4922]: I1128 06:53:55.510744 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:55 crc kubenswrapper[4922]: I1128 06:53:55.510767 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:55 crc kubenswrapper[4922]: I1128 06:53:55.510796 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:55 crc kubenswrapper[4922]: I1128 06:53:55.510819 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:55Z","lastTransitionTime":"2025-11-28T06:53:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 06:53:55 crc kubenswrapper[4922]: I1128 06:53:55.520436 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-n9b52" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"31348e3e-fe58-4426-98b7-bd9dd404283b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://07e060fc8cd65cfc5a3e2ba86edb408b32996c25dac44f6f70d2f91b837e9da1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:53:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vkhgr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://03d0e97f4ab12f9cccfefde0d5665e45e14db63343fd1273279d41a8e56b060f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:53:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vkhgr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T06:53:07Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-n9b52\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:55Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:55 crc kubenswrapper[4922]: I1128 06:53:55.543097 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"78d1b075-6126-476f-8318-483aeaa7b542\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d8727c0c5c76c4071561c95b14554063c01a2d7d826bfef19624a346f7079096\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c63926690deadbe1a0b9bd4228c1be8c9d959ae52dc13540e1d7da0ef6ce2b44\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://12e55656cf2a2cadbef853f458f347aabe75bd6b644a48a8c320144a0bbb77f8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\
\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ff005273cedb4a3be19dafd07b2572733446d00c1aa3d89dec6dad79a9b37cb7\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://43f9db3464d315f5b596f15327b9e7cfc2935bb8f18492bcf4aa1b2d8e2e8951\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-28T06:52:53Z\\\",\\\"message\\\":\\\" to sync for client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file\\\\nI1128 06:52:53.805479 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\nI1128 06:52:53.805349 1 requestheader_controller.go:172] Starting RequestHeaderAuthRequestController\\\\nI1128 06:52:53.807455 1 shared_informer.go:313] Waiting for caches to sync for RequestHeaderAuthRequestController\\\\nI1128 06:52:53.805665 1 envvar.go:172] \\\\\\\"Feature gate default state\\\\\\\" feature=\\\\\\\"WatchListClient\\\\\\\" enabled=false\\\\nI1128 06:52:53.807562 1 envvar.go:172] \\\\\\\"Feature gate default state\\\\\\\" feature=\\\\\\\"InformerResourceVersion\\\\\\\" enabled=false\\\\nI1128 06:52:53.805922 1 tlsconfig.go:203] \\\\\\\"Loaded serving cert\\\\\\\" certName=\\\\\\\"serving-cert::/tmp/serving-cert-870734072/tls.crt::/tmp/serving-cert-870734072/tls.key\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"localhost\\\\\\\\\\\\\\\" [serving] validServingFor=[localhost] issuer=\\\\\\\\\\\\\\\"check-endpoints-signer@1764312757\\\\\\\\\\\\\\\" (2025-11-28 06:52:36 +0000 UTC to 2025-12-28 06:52:37 +0000 UTC (now=2025-11-28 06:52:53.805871396 +0000 UTC))\\\\\\\"\\\\nI1128 06:52:53.808356 1 named_certificates.go:53] \\\\\\\"Loaded SNI cert\\\\\\\" index=0 certName=\\\\\\\"self-signed loopback\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"apiserver-loopback-client@1764312768\\\\\\\\\\\\\\\" [serving] validServingFor=[apiserver-loopback-client] issuer=\\\\\\\\\\\\\\\"apiserver-loopback-client-ca@1764312768\\\\\\\\\\\\\\\" (2025-11-28 05:52:47 +0000 UTC to 2026-11-28 05:52:47 +0000 UTC (now=2025-11-28 06:52:53.808326937 +0000 UTC))\\\\\\\"\\\\nI1128 06:52:53.808434 1 secure_serving.go:213] Serving securely on [::]:17697\\\\nI1128 06:52:53.808491 1 genericapiserver.go:683] [graceful-termination] waiting for shutdown to be initiated\\\\nI1128 06:52:53.805962 1 dynamic_serving_content.go:135] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-870734072/tls.crt::/tmp/serving-cert-870734072/tls.key\\\\\\\"\\\\nI1128 06:52:53.808872 1 tlsconfig.go:243] \\\\\\\"Starting DynamicServingCertificateController\\\\\\\"\\\\nF1128 06:52:53.810397 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T06:52:37Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cd17aa15325fbe3a40b81cea5bfec137ef02a8dd2d5f8393be4d206a8360ba89\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:37Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9a1a9e0fb2169a4caf2986906a87573648458725e2571ec377704856d3c16b47\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9a1a9e0fb2169a4caf2986906a87573648458725e2571ec377704856d3c16b47\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T06:52:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T06:52:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T06:52:35Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:55Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:55 crc kubenswrapper[4922]: I1128 06:53:55.560059 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"dc6fd62c-3407-4045-a223-d0b56212e7da\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d11064f96da0c5b3c10a95671667cd2643279cccaafadf1ae910cffa4a1613ec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kube\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a5f6fc7cc5c7d7923b2f63059c0cfed9ebda675222e2ca864808be06f90e2907\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a5f6fc7cc5c7d7923b2f63059c0cfed9ebda675222e2ca864808be06f90e2907\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T06:52:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T06:52:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T06:52:35Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:55Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:55 crc kubenswrapper[4922]: I1128 06:53:55.580983 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:57Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:57Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4fb859dbed8c4c25965a739a2bd823f90f867a3bb629206994858cd62ba597ff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:55Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:55 crc kubenswrapper[4922]: I1128 06:53:55.602096 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-jgzjd" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b05f16bb-1729-4fd8-883a-4fb960bf4cff\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://18ece98b3e5c71e3aa7b48051d91a0684f108688cd8a31e00422d5d0b047a76e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://099001160eba2d5545fdd42a6fac2b9842549a64b5d3fe093c274e073a156e5a\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-28T06:53:41Z\\\",\\\"message\\\":\\\"2025-11-28T06:52:55+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_23e15381-4c29-4769-a2b0-29fb61b368c2\\\\n2025-11-28T06:52:55+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_23e15381-4c29-4769-a2b0-29fb61b368c2 to /host/opt/cni/bin/\\\\n2025-11-28T06:52:55Z [verbose] multus-daemon started\\\\n2025-11-28T06:52:55Z [verbose] Readiness Indicator file check\\\\n2025-11-28T06:53:40Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T06:52:55Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:53:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fwd5b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T06:52:54Z\\\"}}\" for pod \"openshift-multus\"/\"multus-jgzjd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:55Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:55 crc kubenswrapper[4922]: I1128 06:53:55.613512 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:55 crc kubenswrapper[4922]: I1128 06:53:55.613709 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:55 crc kubenswrapper[4922]: I1128 06:53:55.613910 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:55 crc kubenswrapper[4922]: I1128 06:53:55.614113 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:55 crc kubenswrapper[4922]: I1128 06:53:55.614379 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:55Z","lastTransitionTime":"2025-11-28T06:53:55Z","reason":"KubeletNotReady","message":"container 
runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:53:55 crc kubenswrapper[4922]: I1128 06:53:55.626303 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-xm948" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"28b50482-ffec-406c-9ff9-9604bce5d5d5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8329d62934c99c8561a76ffc873ea44b49549e6be23697ea4003dcd42798f914\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:53:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-76zhj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ea8d65e8ee7eb92aa290261e8f51b41b46819bc513aaca5c3bbdf3aa90a4acf1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ea8d65e8ee7eb92aa290261e8f51b41b46819bc513aaca5c3bbdf3aa90a4acf1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T06:52:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-76zhj\
\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b52be6274c308e05417b43b1a405e2156837ac5b1e751cf1bf07f1820e5072c1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b52be6274c308e05417b43b1a405e2156837ac5b1e751cf1bf07f1820e5072c1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T06:52:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T06:52:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-76zhj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2e49c9118ef8a51223ca7a5ed0e966458c25e01d5fe81bffeecf85e3c958177a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2e49c9118ef8a51223ca7a5ed0e966458c25e01d5fe81bffeecf85e3c958177a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T06:52:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T06:52:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-76zhj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://10808a5202288b64a0583c474df43f7ad4846b876c071e3797e9403416dbd4e5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://10808a5202288b64a0583c474df43f7ad4846b876c071e3797e9403416dbd4e5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T06:52:58Z\\\",\\\"reason\\\":\
\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T06:52:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-76zhj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f17f7a3f2eae1a08db3ab5d237156d3555583ed11bdf121c4138312913bc2ad1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f17f7a3f2eae1a08db3ab5d237156d3555583ed11bdf121c4138312913bc2ad1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T06:53:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T06:52:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-76zhj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8c558e4eb231b58b703d70d6da8454e7128ac7fc7138897bedcbdeb9e801a08b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8c558e4eb231b58b703d70d6da8454e7128ac7fc7138897bedcbdeb9e801a08b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T06:53:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T06:53:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-76zhj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T06:52:54Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-xm948\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 
2025-11-28T06:53:55Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:55 crc kubenswrapper[4922]: I1128 06:53:55.643357 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-z5d9x" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"21211ec8-3baf-4230-9cd8-c641f6bdc0e1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d42d2096b746c6f0a9210409101237a8b283b9baa914633aefc36971712ec634\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g4hdz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T06:52:57Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-z5d9x\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:55Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:55 crc kubenswrapper[4922]: I1128 06:53:55.660053 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-9kfr9" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"709beb43-ed88-4a0a-b384-0c463e469964\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:09Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:09Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5p26f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5p26f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T06:53:09Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-9kfr9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:55Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:55 crc kubenswrapper[4922]: I1128 06:53:55.681425 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f54a7fc3-dff2-4919-91a9-7defe17b717e\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2b39d53d3c00be07ad6ed44fb37961669118efd3f0a953a39e05c90c9fc30319\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1c84fe4f54f9b9fc76f23f13aaee875e9d127be3dec3e3952893436fb9d94936\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bf8a543b9af64068e2164c1fb7fcf8117e36a3f1a6c6e40df2a63da8fea86612\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7ef4b355167e11c82fb8067373d0d1b4ec5551157e683043c98f9249082795d2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T06:52:35Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:55Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:55 crc kubenswrapper[4922]: I1128 06:53:55.701752 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:55Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:55 crc kubenswrapper[4922]: I1128 06:53:55.718752 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:55 crc kubenswrapper[4922]: I1128 06:53:55.719610 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:55 crc kubenswrapper[4922]: I1128 06:53:55.718176 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-w9zxj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e1f29751-83a6-4469-b733-50e654026f8c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://679d64be6eca63ee652a4cb6cc5432c6ce549d1de243bdfbde115af634e770e3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ghdft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T06
:52:54Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-w9zxj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:55Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:55 crc kubenswrapper[4922]: I1128 06:53:55.719810 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:55 crc kubenswrapper[4922]: I1128 06:53:55.720018 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:55 crc kubenswrapper[4922]: I1128 06:53:55.720044 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:55Z","lastTransitionTime":"2025-11-28T06:53:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:53:55 crc kubenswrapper[4922]: I1128 06:53:55.738943 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e3415f94-87c0-4a05-9d74-02ac020c4d35\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8db4ea0b3e2badcbe285f1758b150d335c8c1f4d92478a2de587cf9d345f3640\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a5b5d5b7a735c902e218bbb524cda2f64b167fcced9cc07a47bff7e59cb55115\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-sched
uler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://900d4f306d3a3e4386262c1ee1031ad03503f6c262e9305c581397b3bf1a6a60\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://73c8b650a2a0ebc55d6531c14f5c6e5e4a7ffbac511aea8400c6632289aeb275\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://73c8b650a2a0ebc55d6531c14f5c6e5e4a7ffbac511aea8400c6632289aeb275\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T06:52:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T06:52:36Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T06:52:35Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:55Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:55 crc kubenswrapper[4922]: I1128 06:53:55.759255 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://60275d4709ecec957c37c52e762628422258288394fb23bb6190e98253977288\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d7be069e52fb9de73b7d4297a34eaab68e25c764ed60172e87146259d0d0b6ed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:55Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:55 crc kubenswrapper[4922]: I1128 06:53:55.784022 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8de96806bba4cb76f53ccf39f1188732d96955671049b550589a836ed21e0616\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:55Z is after 2025-08-24T17:21:41Z" Nov 28 06:53:55 crc kubenswrapper[4922]: I1128 06:53:55.822837 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:55 crc kubenswrapper[4922]: I1128 06:53:55.822880 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:55 crc kubenswrapper[4922]: I1128 06:53:55.822896 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:55 crc kubenswrapper[4922]: I1128 06:53:55.822919 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:55 crc kubenswrapper[4922]: I1128 06:53:55.822941 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:55Z","lastTransitionTime":"2025-11-28T06:53:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Nov 28 06:53:55 crc kubenswrapper[4922]: I1128 06:53:55.925726 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 06:53:55 crc kubenswrapper[4922]: I1128 06:53:55.926253 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 06:53:55 crc kubenswrapper[4922]: I1128 06:53:55.926398 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 06:53:55 crc kubenswrapper[4922]: I1128 06:53:55.926525 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 06:53:55 crc kubenswrapper[4922]: I1128 06:53:55.926651 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:55Z","lastTransitionTime":"2025-11-28T06:53:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 06:53:56 crc kubenswrapper[4922]: I1128 06:53:56.029659 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 06:53:56 crc kubenswrapper[4922]: I1128 06:53:56.029943 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 06:53:56 crc kubenswrapper[4922]: I1128 06:53:56.030102 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 06:53:56 crc kubenswrapper[4922]: I1128 06:53:56.030588 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 06:53:56 crc kubenswrapper[4922]: I1128 06:53:56.030749 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:56Z","lastTransitionTime":"2025-11-28T06:53:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 06:53:56 crc kubenswrapper[4922]: I1128 06:53:56.134292 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 06:53:56 crc kubenswrapper[4922]: I1128 06:53:56.134340 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 06:53:56 crc kubenswrapper[4922]: I1128 06:53:56.134357 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 06:53:56 crc kubenswrapper[4922]: I1128 06:53:56.134379 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 06:53:56 crc kubenswrapper[4922]: I1128 06:53:56.134396 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:56Z","lastTransitionTime":"2025-11-28T06:53:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 06:53:56 crc kubenswrapper[4922]: I1128 06:53:56.236843 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 06:53:56 crc kubenswrapper[4922]: I1128 06:53:56.236905 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 06:53:56 crc kubenswrapper[4922]: I1128 06:53:56.236924 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 06:53:56 crc kubenswrapper[4922]: I1128 06:53:56.236953 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 06:53:56 crc kubenswrapper[4922]: I1128 06:53:56.236971 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:56Z","lastTransitionTime":"2025-11-28T06:53:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 06:53:56 crc kubenswrapper[4922]: I1128 06:53:56.340058 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 06:53:56 crc kubenswrapper[4922]: I1128 06:53:56.340136 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 06:53:56 crc kubenswrapper[4922]: I1128 06:53:56.340154 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 06:53:56 crc kubenswrapper[4922]: I1128 06:53:56.340180 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 06:53:56 crc kubenswrapper[4922]: I1128 06:53:56.340197 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:56Z","lastTransitionTime":"2025-11-28T06:53:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 06:53:56 crc kubenswrapper[4922]: I1128 06:53:56.398350 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-9kfr9"
Nov 28 06:53:56 crc kubenswrapper[4922]: I1128 06:53:56.398462 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 28 06:53:56 crc kubenswrapper[4922]: I1128 06:53:56.398378 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 28 06:53:56 crc kubenswrapper[4922]: E1128 06:53:56.398569 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-9kfr9" podUID="709beb43-ed88-4a0a-b384-0c463e469964"
pod="openshift-multus/network-metrics-daemon-9kfr9" podUID="709beb43-ed88-4a0a-b384-0c463e469964" Nov 28 06:53:56 crc kubenswrapper[4922]: E1128 06:53:56.398753 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 06:53:56 crc kubenswrapper[4922]: I1128 06:53:56.398928 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 06:53:56 crc kubenswrapper[4922]: E1128 06:53:56.398946 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 06:53:56 crc kubenswrapper[4922]: E1128 06:53:56.399095 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 06:53:56 crc kubenswrapper[4922]: I1128 06:53:56.445145 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:56 crc kubenswrapper[4922]: I1128 06:53:56.445254 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:56 crc kubenswrapper[4922]: I1128 06:53:56.445273 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:56 crc kubenswrapper[4922]: I1128 06:53:56.445300 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:56 crc kubenswrapper[4922]: I1128 06:53:56.445317 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:56Z","lastTransitionTime":"2025-11-28T06:53:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Nov 28 06:53:56 crc kubenswrapper[4922]: I1128 06:53:56.548670 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 06:53:56 crc kubenswrapper[4922]: I1128 06:53:56.548728 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 06:53:56 crc kubenswrapper[4922]: I1128 06:53:56.548747 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 06:53:56 crc kubenswrapper[4922]: I1128 06:53:56.548770 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 06:53:56 crc kubenswrapper[4922]: I1128 06:53:56.548789 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:56Z","lastTransitionTime":"2025-11-28T06:53:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 06:53:56 crc kubenswrapper[4922]: I1128 06:53:56.651829 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 06:53:56 crc kubenswrapper[4922]: I1128 06:53:56.651891 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 06:53:56 crc kubenswrapper[4922]: I1128 06:53:56.651909 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 06:53:56 crc kubenswrapper[4922]: I1128 06:53:56.651933 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 06:53:56 crc kubenswrapper[4922]: I1128 06:53:56.651950 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:56Z","lastTransitionTime":"2025-11-28T06:53:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 06:53:56 crc kubenswrapper[4922]: I1128 06:53:56.754992 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 06:53:56 crc kubenswrapper[4922]: I1128 06:53:56.755053 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 06:53:56 crc kubenswrapper[4922]: I1128 06:53:56.755071 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 06:53:56 crc kubenswrapper[4922]: I1128 06:53:56.755093 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 06:53:56 crc kubenswrapper[4922]: I1128 06:53:56.755112 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:56Z","lastTransitionTime":"2025-11-28T06:53:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 06:53:56 crc kubenswrapper[4922]: I1128 06:53:56.858214 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 06:53:56 crc kubenswrapper[4922]: I1128 06:53:56.858304 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 06:53:56 crc kubenswrapper[4922]: I1128 06:53:56.858324 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 06:53:56 crc kubenswrapper[4922]: I1128 06:53:56.858349 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 06:53:56 crc kubenswrapper[4922]: I1128 06:53:56.858418 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:56Z","lastTransitionTime":"2025-11-28T06:53:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 06:53:56 crc kubenswrapper[4922]: I1128 06:53:56.961309 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 06:53:56 crc kubenswrapper[4922]: I1128 06:53:56.961383 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 06:53:56 crc kubenswrapper[4922]: I1128 06:53:56.961409 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 06:53:56 crc kubenswrapper[4922]: I1128 06:53:56.961439 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 06:53:56 crc kubenswrapper[4922]: I1128 06:53:56.961462 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:56Z","lastTransitionTime":"2025-11-28T06:53:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 06:53:57 crc kubenswrapper[4922]: I1128 06:53:57.064370 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 06:53:57 crc kubenswrapper[4922]: I1128 06:53:57.064427 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 06:53:57 crc kubenswrapper[4922]: I1128 06:53:57.064444 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 06:53:57 crc kubenswrapper[4922]: I1128 06:53:57.064470 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 06:53:57 crc kubenswrapper[4922]: I1128 06:53:57.064487 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:57Z","lastTransitionTime":"2025-11-28T06:53:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 06:53:57 crc kubenswrapper[4922]: I1128 06:53:57.167572 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 06:53:57 crc kubenswrapper[4922]: I1128 06:53:57.167636 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 06:53:57 crc kubenswrapper[4922]: I1128 06:53:57.167655 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 06:53:57 crc kubenswrapper[4922]: I1128 06:53:57.167679 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 06:53:57 crc kubenswrapper[4922]: I1128 06:53:57.167695 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:57Z","lastTransitionTime":"2025-11-28T06:53:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 06:53:57 crc kubenswrapper[4922]: I1128 06:53:57.269918 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 06:53:57 crc kubenswrapper[4922]: I1128 06:53:57.269958 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 06:53:57 crc kubenswrapper[4922]: I1128 06:53:57.269971 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 06:53:57 crc kubenswrapper[4922]: I1128 06:53:57.269987 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 06:53:57 crc kubenswrapper[4922]: I1128 06:53:57.269999 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:57Z","lastTransitionTime":"2025-11-28T06:53:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 06:53:57 crc kubenswrapper[4922]: I1128 06:53:57.372455 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 06:53:57 crc kubenswrapper[4922]: I1128 06:53:57.372534 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 06:53:57 crc kubenswrapper[4922]: I1128 06:53:57.372559 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 06:53:57 crc kubenswrapper[4922]: I1128 06:53:57.372625 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 06:53:57 crc kubenswrapper[4922]: I1128 06:53:57.372647 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:57Z","lastTransitionTime":"2025-11-28T06:53:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 06:53:57 crc kubenswrapper[4922]: I1128 06:53:57.479450 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 06:53:57 crc kubenswrapper[4922]: I1128 06:53:57.479514 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 06:53:57 crc kubenswrapper[4922]: I1128 06:53:57.479568 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 06:53:57 crc kubenswrapper[4922]: I1128 06:53:57.479600 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 06:53:57 crc kubenswrapper[4922]: I1128 06:53:57.479618 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:57Z","lastTransitionTime":"2025-11-28T06:53:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 06:53:57 crc kubenswrapper[4922]: I1128 06:53:57.582366 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 06:53:57 crc kubenswrapper[4922]: I1128 06:53:57.582445 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 06:53:57 crc kubenswrapper[4922]: I1128 06:53:57.582465 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 06:53:57 crc kubenswrapper[4922]: I1128 06:53:57.582489 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 06:53:57 crc kubenswrapper[4922]: I1128 06:53:57.582509 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:57Z","lastTransitionTime":"2025-11-28T06:53:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 06:53:57 crc kubenswrapper[4922]: I1128 06:53:57.686172 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 06:53:57 crc kubenswrapper[4922]: I1128 06:53:57.686269 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 06:53:57 crc kubenswrapper[4922]: I1128 06:53:57.686287 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 06:53:57 crc kubenswrapper[4922]: I1128 06:53:57.686314 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 06:53:57 crc kubenswrapper[4922]: I1128 06:53:57.686334 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:57Z","lastTransitionTime":"2025-11-28T06:53:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 06:53:57 crc kubenswrapper[4922]: I1128 06:53:57.789353 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 06:53:57 crc kubenswrapper[4922]: I1128 06:53:57.789417 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 06:53:57 crc kubenswrapper[4922]: I1128 06:53:57.789438 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 06:53:57 crc kubenswrapper[4922]: I1128 06:53:57.789466 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 06:53:57 crc kubenswrapper[4922]: I1128 06:53:57.789491 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:57Z","lastTransitionTime":"2025-11-28T06:53:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 06:53:57 crc kubenswrapper[4922]: I1128 06:53:57.892888 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 06:53:57 crc kubenswrapper[4922]: I1128 06:53:57.892956 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 06:53:57 crc kubenswrapper[4922]: I1128 06:53:57.892979 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 06:53:57 crc kubenswrapper[4922]: I1128 06:53:57.893008 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 06:53:57 crc kubenswrapper[4922]: I1128 06:53:57.893031 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:57Z","lastTransitionTime":"2025-11-28T06:53:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 06:53:57 crc kubenswrapper[4922]: I1128 06:53:57.995865 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 06:53:57 crc kubenswrapper[4922]: I1128 06:53:57.995920 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 06:53:57 crc kubenswrapper[4922]: I1128 06:53:57.995936 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 06:53:57 crc kubenswrapper[4922]: I1128 06:53:57.995958 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 06:53:57 crc kubenswrapper[4922]: I1128 06:53:57.995975 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:57Z","lastTransitionTime":"2025-11-28T06:53:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 06:53:58 crc kubenswrapper[4922]: I1128 06:53:58.099446 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 06:53:58 crc kubenswrapper[4922]: I1128 06:53:58.099498 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 06:53:58 crc kubenswrapper[4922]: I1128 06:53:58.099515 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 06:53:58 crc kubenswrapper[4922]: I1128 06:53:58.099537 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 06:53:58 crc kubenswrapper[4922]: I1128 06:53:58.099554 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:58Z","lastTransitionTime":"2025-11-28T06:53:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 06:53:58 crc kubenswrapper[4922]: I1128 06:53:58.202957 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 06:53:58 crc kubenswrapper[4922]: I1128 06:53:58.203004 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 06:53:58 crc kubenswrapper[4922]: I1128 06:53:58.203020 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 06:53:58 crc kubenswrapper[4922]: I1128 06:53:58.203042 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 06:53:58 crc kubenswrapper[4922]: I1128 06:53:58.203058 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:58Z","lastTransitionTime":"2025-11-28T06:53:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 06:53:58 crc kubenswrapper[4922]: I1128 06:53:58.249660 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 28 06:53:58 crc kubenswrapper[4922]: I1128 06:53:58.249819 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 28 06:53:58 crc kubenswrapper[4922]: I1128 06:53:58.249879 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 28 06:53:58 crc kubenswrapper[4922]: E1128 06:53:58.250053 4922 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered
Nov 28 06:53:58 crc kubenswrapper[4922]: E1128 06:53:58.250335 4922 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-28 06:55:02.250109631 +0000 UTC m=+147.170505243 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered
Nov 28 06:53:58 crc kubenswrapper[4922]: E1128 06:53:58.250530 4922 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 06:55:02.250481231 +0000 UTC m=+147.170876843 (durationBeforeRetry 1m4s). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 28 06:53:58 crc kubenswrapper[4922]: E1128 06:53:58.250556 4922 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered
Nov 28 06:53:58 crc kubenswrapper[4922]: E1128 06:53:58.250636 4922 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-28 06:55:02.250618775 +0000 UTC m=+147.171014387 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered
Nov 28 06:53:58 crc kubenswrapper[4922]: I1128 06:53:58.306650 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 06:53:58 crc kubenswrapper[4922]: I1128 06:53:58.306714 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 06:53:58 crc kubenswrapper[4922]: I1128 06:53:58.306732 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 06:53:58 crc kubenswrapper[4922]: I1128 06:53:58.306768 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 06:53:58 crc kubenswrapper[4922]: I1128 06:53:58.306786 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:58Z","lastTransitionTime":"2025-11-28T06:53:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
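The identical "durationBeforeRetry 1m4s" values above reflect the kubelet's exponential backoff for failed volume operations: each consecutive failure doubles the wait before the next attempt is permitted. The sketch below shows only that doubling pattern; the initial delay and cap are illustrative assumptions, not constants taken from this log (under a 500ms start, the 8th doubling lands exactly on 1m4s):

// backoff.go - minimal sketch of the retry pattern implied by the log above.
package main

import (
	"fmt"
	"time"
)

func main() {
	delay := 500 * time.Millisecond  // assumed initial delay, not a kubelet constant
	const maxDelay = 2 * time.Minute // assumed cap, not a kubelet constant
	for attempt := 1; attempt <= 9; attempt++ {
		fmt.Printf("failure %d: no retries permitted for %v\n", attempt, delay)
		delay *= 2 // double the wait after every failure
		if delay > maxDelay {
			delay = maxDelay
		}
	}
}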
Nov 28 06:53:58 crc kubenswrapper[4922]: I1128 06:53:58.351104 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 28 06:53:58 crc kubenswrapper[4922]: E1128 06:53:58.351342 4922 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered
Nov 28 06:53:58 crc kubenswrapper[4922]: E1128 06:53:58.351380 4922 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered
Nov 28 06:53:58 crc kubenswrapper[4922]: E1128 06:53:58.351401 4922 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Nov 28 06:53:58 crc kubenswrapper[4922]: E1128 06:53:58.351477 4922 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-11-28 06:55:02.351455585 +0000 UTC m=+147.271851207 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Nov 28 06:53:58 crc kubenswrapper[4922]: I1128 06:53:58.397464 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-9kfr9"
Nov 28 06:53:58 crc kubenswrapper[4922]: I1128 06:53:58.397563 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 28 06:53:58 crc kubenswrapper[4922]: I1128 06:53:58.397747 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 28 06:53:58 crc kubenswrapper[4922]: I1128 06:53:58.397875 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 28 06:53:58 crc kubenswrapper[4922]: E1128 06:53:58.397908 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-9kfr9" podUID="709beb43-ed88-4a0a-b384-0c463e469964"
pod="openshift-multus/network-metrics-daemon-9kfr9" podUID="709beb43-ed88-4a0a-b384-0c463e469964" Nov 28 06:53:58 crc kubenswrapper[4922]: E1128 06:53:58.398093 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 06:53:58 crc kubenswrapper[4922]: E1128 06:53:58.398201 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 06:53:58 crc kubenswrapper[4922]: E1128 06:53:58.398350 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 06:53:58 crc kubenswrapper[4922]: I1128 06:53:58.403124 4922 scope.go:117] "RemoveContainer" containerID="e48e9f3c53ca5775a9a467f9da512b7ee4d61cddcd9d7fc0765432f7a716ac96" Nov 28 06:53:58 crc kubenswrapper[4922]: E1128 06:53:58.403483 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 20s restarting failed container=ovnkube-controller pod=ovnkube-node-7gdxt_openshift-ovn-kubernetes(ac5c6b67-2037-400e-8e03-845b47d8ca67)\"" pod="openshift-ovn-kubernetes/ovnkube-node-7gdxt" podUID="ac5c6b67-2037-400e-8e03-845b47d8ca67" Nov 28 06:53:58 crc kubenswrapper[4922]: I1128 06:53:58.409804 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:53:58 crc kubenswrapper[4922]: I1128 06:53:58.409844 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:53:58 crc kubenswrapper[4922]: I1128 06:53:58.409860 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:53:58 crc kubenswrapper[4922]: I1128 06:53:58.409881 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:53:58 crc kubenswrapper[4922]: I1128 06:53:58.409898 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:58Z","lastTransitionTime":"2025-11-28T06:53:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Nov 28 06:53:58 crc kubenswrapper[4922]: I1128 06:53:58.452415 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 28 06:53:58 crc kubenswrapper[4922]: E1128 06:53:58.452595 4922 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered
Nov 28 06:53:58 crc kubenswrapper[4922]: E1128 06:53:58.452634 4922 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered
Nov 28 06:53:58 crc kubenswrapper[4922]: E1128 06:53:58.452655 4922 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Nov 28 06:53:58 crc kubenswrapper[4922]: E1128 06:53:58.452740 4922 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-11-28 06:55:02.452716736 +0000 UTC m=+147.373112348 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Nov 28 06:53:58 crc kubenswrapper[4922]: I1128 06:53:58.513620 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 06:53:58 crc kubenswrapper[4922]: I1128 06:53:58.513692 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 06:53:58 crc kubenswrapper[4922]: I1128 06:53:58.513709 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 06:53:58 crc kubenswrapper[4922]: I1128 06:53:58.513734 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 06:53:58 crc kubenswrapper[4922]: I1128 06:53:58.513753 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:58Z","lastTransitionTime":"2025-11-28T06:53:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 06:53:58 crc kubenswrapper[4922]: I1128 06:53:58.616802 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 06:53:58 crc kubenswrapper[4922]: I1128 06:53:58.616845 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 06:53:58 crc kubenswrapper[4922]: I1128 06:53:58.616869 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 06:53:58 crc kubenswrapper[4922]: I1128 06:53:58.616898 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 06:53:58 crc kubenswrapper[4922]: I1128 06:53:58.616921 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:58Z","lastTransitionTime":"2025-11-28T06:53:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 06:53:58 crc kubenswrapper[4922]: I1128 06:53:58.720168 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 06:53:58 crc kubenswrapper[4922]: I1128 06:53:58.720294 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 06:53:58 crc kubenswrapper[4922]: I1128 06:53:58.720321 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 06:53:58 crc kubenswrapper[4922]: I1128 06:53:58.720350 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 06:53:58 crc kubenswrapper[4922]: I1128 06:53:58.720370 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:58Z","lastTransitionTime":"2025-11-28T06:53:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 06:53:58 crc kubenswrapper[4922]: I1128 06:53:58.823861 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 06:53:58 crc kubenswrapper[4922]: I1128 06:53:58.823927 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 06:53:58 crc kubenswrapper[4922]: I1128 06:53:58.823944 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 06:53:58 crc kubenswrapper[4922]: I1128 06:53:58.823968 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 06:53:58 crc kubenswrapper[4922]: I1128 06:53:58.823985 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:58Z","lastTransitionTime":"2025-11-28T06:53:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 06:53:58 crc kubenswrapper[4922]: I1128 06:53:58.826468 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 06:53:58 crc kubenswrapper[4922]: I1128 06:53:58.826517 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 06:53:58 crc kubenswrapper[4922]: I1128 06:53:58.826533 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 06:53:58 crc kubenswrapper[4922]: I1128 06:53:58.826552 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 06:53:58 crc kubenswrapper[4922]: I1128 06:53:58.826567 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:58Z","lastTransitionTime":"2025-11-28T06:53:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 06:53:58 crc kubenswrapper[4922]: E1128 06:53:58.848751 4922 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T06:53:58Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:58Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T06:53:58Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:58Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T06:53:58Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:58Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T06:53:58Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:58Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"3ea1e6bb-61b7-453d-b9cb-290e4e3cdf54\\\",\\\"systemUUID\\\":\\\"a83ea3b2-2af6-4e19-83ef-b63bfe4faed4\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:58Z is after 2025-08-24T17:21:41Z"
Nov 28 06:53:58 crc kubenswrapper[4922]: I1128 06:53:58.854602 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 06:53:58 crc kubenswrapper[4922]: I1128 06:53:58.854667 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
event="NodeHasNoDiskPressure"
Nov 28 06:53:58 crc kubenswrapper[4922]: I1128 06:53:58.854687 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 06:53:58 crc kubenswrapper[4922]: I1128 06:53:58.854712 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 06:53:58 crc kubenswrapper[4922]: I1128 06:53:58.854729 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:58Z","lastTransitionTime":"2025-11-28T06:53:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 06:53:58 crc kubenswrapper[4922]: E1128 06:53:58.874904 4922 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status [... node status patch payload omitted; byte-identical to the preceding entry ...] for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:58Z is after 2025-08-24T17:21:41Z"
Nov 28 06:53:58 crc kubenswrapper[4922]: I1128 06:53:58.879928 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 06:53:58 crc kubenswrapper[4922]: I1128 06:53:58.880040 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 06:53:58 crc kubenswrapper[4922]: I1128 06:53:58.880113 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 06:53:58 crc kubenswrapper[4922]: I1128 06:53:58.880149 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 06:53:58 crc kubenswrapper[4922]: I1128 06:53:58.880167 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:58Z","lastTransitionTime":"2025-11-28T06:53:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 06:53:58 crc kubenswrapper[4922]: E1128 06:53:58.903095 4922 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status [... node status patch payload omitted; byte-identical to the preceding entry ...] for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:58Z is after 2025-08-24T17:21:41Z"
Nov 28 06:53:58 crc kubenswrapper[4922]: I1128 06:53:58.908646 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 06:53:58 crc kubenswrapper[4922]: I1128 06:53:58.908734 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 06:53:58 crc kubenswrapper[4922]: I1128 06:53:58.908754 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 06:53:58 crc kubenswrapper[4922]: I1128 06:53:58.908779 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 06:53:58 crc kubenswrapper[4922]: I1128 06:53:58.908797 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:58Z","lastTransitionTime":"2025-11-28T06:53:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 06:53:58 crc kubenswrapper[4922]: E1128 06:53:58.928851 4922 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status [... node status patch payload omitted; byte-identical to the preceding entry ...] for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:58Z is after 2025-08-24T17:21:41Z"
Nov 28 06:53:58 crc kubenswrapper[4922]: I1128 06:53:58.935585 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 06:53:58 crc kubenswrapper[4922]: I1128 06:53:58.935628 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 06:53:58 crc kubenswrapper[4922]: I1128 06:53:58.935645 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 06:53:58 crc kubenswrapper[4922]: I1128 06:53:58.935670 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 06:53:58 crc kubenswrapper[4922]: I1128 06:53:58.935689 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:58Z","lastTransitionTime":"2025-11-28T06:53:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 06:53:58 crc kubenswrapper[4922]: E1128 06:53:58.956442 4922 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status [... node status patch payload omitted; byte-identical to the preceding entry ...] for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:53:58Z is after 2025-08-24T17:21:41Z"
Nov 28 06:53:58 crc kubenswrapper[4922]: E1128 06:53:58.956672 4922 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count"
Nov 28 06:53:58 crc kubenswrapper[4922]: I1128 06:53:58.959116 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 06:53:58 crc kubenswrapper[4922]: I1128 06:53:58.959207 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 06:53:58 crc kubenswrapper[4922]: I1128 06:53:58.959257 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 06:53:58 crc kubenswrapper[4922]: I1128 06:53:58.959285 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 06:53:58 crc kubenswrapper[4922]: I1128 06:53:58.959307 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:58Z","lastTransitionTime":"2025-11-28T06:53:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 06:53:59 crc kubenswrapper[4922]: I1128 06:53:59.061947 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 06:53:59 crc kubenswrapper[4922]: I1128 06:53:59.062014 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 06:53:59 crc kubenswrapper[4922]: I1128 06:53:59.062032 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 06:53:59 crc kubenswrapper[4922]: I1128 06:53:59.062059 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 06:53:59 crc kubenswrapper[4922]: I1128 06:53:59.062079 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:59Z","lastTransitionTime":"2025-11-28T06:53:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 06:53:59 crc kubenswrapper[4922]: I1128 06:53:59.165604 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 06:53:59 crc kubenswrapper[4922]: I1128 06:53:59.165661 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 06:53:59 crc kubenswrapper[4922]: I1128 06:53:59.165677 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 06:53:59 crc kubenswrapper[4922]: I1128 06:53:59.165700 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 06:53:59 crc kubenswrapper[4922]: I1128 06:53:59.165717 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:59Z","lastTransitionTime":"2025-11-28T06:53:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 06:53:59 crc kubenswrapper[4922]: I1128 06:53:59.268151 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 06:53:59 crc kubenswrapper[4922]: I1128 06:53:59.268209 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 06:53:59 crc kubenswrapper[4922]: I1128 06:53:59.268250 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 06:53:59 crc kubenswrapper[4922]: I1128 06:53:59.268321 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 06:53:59 crc kubenswrapper[4922]: I1128 06:53:59.268354 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:59Z","lastTransitionTime":"2025-11-28T06:53:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 06:53:59 crc kubenswrapper[4922]: I1128 06:53:59.369999 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 06:53:59 crc kubenswrapper[4922]: I1128 06:53:59.370059 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 06:53:59 crc kubenswrapper[4922]: I1128 06:53:59.370078 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 06:53:59 crc kubenswrapper[4922]: I1128 06:53:59.370102 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 06:53:59 crc kubenswrapper[4922]: I1128 06:53:59.370121 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:59Z","lastTransitionTime":"2025-11-28T06:53:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 06:53:59 crc kubenswrapper[4922]: I1128 06:53:59.473104 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 06:53:59 crc kubenswrapper[4922]: I1128 06:53:59.473165 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 06:53:59 crc kubenswrapper[4922]: I1128 06:53:59.473182 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 06:53:59 crc kubenswrapper[4922]: I1128 06:53:59.473206 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 06:53:59 crc kubenswrapper[4922]: I1128 06:53:59.473255 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:59Z","lastTransitionTime":"2025-11-28T06:53:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 06:53:59 crc kubenswrapper[4922]: I1128 06:53:59.576998 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 06:53:59 crc kubenswrapper[4922]: I1128 06:53:59.577060 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 06:53:59 crc kubenswrapper[4922]: I1128 06:53:59.577078 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 06:53:59 crc kubenswrapper[4922]: I1128 06:53:59.577101 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 06:53:59 crc kubenswrapper[4922]: I1128 06:53:59.577118 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:59Z","lastTransitionTime":"2025-11-28T06:53:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 06:53:59 crc kubenswrapper[4922]: I1128 06:53:59.680068 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 06:53:59 crc kubenswrapper[4922]: I1128 06:53:59.680141 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 06:53:59 crc kubenswrapper[4922]: I1128 06:53:59.680158 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 06:53:59 crc kubenswrapper[4922]: I1128 06:53:59.680184 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 06:53:59 crc kubenswrapper[4922]: I1128 06:53:59.680208 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:59Z","lastTransitionTime":"2025-11-28T06:53:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 06:53:59 crc kubenswrapper[4922]: I1128 06:53:59.782848 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 06:53:59 crc kubenswrapper[4922]: I1128 06:53:59.782917 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 06:53:59 crc kubenswrapper[4922]: I1128 06:53:59.782935 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 06:53:59 crc kubenswrapper[4922]: I1128 06:53:59.782959 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 06:53:59 crc kubenswrapper[4922]: I1128 06:53:59.782978 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:59Z","lastTransitionTime":"2025-11-28T06:53:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 06:53:59 crc kubenswrapper[4922]: I1128 06:53:59.886146 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 06:53:59 crc kubenswrapper[4922]: I1128 06:53:59.886403 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 06:53:59 crc kubenswrapper[4922]: I1128 06:53:59.886421 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 06:53:59 crc kubenswrapper[4922]: I1128 06:53:59.886446 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 06:53:59 crc kubenswrapper[4922]: I1128 06:53:59.886467 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:59Z","lastTransitionTime":"2025-11-28T06:53:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 06:53:59 crc kubenswrapper[4922]: I1128 06:53:59.990436 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 06:53:59 crc kubenswrapper[4922]: I1128 06:53:59.990531 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 06:53:59 crc kubenswrapper[4922]: I1128 06:53:59.990557 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 06:53:59 crc kubenswrapper[4922]: I1128 06:53:59.990589 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 06:53:59 crc kubenswrapper[4922]: I1128 06:53:59.990615 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:53:59Z","lastTransitionTime":"2025-11-28T06:53:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 06:54:00 crc kubenswrapper[4922]: I1128 06:54:00.093387 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 06:54:00 crc kubenswrapper[4922]: I1128 06:54:00.093435 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 06:54:00 crc kubenswrapper[4922]: I1128 06:54:00.093450 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 06:54:00 crc kubenswrapper[4922]: I1128 06:54:00.093471 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 06:54:00 crc kubenswrapper[4922]: I1128 06:54:00.093487 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:54:00Z","lastTransitionTime":"2025-11-28T06:54:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 06:54:00 crc kubenswrapper[4922]: I1128 06:54:00.197449 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:54:00 crc kubenswrapper[4922]: I1128 06:54:00.197521 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:54:00 crc kubenswrapper[4922]: I1128 06:54:00.197544 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:54:00 crc kubenswrapper[4922]: I1128 06:54:00.197575 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:54:00 crc kubenswrapper[4922]: I1128 06:54:00.197596 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:54:00Z","lastTransitionTime":"2025-11-28T06:54:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:54:00 crc kubenswrapper[4922]: I1128 06:54:00.300774 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:54:00 crc kubenswrapper[4922]: I1128 06:54:00.300822 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:54:00 crc kubenswrapper[4922]: I1128 06:54:00.300831 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:54:00 crc kubenswrapper[4922]: I1128 06:54:00.300869 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:54:00 crc kubenswrapper[4922]: I1128 06:54:00.300880 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:54:00Z","lastTransitionTime":"2025-11-28T06:54:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:54:00 crc kubenswrapper[4922]: I1128 06:54:00.397848 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 06:54:00 crc kubenswrapper[4922]: I1128 06:54:00.397891 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-9kfr9" Nov 28 06:54:00 crc kubenswrapper[4922]: E1128 06:54:00.398028 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 06:54:00 crc kubenswrapper[4922]: I1128 06:54:00.398073 4922 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 06:54:00 crc kubenswrapper[4922]: E1128 06:54:00.398351 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-9kfr9" podUID="709beb43-ed88-4a0a-b384-0c463e469964" Nov 28 06:54:00 crc kubenswrapper[4922]: E1128 06:54:00.398680 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 06:54:00 crc kubenswrapper[4922]: I1128 06:54:00.398873 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 06:54:00 crc kubenswrapper[4922]: E1128 06:54:00.399655 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 06:54:00 crc kubenswrapper[4922]: I1128 06:54:00.403114 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:54:00 crc kubenswrapper[4922]: I1128 06:54:00.403164 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:54:00 crc kubenswrapper[4922]: I1128 06:54:00.403181 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:54:00 crc kubenswrapper[4922]: I1128 06:54:00.403206 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:54:00 crc kubenswrapper[4922]: I1128 06:54:00.403254 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:54:00Z","lastTransitionTime":"2025-11-28T06:54:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 06:54:00 crc kubenswrapper[4922]: I1128 06:54:00.506930 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:54:00 crc kubenswrapper[4922]: I1128 06:54:00.506985 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:54:00 crc kubenswrapper[4922]: I1128 06:54:00.507002 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:54:00 crc kubenswrapper[4922]: I1128 06:54:00.507025 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:54:00 crc kubenswrapper[4922]: I1128 06:54:00.507042 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:54:00Z","lastTransitionTime":"2025-11-28T06:54:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:54:00 crc kubenswrapper[4922]: I1128 06:54:00.610145 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:54:00 crc kubenswrapper[4922]: I1128 06:54:00.610196 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:54:00 crc kubenswrapper[4922]: I1128 06:54:00.610213 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:54:00 crc kubenswrapper[4922]: I1128 06:54:00.610275 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:54:00 crc kubenswrapper[4922]: I1128 06:54:00.610300 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:54:00Z","lastTransitionTime":"2025-11-28T06:54:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:54:00 crc kubenswrapper[4922]: I1128 06:54:00.713914 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:54:00 crc kubenswrapper[4922]: I1128 06:54:00.713982 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:54:00 crc kubenswrapper[4922]: I1128 06:54:00.714000 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:54:00 crc kubenswrapper[4922]: I1128 06:54:00.714026 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:54:00 crc kubenswrapper[4922]: I1128 06:54:00.714043 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:54:00Z","lastTransitionTime":"2025-11-28T06:54:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 06:54:00 crc kubenswrapper[4922]: I1128 06:54:00.817022 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:54:00 crc kubenswrapper[4922]: I1128 06:54:00.817096 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:54:00 crc kubenswrapper[4922]: I1128 06:54:00.817121 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:54:00 crc kubenswrapper[4922]: I1128 06:54:00.817153 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:54:00 crc kubenswrapper[4922]: I1128 06:54:00.817176 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:54:00Z","lastTransitionTime":"2025-11-28T06:54:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:54:00 crc kubenswrapper[4922]: I1128 06:54:00.922532 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:54:00 crc kubenswrapper[4922]: I1128 06:54:00.922587 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:54:00 crc kubenswrapper[4922]: I1128 06:54:00.922608 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:54:00 crc kubenswrapper[4922]: I1128 06:54:00.922634 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:54:00 crc kubenswrapper[4922]: I1128 06:54:00.922654 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:54:00Z","lastTransitionTime":"2025-11-28T06:54:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:54:01 crc kubenswrapper[4922]: I1128 06:54:01.025350 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:54:01 crc kubenswrapper[4922]: I1128 06:54:01.025403 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:54:01 crc kubenswrapper[4922]: I1128 06:54:01.025420 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:54:01 crc kubenswrapper[4922]: I1128 06:54:01.025444 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:54:01 crc kubenswrapper[4922]: I1128 06:54:01.025460 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:54:01Z","lastTransitionTime":"2025-11-28T06:54:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 06:54:01 crc kubenswrapper[4922]: I1128 06:54:01.128979 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:54:01 crc kubenswrapper[4922]: I1128 06:54:01.129044 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:54:01 crc kubenswrapper[4922]: I1128 06:54:01.129064 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:54:01 crc kubenswrapper[4922]: I1128 06:54:01.129091 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:54:01 crc kubenswrapper[4922]: I1128 06:54:01.129109 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:54:01Z","lastTransitionTime":"2025-11-28T06:54:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:54:01 crc kubenswrapper[4922]: I1128 06:54:01.233006 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:54:01 crc kubenswrapper[4922]: I1128 06:54:01.233052 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:54:01 crc kubenswrapper[4922]: I1128 06:54:01.233068 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:54:01 crc kubenswrapper[4922]: I1128 06:54:01.233089 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:54:01 crc kubenswrapper[4922]: I1128 06:54:01.233105 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:54:01Z","lastTransitionTime":"2025-11-28T06:54:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:54:01 crc kubenswrapper[4922]: I1128 06:54:01.335467 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:54:01 crc kubenswrapper[4922]: I1128 06:54:01.335521 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:54:01 crc kubenswrapper[4922]: I1128 06:54:01.335546 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:54:01 crc kubenswrapper[4922]: I1128 06:54:01.335575 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:54:01 crc kubenswrapper[4922]: I1128 06:54:01.335598 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:54:01Z","lastTransitionTime":"2025-11-28T06:54:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 06:54:01 crc kubenswrapper[4922]: I1128 06:54:01.440149 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:54:01 crc kubenswrapper[4922]: I1128 06:54:01.440257 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:54:01 crc kubenswrapper[4922]: I1128 06:54:01.440322 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:54:01 crc kubenswrapper[4922]: I1128 06:54:01.440355 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:54:01 crc kubenswrapper[4922]: I1128 06:54:01.440379 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:54:01Z","lastTransitionTime":"2025-11-28T06:54:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:54:01 crc kubenswrapper[4922]: I1128 06:54:01.542906 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:54:01 crc kubenswrapper[4922]: I1128 06:54:01.542973 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:54:01 crc kubenswrapper[4922]: I1128 06:54:01.543000 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:54:01 crc kubenswrapper[4922]: I1128 06:54:01.543029 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:54:01 crc kubenswrapper[4922]: I1128 06:54:01.543050 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:54:01Z","lastTransitionTime":"2025-11-28T06:54:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:54:01 crc kubenswrapper[4922]: I1128 06:54:01.645554 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:54:01 crc kubenswrapper[4922]: I1128 06:54:01.645631 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:54:01 crc kubenswrapper[4922]: I1128 06:54:01.645654 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:54:01 crc kubenswrapper[4922]: I1128 06:54:01.645683 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:54:01 crc kubenswrapper[4922]: I1128 06:54:01.645708 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:54:01Z","lastTransitionTime":"2025-11-28T06:54:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 06:54:01 crc kubenswrapper[4922]: I1128 06:54:01.748244 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:54:01 crc kubenswrapper[4922]: I1128 06:54:01.748304 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:54:01 crc kubenswrapper[4922]: I1128 06:54:01.748321 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:54:01 crc kubenswrapper[4922]: I1128 06:54:01.748369 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:54:01 crc kubenswrapper[4922]: I1128 06:54:01.748387 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:54:01Z","lastTransitionTime":"2025-11-28T06:54:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:54:01 crc kubenswrapper[4922]: I1128 06:54:01.851143 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:54:01 crc kubenswrapper[4922]: I1128 06:54:01.851258 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:54:01 crc kubenswrapper[4922]: I1128 06:54:01.851279 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:54:01 crc kubenswrapper[4922]: I1128 06:54:01.851321 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:54:01 crc kubenswrapper[4922]: I1128 06:54:01.851346 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:54:01Z","lastTransitionTime":"2025-11-28T06:54:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:54:01 crc kubenswrapper[4922]: I1128 06:54:01.954569 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:54:01 crc kubenswrapper[4922]: I1128 06:54:01.954627 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:54:01 crc kubenswrapper[4922]: I1128 06:54:01.954649 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:54:01 crc kubenswrapper[4922]: I1128 06:54:01.954677 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:54:01 crc kubenswrapper[4922]: I1128 06:54:01.954699 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:54:01Z","lastTransitionTime":"2025-11-28T06:54:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 06:54:02 crc kubenswrapper[4922]: I1128 06:54:02.059019 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:54:02 crc kubenswrapper[4922]: I1128 06:54:02.059092 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:54:02 crc kubenswrapper[4922]: I1128 06:54:02.059117 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:54:02 crc kubenswrapper[4922]: I1128 06:54:02.059147 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:54:02 crc kubenswrapper[4922]: I1128 06:54:02.059169 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:54:02Z","lastTransitionTime":"2025-11-28T06:54:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:54:02 crc kubenswrapper[4922]: I1128 06:54:02.162512 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:54:02 crc kubenswrapper[4922]: I1128 06:54:02.162573 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:54:02 crc kubenswrapper[4922]: I1128 06:54:02.162592 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:54:02 crc kubenswrapper[4922]: I1128 06:54:02.162618 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:54:02 crc kubenswrapper[4922]: I1128 06:54:02.162638 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:54:02Z","lastTransitionTime":"2025-11-28T06:54:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:54:02 crc kubenswrapper[4922]: I1128 06:54:02.265940 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:54:02 crc kubenswrapper[4922]: I1128 06:54:02.266002 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:54:02 crc kubenswrapper[4922]: I1128 06:54:02.266019 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:54:02 crc kubenswrapper[4922]: I1128 06:54:02.266043 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:54:02 crc kubenswrapper[4922]: I1128 06:54:02.266063 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:54:02Z","lastTransitionTime":"2025-11-28T06:54:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 06:54:02 crc kubenswrapper[4922]: I1128 06:54:02.369387 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:54:02 crc kubenswrapper[4922]: I1128 06:54:02.369481 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:54:02 crc kubenswrapper[4922]: I1128 06:54:02.369500 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:54:02 crc kubenswrapper[4922]: I1128 06:54:02.369525 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:54:02 crc kubenswrapper[4922]: I1128 06:54:02.369543 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:54:02Z","lastTransitionTime":"2025-11-28T06:54:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:54:02 crc kubenswrapper[4922]: I1128 06:54:02.397811 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 06:54:02 crc kubenswrapper[4922]: I1128 06:54:02.397890 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 06:54:02 crc kubenswrapper[4922]: I1128 06:54:02.397950 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 06:54:02 crc kubenswrapper[4922]: I1128 06:54:02.397811 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-9kfr9" Nov 28 06:54:02 crc kubenswrapper[4922]: E1128 06:54:02.398044 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 06:54:02 crc kubenswrapper[4922]: E1128 06:54:02.398288 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 06:54:02 crc kubenswrapper[4922]: E1128 06:54:02.398315 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 06:54:02 crc kubenswrapper[4922]: E1128 06:54:02.398398 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-9kfr9" podUID="709beb43-ed88-4a0a-b384-0c463e469964" Nov 28 06:54:02 crc kubenswrapper[4922]: I1128 06:54:02.473178 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:54:02 crc kubenswrapper[4922]: I1128 06:54:02.473273 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:54:02 crc kubenswrapper[4922]: I1128 06:54:02.473296 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:54:02 crc kubenswrapper[4922]: I1128 06:54:02.473324 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:54:02 crc kubenswrapper[4922]: I1128 06:54:02.473345 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:54:02Z","lastTransitionTime":"2025-11-28T06:54:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:54:02 crc kubenswrapper[4922]: I1128 06:54:02.577178 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:54:02 crc kubenswrapper[4922]: I1128 06:54:02.577288 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:54:02 crc kubenswrapper[4922]: I1128 06:54:02.577306 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:54:02 crc kubenswrapper[4922]: I1128 06:54:02.577332 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:54:02 crc kubenswrapper[4922]: I1128 06:54:02.577349 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:54:02Z","lastTransitionTime":"2025-11-28T06:54:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 06:54:02 crc kubenswrapper[4922]: I1128 06:54:02.680956 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:54:02 crc kubenswrapper[4922]: I1128 06:54:02.681012 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:54:02 crc kubenswrapper[4922]: I1128 06:54:02.681029 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:54:02 crc kubenswrapper[4922]: I1128 06:54:02.681053 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:54:02 crc kubenswrapper[4922]: I1128 06:54:02.681072 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:54:02Z","lastTransitionTime":"2025-11-28T06:54:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:54:02 crc kubenswrapper[4922]: I1128 06:54:02.784721 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:54:02 crc kubenswrapper[4922]: I1128 06:54:02.784786 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:54:02 crc kubenswrapper[4922]: I1128 06:54:02.784810 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:54:02 crc kubenswrapper[4922]: I1128 06:54:02.784841 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:54:02 crc kubenswrapper[4922]: I1128 06:54:02.784860 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:54:02Z","lastTransitionTime":"2025-11-28T06:54:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:54:02 crc kubenswrapper[4922]: I1128 06:54:02.888796 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:54:02 crc kubenswrapper[4922]: I1128 06:54:02.888863 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:54:02 crc kubenswrapper[4922]: I1128 06:54:02.888881 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:54:02 crc kubenswrapper[4922]: I1128 06:54:02.888908 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:54:02 crc kubenswrapper[4922]: I1128 06:54:02.888926 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:54:02Z","lastTransitionTime":"2025-11-28T06:54:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 06:54:02 crc kubenswrapper[4922]: I1128 06:54:02.991463 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:54:02 crc kubenswrapper[4922]: I1128 06:54:02.991510 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:54:02 crc kubenswrapper[4922]: I1128 06:54:02.991520 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:54:02 crc kubenswrapper[4922]: I1128 06:54:02.991536 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:54:02 crc kubenswrapper[4922]: I1128 06:54:02.991546 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:54:02Z","lastTransitionTime":"2025-11-28T06:54:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:54:03 crc kubenswrapper[4922]: I1128 06:54:03.094750 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:54:03 crc kubenswrapper[4922]: I1128 06:54:03.094829 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:54:03 crc kubenswrapper[4922]: I1128 06:54:03.094851 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:54:03 crc kubenswrapper[4922]: I1128 06:54:03.094880 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:54:03 crc kubenswrapper[4922]: I1128 06:54:03.094902 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:54:03Z","lastTransitionTime":"2025-11-28T06:54:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:54:03 crc kubenswrapper[4922]: I1128 06:54:03.197435 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:54:03 crc kubenswrapper[4922]: I1128 06:54:03.197536 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:54:03 crc kubenswrapper[4922]: I1128 06:54:03.197554 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:54:03 crc kubenswrapper[4922]: I1128 06:54:03.197579 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:54:03 crc kubenswrapper[4922]: I1128 06:54:03.197642 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:54:03Z","lastTransitionTime":"2025-11-28T06:54:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 06:54:03 crc kubenswrapper[4922]: I1128 06:54:03.300682 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:54:03 crc kubenswrapper[4922]: I1128 06:54:03.300732 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:54:03 crc kubenswrapper[4922]: I1128 06:54:03.300751 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:54:03 crc kubenswrapper[4922]: I1128 06:54:03.300770 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:54:03 crc kubenswrapper[4922]: I1128 06:54:03.300785 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:54:03Z","lastTransitionTime":"2025-11-28T06:54:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:54:03 crc kubenswrapper[4922]: I1128 06:54:03.403396 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:54:03 crc kubenswrapper[4922]: I1128 06:54:03.403461 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:54:03 crc kubenswrapper[4922]: I1128 06:54:03.403479 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:54:03 crc kubenswrapper[4922]: I1128 06:54:03.403502 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:54:03 crc kubenswrapper[4922]: I1128 06:54:03.403523 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:54:03Z","lastTransitionTime":"2025-11-28T06:54:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:54:03 crc kubenswrapper[4922]: I1128 06:54:03.506314 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:54:03 crc kubenswrapper[4922]: I1128 06:54:03.506355 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:54:03 crc kubenswrapper[4922]: I1128 06:54:03.506368 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:54:03 crc kubenswrapper[4922]: I1128 06:54:03.506389 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:54:03 crc kubenswrapper[4922]: I1128 06:54:03.506402 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:54:03Z","lastTransitionTime":"2025-11-28T06:54:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 06:54:03 crc kubenswrapper[4922]: I1128 06:54:03.610410 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:54:03 crc kubenswrapper[4922]: I1128 06:54:03.610477 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:54:03 crc kubenswrapper[4922]: I1128 06:54:03.610494 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:54:03 crc kubenswrapper[4922]: I1128 06:54:03.610522 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:54:03 crc kubenswrapper[4922]: I1128 06:54:03.610538 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:54:03Z","lastTransitionTime":"2025-11-28T06:54:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:54:03 crc kubenswrapper[4922]: I1128 06:54:03.714133 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:54:03 crc kubenswrapper[4922]: I1128 06:54:03.714196 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:54:03 crc kubenswrapper[4922]: I1128 06:54:03.714213 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:54:03 crc kubenswrapper[4922]: I1128 06:54:03.714265 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:54:03 crc kubenswrapper[4922]: I1128 06:54:03.714284 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:54:03Z","lastTransitionTime":"2025-11-28T06:54:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:54:03 crc kubenswrapper[4922]: I1128 06:54:03.818178 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:54:03 crc kubenswrapper[4922]: I1128 06:54:03.818283 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:54:03 crc kubenswrapper[4922]: I1128 06:54:03.818307 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:54:03 crc kubenswrapper[4922]: I1128 06:54:03.818337 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:54:03 crc kubenswrapper[4922]: I1128 06:54:03.818361 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:54:03Z","lastTransitionTime":"2025-11-28T06:54:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 06:54:03 crc kubenswrapper[4922]: I1128 06:54:03.921982 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:54:03 crc kubenswrapper[4922]: I1128 06:54:03.922031 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:54:03 crc kubenswrapper[4922]: I1128 06:54:03.922047 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:54:03 crc kubenswrapper[4922]: I1128 06:54:03.922070 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:54:03 crc kubenswrapper[4922]: I1128 06:54:03.922089 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:54:03Z","lastTransitionTime":"2025-11-28T06:54:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:54:04 crc kubenswrapper[4922]: I1128 06:54:04.025022 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:54:04 crc kubenswrapper[4922]: I1128 06:54:04.025073 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:54:04 crc kubenswrapper[4922]: I1128 06:54:04.025088 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:54:04 crc kubenswrapper[4922]: I1128 06:54:04.025110 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:54:04 crc kubenswrapper[4922]: I1128 06:54:04.025123 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:54:04Z","lastTransitionTime":"2025-11-28T06:54:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:54:04 crc kubenswrapper[4922]: I1128 06:54:04.127806 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:54:04 crc kubenswrapper[4922]: I1128 06:54:04.127861 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:54:04 crc kubenswrapper[4922]: I1128 06:54:04.127874 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:54:04 crc kubenswrapper[4922]: I1128 06:54:04.127894 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:54:04 crc kubenswrapper[4922]: I1128 06:54:04.127911 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:54:04Z","lastTransitionTime":"2025-11-28T06:54:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 06:54:04 crc kubenswrapper[4922]: I1128 06:54:04.231179 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:54:04 crc kubenswrapper[4922]: I1128 06:54:04.231305 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:54:04 crc kubenswrapper[4922]: I1128 06:54:04.231323 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:54:04 crc kubenswrapper[4922]: I1128 06:54:04.231346 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:54:04 crc kubenswrapper[4922]: I1128 06:54:04.231362 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:54:04Z","lastTransitionTime":"2025-11-28T06:54:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:54:04 crc kubenswrapper[4922]: I1128 06:54:04.334621 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:54:04 crc kubenswrapper[4922]: I1128 06:54:04.334659 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:54:04 crc kubenswrapper[4922]: I1128 06:54:04.334669 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:54:04 crc kubenswrapper[4922]: I1128 06:54:04.334686 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:54:04 crc kubenswrapper[4922]: I1128 06:54:04.334699 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:54:04Z","lastTransitionTime":"2025-11-28T06:54:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:54:04 crc kubenswrapper[4922]: I1128 06:54:04.397762 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 06:54:04 crc kubenswrapper[4922]: E1128 06:54:04.398004 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 06:54:04 crc kubenswrapper[4922]: I1128 06:54:04.398379 4922 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 06:54:04 crc kubenswrapper[4922]: E1128 06:54:04.398488 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 06:54:04 crc kubenswrapper[4922]: I1128 06:54:04.398544 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-9kfr9" Nov 28 06:54:04 crc kubenswrapper[4922]: I1128 06:54:04.398642 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 06:54:04 crc kubenswrapper[4922]: E1128 06:54:04.398787 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-9kfr9" podUID="709beb43-ed88-4a0a-b384-0c463e469964" Nov 28 06:54:04 crc kubenswrapper[4922]: E1128 06:54:04.398936 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 06:54:04 crc kubenswrapper[4922]: I1128 06:54:04.438522 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:54:04 crc kubenswrapper[4922]: I1128 06:54:04.438590 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:54:04 crc kubenswrapper[4922]: I1128 06:54:04.438603 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:54:04 crc kubenswrapper[4922]: I1128 06:54:04.438619 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:54:04 crc kubenswrapper[4922]: I1128 06:54:04.438630 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:54:04Z","lastTransitionTime":"2025-11-28T06:54:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 06:54:04 crc kubenswrapper[4922]: I1128 06:54:04.541707 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:54:04 crc kubenswrapper[4922]: I1128 06:54:04.541783 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:54:04 crc kubenswrapper[4922]: I1128 06:54:04.541806 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:54:04 crc kubenswrapper[4922]: I1128 06:54:04.541841 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:54:04 crc kubenswrapper[4922]: I1128 06:54:04.541864 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:54:04Z","lastTransitionTime":"2025-11-28T06:54:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:54:04 crc kubenswrapper[4922]: I1128 06:54:04.645549 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:54:04 crc kubenswrapper[4922]: I1128 06:54:04.645623 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:54:04 crc kubenswrapper[4922]: I1128 06:54:04.645636 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:54:04 crc kubenswrapper[4922]: I1128 06:54:04.645663 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:54:04 crc kubenswrapper[4922]: I1128 06:54:04.645680 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:54:04Z","lastTransitionTime":"2025-11-28T06:54:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:54:04 crc kubenswrapper[4922]: I1128 06:54:04.749335 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:54:04 crc kubenswrapper[4922]: I1128 06:54:04.749385 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:54:04 crc kubenswrapper[4922]: I1128 06:54:04.749396 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:54:04 crc kubenswrapper[4922]: I1128 06:54:04.749420 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:54:04 crc kubenswrapper[4922]: I1128 06:54:04.749431 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:54:04Z","lastTransitionTime":"2025-11-28T06:54:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 06:54:04 crc kubenswrapper[4922]: I1128 06:54:04.853408 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:54:04 crc kubenswrapper[4922]: I1128 06:54:04.853485 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:54:04 crc kubenswrapper[4922]: I1128 06:54:04.853506 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:54:04 crc kubenswrapper[4922]: I1128 06:54:04.853534 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:54:04 crc kubenswrapper[4922]: I1128 06:54:04.853552 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:54:04Z","lastTransitionTime":"2025-11-28T06:54:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:54:04 crc kubenswrapper[4922]: I1128 06:54:04.958110 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:54:04 crc kubenswrapper[4922]: I1128 06:54:04.958182 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:54:04 crc kubenswrapper[4922]: I1128 06:54:04.958199 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:54:04 crc kubenswrapper[4922]: I1128 06:54:04.958648 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:54:04 crc kubenswrapper[4922]: I1128 06:54:04.958719 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:54:04Z","lastTransitionTime":"2025-11-28T06:54:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:54:05 crc kubenswrapper[4922]: I1128 06:54:05.061340 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:54:05 crc kubenswrapper[4922]: I1128 06:54:05.061378 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:54:05 crc kubenswrapper[4922]: I1128 06:54:05.061386 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:54:05 crc kubenswrapper[4922]: I1128 06:54:05.061400 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:54:05 crc kubenswrapper[4922]: I1128 06:54:05.061411 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:54:05Z","lastTransitionTime":"2025-11-28T06:54:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 06:54:05 crc kubenswrapper[4922]: I1128 06:54:05.164188 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:54:05 crc kubenswrapper[4922]: I1128 06:54:05.164251 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:54:05 crc kubenswrapper[4922]: I1128 06:54:05.164259 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:54:05 crc kubenswrapper[4922]: I1128 06:54:05.164272 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:54:05 crc kubenswrapper[4922]: I1128 06:54:05.164283 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:54:05Z","lastTransitionTime":"2025-11-28T06:54:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:54:05 crc kubenswrapper[4922]: I1128 06:54:05.267155 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:54:05 crc kubenswrapper[4922]: I1128 06:54:05.267257 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:54:05 crc kubenswrapper[4922]: I1128 06:54:05.267274 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:54:05 crc kubenswrapper[4922]: I1128 06:54:05.267292 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:54:05 crc kubenswrapper[4922]: I1128 06:54:05.267304 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:54:05Z","lastTransitionTime":"2025-11-28T06:54:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:54:05 crc kubenswrapper[4922]: I1128 06:54:05.371008 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:54:05 crc kubenswrapper[4922]: I1128 06:54:05.371067 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:54:05 crc kubenswrapper[4922]: I1128 06:54:05.371087 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:54:05 crc kubenswrapper[4922]: I1128 06:54:05.371111 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:54:05 crc kubenswrapper[4922]: I1128 06:54:05.371131 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:54:05Z","lastTransitionTime":"2025-11-28T06:54:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 06:54:05 crc kubenswrapper[4922]: I1128 06:54:05.415963 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-z5d9x" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"21211ec8-3baf-4230-9cd8-c641f6bdc0e1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d42d2096b746c6f0a9210409101237a8b283b9baa914633aefc36971712ec634\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g4hdz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T06:52:57Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-z5d9x\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:54:05Z is after 2025-08-24T17:21:41Z" Nov 28 06:54:05 crc kubenswrapper[4922]: I1128 06:54:05.433473 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-9kfr9" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"709beb43-ed88-4a0a-b384-0c463e469964\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:09Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:09Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5p26f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5p26f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T06:53:09Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-9kfr9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:54:05Z is after 2025-08-24T17:21:41Z" Nov 28 06:54:05 crc kubenswrapper[4922]: I1128 06:54:05.455453 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f54a7fc3-dff2-4919-91a9-7defe17b717e\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2b39d53d3c00be07ad6ed44fb37961669118efd3f0a953a39e05c90c9fc30319\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1c84fe4f54f9b9fc76f23f13aaee875e9d127be3dec3e3952893436fb9d94936\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bf8a543b9af64068e2164c1fb7fcf8117e36a3f1a6c6e40df2a63da8fea86612\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7ef4b355167e11c82fb8067373d0d1b4ec5551157e683043c98f9249082795d2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T06:52:35Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:54:05Z is after 2025-08-24T17:21:41Z" Nov 28 06:54:05 crc kubenswrapper[4922]: I1128 06:54:05.473539 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:54:05Z is after 2025-08-24T17:21:41Z" Nov 28 06:54:05 crc kubenswrapper[4922]: I1128 06:54:05.474874 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:54:05 crc kubenswrapper[4922]: I1128 06:54:05.474949 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:54:05 crc kubenswrapper[4922]: I1128 06:54:05.474971 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:54:05 crc kubenswrapper[4922]: I1128 06:54:05.475046 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:54:05 crc kubenswrapper[4922]: I1128 06:54:05.475067 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:54:05Z","lastTransitionTime":"2025-11-28T06:54:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 06:54:05 crc kubenswrapper[4922]: I1128 06:54:05.491938 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-w9zxj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e1f29751-83a6-4469-b733-50e654026f8c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://679d64be6eca63ee652a4cb6cc5432c6ce549d1de243bdfbde115af634e770e3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ghdft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T06:52:54Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-w9zxj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:54:05Z is after 2025-08-24T17:21:41Z" Nov 28 06:54:05 crc kubenswrapper[4922]: I1128 06:54:05.515309 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-jgzjd" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b05f16bb-1729-4fd8-883a-4fb960bf4cff\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://18ece98b3e5c71e3aa7b48051d91a0684f108688cd8a31e00422d5d0b047a76e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://099001160eba2d5545fdd42a6fac2b9842549a64b5d3fe093c274e073a156e5a\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-28T06:53:41Z\\\",\\\"message\\\":\\\"2025-11-28T06:52:55+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_23e15381-4c29-4769-a2b0-29fb61b368c2\\\\n2025-11-28T06:52:55+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_23e15381-4c29-4769-a2b0-29fb61b368c2 to /host/opt/cni/bin/\\\\n2025-11-28T06:52:55Z [verbose] multus-daemon started\\\\n2025-11-28T06:52:55Z [verbose] Readiness Indicator file check\\\\n2025-11-28T06:53:40Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T06:52:55Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:53:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fwd5b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T06:52:54Z\\\"}}\" for pod \"openshift-multus\"/\"multus-jgzjd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:54:05Z is after 2025-08-24T17:21:41Z" Nov 28 06:54:05 crc kubenswrapper[4922]: I1128 06:54:05.541010 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-xm948" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"28b50482-ffec-406c-9ff9-9604bce5d5d5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8329d62934c99c8561a76ffc873ea44b49549e6be23697ea4003dcd42798f914\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:53:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-76zhj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ea8d65e8ee7eb92aa290261e8f51b41b46819bc513aaca5c3bbdf3aa90a4acf1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ea8d65e8ee7eb92aa290261e8f51b41b46819bc513aaca5c3bbdf3aa90a4acf1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T06:52:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-76zhj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b52be6274c308e05417b43b1a405e2156837ac5b1e751cf1bf07f1820e5072c1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b52be6274c308e05417b43b1a405e2156837ac5b1e751cf1bf07f1820e5072c1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T06:52:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T06:52:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-76zhj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2e49c9118ef8a51223ca7a5ed0e966458c25e01d5fe81bffeecf85e3c958177a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2e49c9118ef8a51223ca7a5ed0e966458c25e01d5fe81bffeecf85e3c958177a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T06:52:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T06:52:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-76zhj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://10808a5202288b64a0583c474df43f7ad4846b876c071e3797e9403416dbd4e5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://10808a5202288b64a0583c474df43f7ad4846b876c071e3797e9403416dbd4e5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T06:52:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T06:52:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-76zhj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f17f7a3f2eae1a08db3ab5d237156d3555583ed11bdf121c4138312913bc2ad1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f17f7a3f2eae1a08db3ab5d237156d3555583ed11bdf121c4138312913bc2ad1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T06:53:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T06:52:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-76zhj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8c558e4eb231b58b703d70d6da8454e7128ac7fc7138897bedcbdeb9e801a08b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8c558e4eb231b58b703d70d6da8454e7128ac7fc7138897bedcbdeb9e801a08b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T06:53:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T06:53:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-76zhj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T06:52:54Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-xm948\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:54:05Z is after 2025-08-24T17:21:41Z" Nov 28 06:54:05 crc kubenswrapper[4922]: I1128 06:54:05.560447 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"e3415f94-87c0-4a05-9d74-02ac020c4d35\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8db4ea0b3e2badcbe285f1758b150d335c8c1f4d92478a2de587cf9d345f3640\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a5b5d5b7a735c902e218bbb524cda2f64b167fcced9cc07a47bff7e59cb55115\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://900d4f306d3a3e4386262c1ee1031ad03503f6c262e9305c581397b3bf1a6a60\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://73c8b650a2a0ebc55d6531c14f5c6e5e4a7ffbac511aea8400c6632289aeb275\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://73c8b650a2a0ebc55d6531c14f5c6e5e4a7ffbac511aea8400c6632289aeb275\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T06:52:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T06:52:36Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T06:52:35Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:54:05Z is after 2025-08-24T17:21:41Z" Nov 28 06:54:05 crc kubenswrapper[4922]: I1128 06:54:05.576623 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://60275d4709ecec957c37c52e762628422258288394fb23bb6190e98253977288\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d7be069e52fb9de73b7d4297a34eaab68e25c764ed60172e87146259d0d0b6ed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":t
rue,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:54:05Z is after 2025-08-24T17:21:41Z" Nov 28 06:54:05 crc kubenswrapper[4922]: I1128 06:54:05.578027 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:54:05 crc kubenswrapper[4922]: I1128 06:54:05.578067 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:54:05 crc kubenswrapper[4922]: I1128 06:54:05.578083 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:54:05 crc kubenswrapper[4922]: I1128 06:54:05.578110 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:54:05 crc kubenswrapper[4922]: I1128 06:54:05.578130 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:54:05Z","lastTransitionTime":"2025-11-28T06:54:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 06:54:05 crc kubenswrapper[4922]: I1128 06:54:05.600362 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8de96806bba4cb76f53ccf39f1188732d96955671049b550589a836ed21e0616\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:54:05Z is after 2025-08-24T17:21:41Z" Nov 28 06:54:05 crc kubenswrapper[4922]: I1128 06:54:05.619994 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:54:05Z is after 2025-08-24T17:21:41Z" Nov 28 06:54:05 crc kubenswrapper[4922]: I1128 06:54:05.640214 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-h8wk6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0498340a-5e95-42bf-a0a6-8ac89a6b8858\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://19e8a18b97500977a51210206bba89633ff9939e989bfe1a6e471f5a23c83520\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zf7vq\\\",\\\"readOnly\\\":true,\\\"rec
ursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b903fc60349ec7d5004f23ec933dce13d646a2c343819495564005dfb4813314\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zf7vq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T06:52:54Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-h8wk6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:54:05Z is after 2025-08-24T17:21:41Z" Nov 28 06:54:05 crc kubenswrapper[4922]: I1128 06:54:05.658534 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-n9b52" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"31348e3e-fe58-4426-98b7-bd9dd404283b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://07e060fc8cd65cfc5a3e2ba86edb408b32996c25dac44f6f70d2f91b837e9da1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:53:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vkhgr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://03d0e97f4ab12f9cccfefde0d5665e45e14db63343fd1273279d41a8e56b060f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:53:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vkhgr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T06:53:07Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-n9b52\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:54:05Z is after 2025-08-24T17:21:41Z" Nov 28 
06:54:05 crc kubenswrapper[4922]: I1128 06:54:05.680704 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"78d1b075-6126-476f-8318-483aeaa7b542\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d8727c0c5c76c4071561c95b14554063c01a2d7d826bfef19624a346f7079096\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c63926690deadbe1a0b9bd4228c1be8c9d959ae52dc13540e1d7da0ef6ce2b44\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://12e55656cf2a2cadbef853f458f347aabe75bd6b644a48a8c320144a0bbb77f8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\
\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ff005273cedb4a3be19dafd07b2572733446d00c1aa3d89dec6dad79a9b37cb7\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://43f9db3464d315f5b596f15327b9e7cfc2935bb8f18492bcf4aa1b2d8e2e8951\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-28T06:52:53Z\\\",\\\"message\\\":\\\" to sync for client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file\\\\nI1128 06:52:53.805479 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\nI1128 06:52:53.805349 1 requestheader_controller.go:172] Starting RequestHeaderAuthRequestController\\\\nI1128 06:52:53.807455 1 shared_informer.go:313] Waiting for caches to sync for RequestHeaderAuthRequestController\\\\nI1128 06:52:53.805665 1 envvar.go:172] \\\\\\\"Feature gate default state\\\\\\\" feature=\\\\\\\"WatchListClient\\\\\\\" enabled=false\\\\nI1128 06:52:53.807562 1 envvar.go:172] \\\\\\\"Feature gate default state\\\\\\\" feature=\\\\\\\"InformerResourceVersion\\\\\\\" enabled=false\\\\nI1128 06:52:53.805922 1 tlsconfig.go:203] \\\\\\\"Loaded serving cert\\\\\\\" certName=\\\\\\\"serving-cert::/tmp/serving-cert-870734072/tls.crt::/tmp/serving-cert-870734072/tls.key\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"localhost\\\\\\\\\\\\\\\" [serving] validServingFor=[localhost] issuer=\\\\\\\\\\\\\\\"check-endpoints-signer@1764312757\\\\\\\\\\\\\\\" (2025-11-28 06:52:36 +0000 UTC to 2025-12-28 06:52:37 +0000 UTC (now=2025-11-28 06:52:53.805871396 +0000 UTC))\\\\\\\"\\\\nI1128 06:52:53.808356 1 named_certificates.go:53] \\\\\\\"Loaded SNI cert\\\\\\\" index=0 certName=\\\\\\\"self-signed loopback\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"apiserver-loopback-client@1764312768\\\\\\\\\\\\\\\" [serving] validServingFor=[apiserver-loopback-client] issuer=\\\\\\\\\\\\\\\"apiserver-loopback-client-ca@1764312768\\\\\\\\\\\\\\\" (2025-11-28 05:52:47 +0000 UTC to 2026-11-28 05:52:47 +0000 UTC (now=2025-11-28 06:52:53.808326937 +0000 UTC))\\\\\\\"\\\\nI1128 06:52:53.808434 1 secure_serving.go:213] Serving securely on [::]:17697\\\\nI1128 06:52:53.808491 1 genericapiserver.go:683] [graceful-termination] waiting for shutdown to be initiated\\\\nI1128 06:52:53.805962 1 dynamic_serving_content.go:135] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-870734072/tls.crt::/tmp/serving-cert-870734072/tls.key\\\\\\\"\\\\nI1128 06:52:53.808872 1 tlsconfig.go:243] \\\\\\\"Starting DynamicServingCertificateController\\\\\\\"\\\\nF1128 06:52:53.810397 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T06:52:37Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cd17aa15325fbe3a40b81cea5bfec137ef02a8dd2d5f8393be4d206a8360ba89\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:37Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9a1a9e0fb2169a4caf2986906a87573648458725e2571ec377704856d3c16b47\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9a1a9e0fb2169a4caf2986906a87573648458725e2571ec377704856d3c16b47\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T06:52:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T06:52:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T06:52:35Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:54:05Z is after 2025-08-24T17:21:41Z" Nov 28 06:54:05 crc kubenswrapper[4922]: I1128 06:54:05.681916 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:54:05 crc kubenswrapper[4922]: I1128 06:54:05.681986 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:54:05 crc kubenswrapper[4922]: I1128 06:54:05.682011 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:54:05 crc kubenswrapper[4922]: I1128 06:54:05.682042 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:54:05 crc kubenswrapper[4922]: I1128 06:54:05.682062 4922 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:54:05Z","lastTransitionTime":"2025-11-28T06:54:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:54:05 crc kubenswrapper[4922]: I1128 06:54:05.697764 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"dc6fd62c-3407-4045-a223-d0b56212e7da\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d11064f96da0c5b3c10a95671667cd2643279cccaafadf1ae910cffa4a1613ec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kube\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a5f6fc7cc5c7d7923b2f63059c0cfed9ebda675222e2ca864808be06f90e2907\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a5f6fc7cc5c7d7923b2f63059c0cfed9ebda675222e2ca864808be06f90e2907\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T06:52:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T06:52:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T06:52:35Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling 
webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:54:05Z is after 2025-08-24T17:21:41Z" Nov 28 06:54:05 crc kubenswrapper[4922]: I1128 06:54:05.715606 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:57Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:57Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4fb859dbed8c4c25965a739a2bd823f90f867a3bb629206994858cd62ba597ff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:54:05Z is after 2025-08-24T17:21:41Z" Nov 28 06:54:05 crc kubenswrapper[4922]: I1128 06:54:05.734974 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:54:05Z is after 2025-08-24T17:21:41Z" Nov 28 06:54:05 crc kubenswrapper[4922]: I1128 06:54:05.765460 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-7gdxt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ac5c6b67-2037-400e-8e03-845b47d8ca67\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d0f038ffe6c8ec5ec836379b827ecb71967119efca3f2cd4ce5d3006fa96a153\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dtxdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2c1a545fef7aec25b21dd112c54eeb7b94306cafaf4732a50de9ff10a409c443\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dtxdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6e9942f8e8cdf6ffd04d46b29be5a53fcc8c4cb5d79f50d7747e2fe6eb744ffd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dtxdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e91ea6957087e19d43626438d68bf92a19a6f35325de570738354edd61da5bb0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dtxdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4f56366e2355663eb896c09a6710166b89f794a7f17cb317f6c4299e8b317782\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dtxdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://403e86b4b99e8bc85be77996288705f6ea8ff5da8e9b7dfb09749f4341065b55\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dtxdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e48e9f3c53ca5775a9a467f9da512b7ee4d61cdd
cd9d7fc0765432f7a716ac96\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e48e9f3c53ca5775a9a467f9da512b7ee4d61cddcd9d7fc0765432f7a716ac96\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-28T06:53:41Z\\\",\\\"message\\\":\\\"[0a:58:0a:d9:00:3b 10.217.0.59]} options:{GoMap:map[iface-id-ver:9d751cbb-f2e2-430d-9754-c882a5e924a5 requested-chassis:crc]} port_security:{GoSet:[0a:58:0a:d9:00:3b 10.217.0.59]}] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {960d98b2-dc64-4e93-a4b6-9b19847af71e}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI1128 06:53:41.329271 6771 ovn.go:134] Ensuring zone local for Pod openshift-kube-scheduler/openshift-kube-scheduler-crc in node crc\\\\nI1128 06:53:41.329277 6771 obj_retry.go:386] Retry successful for *v1.Pod openshift-kube-scheduler/openshift-kube-scheduler-crc after 0 failed attempt(s)\\\\nI1128 06:53:41.329282 6771 default_network_controller.go:776] Recording success event on pod openshift-kube-scheduler/openshift-kube-scheduler-crc\\\\nF1128 06:53:41.329275 6771 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: fa\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T06:53:40Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller 
pod=ovnkube-node-7gdxt_openshift-ovn-kubernetes(ac5c6b67-2037-400e-8e03-845b47d8ca67)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dtxdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d31c99f96b8af9c7b9c47c4ccd0b6485b23c9eacfe293667038042df8330d8a9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dtxdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3aaa3ffb5b99838c7969b3e241ad9d7a0bc4797fac3ec52280914abbb7ca1b24\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3aaa3ffb5b99838c7969b3e241ad9d7a0bc4797fac3ec52280914abbb7ca1b24\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T06:52:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dtxdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T06:52:54Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-7gdxt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:54:05Z is after 2025-08-24T17:21:41Z" Nov 28 06:54:05 crc kubenswrapper[4922]: I1128 06:54:05.786919 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:54:05 crc kubenswrapper[4922]: I1128 06:54:05.786981 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:54:05 crc kubenswrapper[4922]: I1128 06:54:05.786995 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:54:05 crc kubenswrapper[4922]: I1128 06:54:05.787016 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:54:05 crc kubenswrapper[4922]: I1128 06:54:05.787031 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:54:05Z","lastTransitionTime":"2025-11-28T06:54:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 06:54:05 crc kubenswrapper[4922]: I1128 06:54:05.890575 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:54:05 crc kubenswrapper[4922]: I1128 06:54:05.890645 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:54:05 crc kubenswrapper[4922]: I1128 06:54:05.890658 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:54:05 crc kubenswrapper[4922]: I1128 06:54:05.890679 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:54:05 crc kubenswrapper[4922]: I1128 06:54:05.890693 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:54:05Z","lastTransitionTime":"2025-11-28T06:54:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:54:05 crc kubenswrapper[4922]: I1128 06:54:05.994060 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:54:05 crc kubenswrapper[4922]: I1128 06:54:05.994136 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:54:05 crc kubenswrapper[4922]: I1128 06:54:05.994159 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:54:05 crc kubenswrapper[4922]: I1128 06:54:05.994188 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:54:05 crc kubenswrapper[4922]: I1128 06:54:05.994209 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:54:05Z","lastTransitionTime":"2025-11-28T06:54:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:54:06 crc kubenswrapper[4922]: I1128 06:54:06.097257 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:54:06 crc kubenswrapper[4922]: I1128 06:54:06.097322 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:54:06 crc kubenswrapper[4922]: I1128 06:54:06.097340 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:54:06 crc kubenswrapper[4922]: I1128 06:54:06.097593 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:54:06 crc kubenswrapper[4922]: I1128 06:54:06.097632 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:54:06Z","lastTransitionTime":"2025-11-28T06:54:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Nov 28 06:54:06 crc kubenswrapper[4922]: I1128 06:54:06.200717 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 06:54:06 crc kubenswrapper[4922]: I1128 06:54:06.200777 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 06:54:06 crc kubenswrapper[4922]: I1128 06:54:06.200786 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 06:54:06 crc kubenswrapper[4922]: I1128 06:54:06.200807 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 06:54:06 crc kubenswrapper[4922]: I1128 06:54:06.200820 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:54:06Z","lastTransitionTime":"2025-11-28T06:54:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 06:54:06 crc kubenswrapper[4922]: I1128 06:54:06.304526 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 06:54:06 crc kubenswrapper[4922]: I1128 06:54:06.304586 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 06:54:06 crc kubenswrapper[4922]: I1128 06:54:06.304596 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 06:54:06 crc kubenswrapper[4922]: I1128 06:54:06.304617 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 06:54:06 crc kubenswrapper[4922]: I1128 06:54:06.304631 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:54:06Z","lastTransitionTime":"2025-11-28T06:54:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 06:54:06 crc kubenswrapper[4922]: I1128 06:54:06.397554 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 28 06:54:06 crc kubenswrapper[4922]: I1128 06:54:06.397610 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-9kfr9"
Nov 28 06:54:06 crc kubenswrapper[4922]: I1128 06:54:06.397662 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 28 06:54:06 crc kubenswrapper[4922]: E1128 06:54:06.397766 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 28 06:54:06 crc kubenswrapper[4922]: I1128 06:54:06.397801 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 28 06:54:06 crc kubenswrapper[4922]: E1128 06:54:06.397962 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-9kfr9" podUID="709beb43-ed88-4a0a-b384-0c463e469964"
Nov 28 06:54:06 crc kubenswrapper[4922]: E1128 06:54:06.398125 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 28 06:54:06 crc kubenswrapper[4922]: E1128 06:54:06.398492 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 28 06:54:06 crc kubenswrapper[4922]: I1128 06:54:06.407682 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 06:54:06 crc kubenswrapper[4922]: I1128 06:54:06.407758 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 06:54:06 crc kubenswrapper[4922]: I1128 06:54:06.407778 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 06:54:06 crc kubenswrapper[4922]: I1128 06:54:06.407811 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 06:54:06 crc kubenswrapper[4922]: I1128 06:54:06.407835 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:54:06Z","lastTransitionTime":"2025-11-28T06:54:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 06:54:06 crc kubenswrapper[4922]: I1128 06:54:06.511007 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 06:54:06 crc kubenswrapper[4922]: I1128 06:54:06.511115 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 06:54:06 crc kubenswrapper[4922]: I1128 06:54:06.511134 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 06:54:06 crc kubenswrapper[4922]: I1128 06:54:06.511161 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 06:54:06 crc kubenswrapper[4922]: I1128 06:54:06.511177 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:54:06Z","lastTransitionTime":"2025-11-28T06:54:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 06:54:06 crc kubenswrapper[4922]: I1128 06:54:06.615198 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 06:54:06 crc kubenswrapper[4922]: I1128 06:54:06.615320 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 06:54:06 crc kubenswrapper[4922]: I1128 06:54:06.615344 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 06:54:06 crc kubenswrapper[4922]: I1128 06:54:06.615371 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 06:54:06 crc kubenswrapper[4922]: I1128 06:54:06.615390 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:54:06Z","lastTransitionTime":"2025-11-28T06:54:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 06:54:06 crc kubenswrapper[4922]: I1128 06:54:06.719580 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 06:54:06 crc kubenswrapper[4922]: I1128 06:54:06.719669 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 06:54:06 crc kubenswrapper[4922]: I1128 06:54:06.719695 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 06:54:06 crc kubenswrapper[4922]: I1128 06:54:06.719727 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 06:54:06 crc kubenswrapper[4922]: I1128 06:54:06.719746 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:54:06Z","lastTransitionTime":"2025-11-28T06:54:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 06:54:06 crc kubenswrapper[4922]: I1128 06:54:06.822843 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 06:54:06 crc kubenswrapper[4922]: I1128 06:54:06.822898 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 06:54:06 crc kubenswrapper[4922]: I1128 06:54:06.822914 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 06:54:06 crc kubenswrapper[4922]: I1128 06:54:06.822937 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 06:54:06 crc kubenswrapper[4922]: I1128 06:54:06.822953 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:54:06Z","lastTransitionTime":"2025-11-28T06:54:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 06:54:06 crc kubenswrapper[4922]: I1128 06:54:06.925944 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 06:54:06 crc kubenswrapper[4922]: I1128 06:54:06.926105 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 06:54:06 crc kubenswrapper[4922]: I1128 06:54:06.926133 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 06:54:06 crc kubenswrapper[4922]: I1128 06:54:06.926164 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 06:54:06 crc kubenswrapper[4922]: I1128 06:54:06.926182 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:54:06Z","lastTransitionTime":"2025-11-28T06:54:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 06:54:07 crc kubenswrapper[4922]: I1128 06:54:07.030514 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 06:54:07 crc kubenswrapper[4922]: I1128 06:54:07.030578 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 06:54:07 crc kubenswrapper[4922]: I1128 06:54:07.030600 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 06:54:07 crc kubenswrapper[4922]: I1128 06:54:07.030629 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 06:54:07 crc kubenswrapper[4922]: I1128 06:54:07.030652 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:54:07Z","lastTransitionTime":"2025-11-28T06:54:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 06:54:07 crc kubenswrapper[4922]: I1128 06:54:07.134827 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 06:54:07 crc kubenswrapper[4922]: I1128 06:54:07.134906 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 06:54:07 crc kubenswrapper[4922]: I1128 06:54:07.134932 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 06:54:07 crc kubenswrapper[4922]: I1128 06:54:07.134965 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 06:54:07 crc kubenswrapper[4922]: I1128 06:54:07.134991 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:54:07Z","lastTransitionTime":"2025-11-28T06:54:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 06:54:07 crc kubenswrapper[4922]: I1128 06:54:07.239067 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 06:54:07 crc kubenswrapper[4922]: I1128 06:54:07.239134 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 06:54:07 crc kubenswrapper[4922]: I1128 06:54:07.239153 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 06:54:07 crc kubenswrapper[4922]: I1128 06:54:07.239180 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 06:54:07 crc kubenswrapper[4922]: I1128 06:54:07.239198 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:54:07Z","lastTransitionTime":"2025-11-28T06:54:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 06:54:07 crc kubenswrapper[4922]: I1128 06:54:07.342913 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 06:54:07 crc kubenswrapper[4922]: I1128 06:54:07.342987 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 06:54:07 crc kubenswrapper[4922]: I1128 06:54:07.343010 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 06:54:07 crc kubenswrapper[4922]: I1128 06:54:07.343040 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 06:54:07 crc kubenswrapper[4922]: I1128 06:54:07.343062 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:54:07Z","lastTransitionTime":"2025-11-28T06:54:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 06:54:07 crc kubenswrapper[4922]: I1128 06:54:07.446755 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 06:54:07 crc kubenswrapper[4922]: I1128 06:54:07.446859 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 06:54:07 crc kubenswrapper[4922]: I1128 06:54:07.446887 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 06:54:07 crc kubenswrapper[4922]: I1128 06:54:07.446917 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 06:54:07 crc kubenswrapper[4922]: I1128 06:54:07.446942 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:54:07Z","lastTransitionTime":"2025-11-28T06:54:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 06:54:07 crc kubenswrapper[4922]: I1128 06:54:07.550173 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 06:54:07 crc kubenswrapper[4922]: I1128 06:54:07.550277 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 06:54:07 crc kubenswrapper[4922]: I1128 06:54:07.550305 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 06:54:07 crc kubenswrapper[4922]: I1128 06:54:07.550334 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 06:54:07 crc kubenswrapper[4922]: I1128 06:54:07.550356 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:54:07Z","lastTransitionTime":"2025-11-28T06:54:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 06:54:07 crc kubenswrapper[4922]: I1128 06:54:07.653709 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 06:54:07 crc kubenswrapper[4922]: I1128 06:54:07.653782 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 06:54:07 crc kubenswrapper[4922]: I1128 06:54:07.653806 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 06:54:07 crc kubenswrapper[4922]: I1128 06:54:07.653835 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 06:54:07 crc kubenswrapper[4922]: I1128 06:54:07.653856 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:54:07Z","lastTransitionTime":"2025-11-28T06:54:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 06:54:07 crc kubenswrapper[4922]: I1128 06:54:07.756547 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 06:54:07 crc kubenswrapper[4922]: I1128 06:54:07.756614 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 06:54:07 crc kubenswrapper[4922]: I1128 06:54:07.756637 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 06:54:07 crc kubenswrapper[4922]: I1128 06:54:07.756668 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 06:54:07 crc kubenswrapper[4922]: I1128 06:54:07.756690 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:54:07Z","lastTransitionTime":"2025-11-28T06:54:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 06:54:07 crc kubenswrapper[4922]: I1128 06:54:07.860021 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 06:54:07 crc kubenswrapper[4922]: I1128 06:54:07.860095 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 06:54:07 crc kubenswrapper[4922]: I1128 06:54:07.860114 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 06:54:07 crc kubenswrapper[4922]: I1128 06:54:07.860142 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 06:54:07 crc kubenswrapper[4922]: I1128 06:54:07.860159 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:54:07Z","lastTransitionTime":"2025-11-28T06:54:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 06:54:07 crc kubenswrapper[4922]: I1128 06:54:07.962749 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 06:54:07 crc kubenswrapper[4922]: I1128 06:54:07.962799 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 06:54:07 crc kubenswrapper[4922]: I1128 06:54:07.962815 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 06:54:07 crc kubenswrapper[4922]: I1128 06:54:07.962837 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 06:54:07 crc kubenswrapper[4922]: I1128 06:54:07.962855 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:54:07Z","lastTransitionTime":"2025-11-28T06:54:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 06:54:08 crc kubenswrapper[4922]: I1128 06:54:08.065287 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 06:54:08 crc kubenswrapper[4922]: I1128 06:54:08.065317 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 06:54:08 crc kubenswrapper[4922]: I1128 06:54:08.065326 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 06:54:08 crc kubenswrapper[4922]: I1128 06:54:08.065339 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 06:54:08 crc kubenswrapper[4922]: I1128 06:54:08.065350 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:54:08Z","lastTransitionTime":"2025-11-28T06:54:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 06:54:08 crc kubenswrapper[4922]: I1128 06:54:08.168620 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 06:54:08 crc kubenswrapper[4922]: I1128 06:54:08.168682 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 06:54:08 crc kubenswrapper[4922]: I1128 06:54:08.168704 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 06:54:08 crc kubenswrapper[4922]: I1128 06:54:08.168733 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 06:54:08 crc kubenswrapper[4922]: I1128 06:54:08.168755 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:54:08Z","lastTransitionTime":"2025-11-28T06:54:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 06:54:08 crc kubenswrapper[4922]: I1128 06:54:08.271993 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 06:54:08 crc kubenswrapper[4922]: I1128 06:54:08.272051 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 06:54:08 crc kubenswrapper[4922]: I1128 06:54:08.272109 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 06:54:08 crc kubenswrapper[4922]: I1128 06:54:08.272137 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 06:54:08 crc kubenswrapper[4922]: I1128 06:54:08.272154 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:54:08Z","lastTransitionTime":"2025-11-28T06:54:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 06:54:08 crc kubenswrapper[4922]: I1128 06:54:08.375829 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 06:54:08 crc kubenswrapper[4922]: I1128 06:54:08.375889 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 06:54:08 crc kubenswrapper[4922]: I1128 06:54:08.375906 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 06:54:08 crc kubenswrapper[4922]: I1128 06:54:08.375929 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 06:54:08 crc kubenswrapper[4922]: I1128 06:54:08.375946 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:54:08Z","lastTransitionTime":"2025-11-28T06:54:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 06:54:08 crc kubenswrapper[4922]: I1128 06:54:08.397490 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 28 06:54:08 crc kubenswrapper[4922]: I1128 06:54:08.397547 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 28 06:54:08 crc kubenswrapper[4922]: I1128 06:54:08.397554 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-9kfr9"
Nov 28 06:54:08 crc kubenswrapper[4922]: I1128 06:54:08.397635 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 28 06:54:08 crc kubenswrapper[4922]: E1128 06:54:08.397675 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 28 06:54:08 crc kubenswrapper[4922]: E1128 06:54:08.397798 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-9kfr9" podUID="709beb43-ed88-4a0a-b384-0c463e469964"
Nov 28 06:54:08 crc kubenswrapper[4922]: E1128 06:54:08.397900 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 28 06:54:08 crc kubenswrapper[4922]: E1128 06:54:08.398035 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 28 06:54:08 crc kubenswrapper[4922]: I1128 06:54:08.479570 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 06:54:08 crc kubenswrapper[4922]: I1128 06:54:08.479646 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 06:54:08 crc kubenswrapper[4922]: I1128 06:54:08.479672 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 06:54:08 crc kubenswrapper[4922]: I1128 06:54:08.479702 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 06:54:08 crc kubenswrapper[4922]: I1128 06:54:08.479728 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:54:08Z","lastTransitionTime":"2025-11-28T06:54:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 06:54:08 crc kubenswrapper[4922]: I1128 06:54:08.582368 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 06:54:08 crc kubenswrapper[4922]: I1128 06:54:08.582423 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 06:54:08 crc kubenswrapper[4922]: I1128 06:54:08.582442 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 06:54:08 crc kubenswrapper[4922]: I1128 06:54:08.582467 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 06:54:08 crc kubenswrapper[4922]: I1128 06:54:08.582487 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:54:08Z","lastTransitionTime":"2025-11-28T06:54:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 06:54:08 crc kubenswrapper[4922]: I1128 06:54:08.685529 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 06:54:08 crc kubenswrapper[4922]: I1128 06:54:08.685602 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 06:54:08 crc kubenswrapper[4922]: I1128 06:54:08.685682 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 06:54:08 crc kubenswrapper[4922]: I1128 06:54:08.685716 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 06:54:08 crc kubenswrapper[4922]: I1128 06:54:08.685742 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:54:08Z","lastTransitionTime":"2025-11-28T06:54:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 06:54:08 crc kubenswrapper[4922]: I1128 06:54:08.788326 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 06:54:08 crc kubenswrapper[4922]: I1128 06:54:08.788403 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 06:54:08 crc kubenswrapper[4922]: I1128 06:54:08.788422 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 06:54:08 crc kubenswrapper[4922]: I1128 06:54:08.788446 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 06:54:08 crc kubenswrapper[4922]: I1128 06:54:08.788463 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:54:08Z","lastTransitionTime":"2025-11-28T06:54:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 06:54:08 crc kubenswrapper[4922]: I1128 06:54:08.890873 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 06:54:08 crc kubenswrapper[4922]: I1128 06:54:08.890939 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 06:54:08 crc kubenswrapper[4922]: I1128 06:54:08.890957 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 06:54:08 crc kubenswrapper[4922]: I1128 06:54:08.890983 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 06:54:08 crc kubenswrapper[4922]: I1128 06:54:08.891005 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:54:08Z","lastTransitionTime":"2025-11-28T06:54:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 06:54:08 crc kubenswrapper[4922]: I1128 06:54:08.971710 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 06:54:08 crc kubenswrapper[4922]: I1128 06:54:08.971759 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 06:54:08 crc kubenswrapper[4922]: I1128 06:54:08.971775 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 06:54:08 crc kubenswrapper[4922]: I1128 06:54:08.971797 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 06:54:08 crc kubenswrapper[4922]: I1128 06:54:08.971815 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:54:08Z","lastTransitionTime":"2025-11-28T06:54:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 06:54:08 crc kubenswrapper[4922]: E1128 06:54:08.993779 4922 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T06:54:08Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T06:54:08Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T06:54:08Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T06:54:08Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T06:54:08Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T06:54:08Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T06:54:08Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T06:54:08Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"3ea1e6bb-61b7-453d-b9cb-290e4e3cdf54\\\",\\\"systemUUID\\\":\\\"a83ea3b2-2af6-4e19-83ef-b63bfe4faed4\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:54:08Z is after 2025-08-24T17:21:41Z"
Nov 28 06:54:08 crc kubenswrapper[4922]: I1128 06:54:08.999124 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 06:54:08 crc kubenswrapper[4922]: I1128 06:54:08.999194 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 06:54:08 crc kubenswrapper[4922]: I1128 06:54:08.999248 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 06:54:08 crc kubenswrapper[4922]: I1128 06:54:08.999280 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 06:54:08 crc kubenswrapper[4922]: I1128 06:54:08.999306 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:54:08Z","lastTransitionTime":"2025-11-28T06:54:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 06:54:09 crc kubenswrapper[4922]: E1128 06:54:09.018721 4922 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T06:54:09Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T06:54:09Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T06:54:09Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T06:54:09Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T06:54:09Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T06:54:09Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T06:54:09Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T06:54:09Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"3ea1e6bb-61b7-453d-b9cb-290e4e3cdf54\\\",\\\"systemUUID\\\":\\\"a83ea3b2-2af6-4e19-83ef-b63bfe4faed4\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:54:09Z is after 2025-08-24T17:21:41Z"
Nov 28 06:54:09 crc kubenswrapper[4922]: I1128 06:54:09.023951 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 06:54:09 crc kubenswrapper[4922]: I1128 06:54:09.024012 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 06:54:09 crc kubenswrapper[4922]: I1128 06:54:09.024065 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 06:54:09 crc kubenswrapper[4922]: I1128 06:54:09.024092 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 06:54:09 crc kubenswrapper[4922]: I1128 06:54:09.024113 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:54:09Z","lastTransitionTime":"2025-11-28T06:54:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 06:54:09 crc kubenswrapper[4922]: E1128 06:54:09.045900 4922 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T06:54:09Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T06:54:09Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T06:54:09Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T06:54:09Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T06:54:09Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T06:54:09Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T06:54:09Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T06:54:09Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"3ea1e6bb-61b7-453d-b9cb-290e4e3cdf54\\\",\\\"systemUUID\\\":\\\"a83ea3b2-2af6-4e19-83ef-b63bfe4faed4\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:54:09Z is after 2025-08-24T17:21:41Z" Nov 28 06:54:09 crc kubenswrapper[4922]: I1128 06:54:09.051486 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:54:09 crc kubenswrapper[4922]: I1128 06:54:09.051607 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 28 06:54:09 crc kubenswrapper[4922]: I1128 06:54:09.051629 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:54:09 crc kubenswrapper[4922]: I1128 06:54:09.051651 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:54:09 crc kubenswrapper[4922]: I1128 06:54:09.051694 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:54:09Z","lastTransitionTime":"2025-11-28T06:54:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:54:09 crc kubenswrapper[4922]: E1128 06:54:09.071971 4922 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T06:54:09Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T06:54:09Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T06:54:09Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T06:54:09Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T06:54:09Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T06:54:09Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T06:54:09Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T06:54:09Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"3ea1e6bb-61b7-453d-b9cb-290e4e3cdf54\\\",\\\"systemUUID\\\":\\\"a83ea3b2-2af6-4e19-83ef-b63bfe4faed4\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:54:09Z is after 2025-08-24T17:21:41Z" Nov 28 06:54:09 crc kubenswrapper[4922]: I1128 06:54:09.077035 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:54:09 crc kubenswrapper[4922]: I1128 06:54:09.077103 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 28 06:54:09 crc kubenswrapper[4922]: I1128 06:54:09.077128 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:54:09 crc kubenswrapper[4922]: I1128 06:54:09.077156 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:54:09 crc kubenswrapper[4922]: I1128 06:54:09.077176 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:54:09Z","lastTransitionTime":"2025-11-28T06:54:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:54:09 crc kubenswrapper[4922]: E1128 06:54:09.095803 4922 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T06:54:09Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T06:54:09Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T06:54:09Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T06:54:09Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T06:54:09Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T06:54:09Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T06:54:09Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T06:54:09Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"3ea1e6bb-61b7-453d-b9cb-290e4e3cdf54\\\",\\\"systemUUID\\\":\\\"a83ea3b2-2af6-4e19-83ef-b63bfe4faed4\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:54:09Z is after 2025-08-24T17:21:41Z" Nov 28 06:54:09 crc kubenswrapper[4922]: E1128 06:54:09.096050 4922 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Nov 28 06:54:09 crc kubenswrapper[4922]: I1128 06:54:09.098068 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
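The failures in this stretch share a single root cause: the serving certificate of the node.network-node-identity.openshift.io webhook at https://127.0.0.1:9743 expired on 2025-08-24T17:21:41Z, about three months before these entries, so every node-status patch is rejected before it reaches the API object. The kubelet retries the update a fixed number of times per sync (nodeStatusUpdateRetry, 5 in the upstream kubelet sources) and then logs the "update node status exceeds retry count" entry above. A minimal standalone Go sketch, not part of the kubelet, that confirms the certificate's validity window from the node (the address is taken from the log):

package main

import (
	"crypto/tls"
	"fmt"
	"log"
	"time"
)

func main() {
	// InsecureSkipVerify lets us retrieve the peer certificate even though
	// verification fails; this is a read-only diagnostic, not a client.
	conn, err := tls.Dial("tcp", "127.0.0.1:9743", &tls.Config{InsecureSkipVerify: true})
	if err != nil {
		log.Fatalf("dial webhook endpoint: %v", err)
	}
	defer conn.Close()

	// Print the validity window of each certificate the server presented.
	for _, cert := range conn.ConnectionState().PeerCertificates {
		fmt.Printf("subject=%q notBefore=%s notAfter=%s expired=%v\n",
			cert.Subject.CommonName,
			cert.NotBefore.Format(time.RFC3339),
			cert.NotAfter.Format(time.RFC3339),
			time.Now().After(cert.NotAfter))
	}
}

Once the certificate is renewed (on a CRC cluster this is normally handled by the cert-regeneration controllers when the cluster runs with a correct system clock), the same patch payloads would be expected to go through.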
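Separately, the repeated "Node became not ready" condition is not caused by the webhook: the kubelet reports NetworkReady=false for as long as no CNI configuration file exists in /etc/kubernetes/cni/net.d/, and that file is only written once the network provider (ovnkube-node, whose container start is logged shortly after this) comes up. A small sketch of the same directory check, assuming only the path quoted in the log message:

package main

import (
	"fmt"
	"os"
	"path/filepath"
)

func main() {
	// Directory quoted in the kubelet message; OVN-Kubernetes writes its
	// network config here once ovnkube-node is running.
	const confDir = "/etc/kubernetes/cni/net.d"

	entries, err := os.ReadDir(confDir)
	if err != nil {
		fmt.Printf("cannot read %s: %v\n", confDir, err)
		return
	}
	var found []string
	for _, e := range entries {
		// The CNI config loader (libcni) looks for these extensions.
		switch filepath.Ext(e.Name()) {
		case ".conf", ".conflist", ".json":
			found = append(found, e.Name())
		}
	}
	if len(found) == 0 {
		// This is the state behind "no CNI configuration file" above.
		fmt.Println("no CNI configuration file found; NetworkReady stays false")
		return
	}
	fmt.Println("CNI configs present:", found)
}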
event="NodeHasSufficientMemory" Nov 28 06:54:09 crc kubenswrapper[4922]: I1128 06:54:09.098142 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:54:09 crc kubenswrapper[4922]: I1128 06:54:09.098167 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:54:09 crc kubenswrapper[4922]: I1128 06:54:09.098210 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:54:09 crc kubenswrapper[4922]: I1128 06:54:09.098269 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:54:09Z","lastTransitionTime":"2025-11-28T06:54:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:54:09 crc kubenswrapper[4922]: I1128 06:54:09.200711 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:54:09 crc kubenswrapper[4922]: I1128 06:54:09.200780 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:54:09 crc kubenswrapper[4922]: I1128 06:54:09.200799 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:54:09 crc kubenswrapper[4922]: I1128 06:54:09.200824 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:54:09 crc kubenswrapper[4922]: I1128 06:54:09.200843 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:54:09Z","lastTransitionTime":"2025-11-28T06:54:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:54:09 crc kubenswrapper[4922]: I1128 06:54:09.304215 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:54:09 crc kubenswrapper[4922]: I1128 06:54:09.304320 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:54:09 crc kubenswrapper[4922]: I1128 06:54:09.304343 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:54:09 crc kubenswrapper[4922]: I1128 06:54:09.304372 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:54:09 crc kubenswrapper[4922]: I1128 06:54:09.304394 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:54:09Z","lastTransitionTime":"2025-11-28T06:54:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 06:54:09 crc kubenswrapper[4922]: I1128 06:54:09.399402 4922 scope.go:117] "RemoveContainer" containerID="e48e9f3c53ca5775a9a467f9da512b7ee4d61cddcd9d7fc0765432f7a716ac96" Nov 28 06:54:09 crc kubenswrapper[4922]: I1128 06:54:09.406674 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:54:09 crc kubenswrapper[4922]: I1128 06:54:09.406723 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:54:09 crc kubenswrapper[4922]: I1128 06:54:09.406740 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:54:09 crc kubenswrapper[4922]: I1128 06:54:09.406763 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:54:09 crc kubenswrapper[4922]: I1128 06:54:09.406780 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:54:09Z","lastTransitionTime":"2025-11-28T06:54:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:54:09 crc kubenswrapper[4922]: I1128 06:54:09.510644 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:54:09 crc kubenswrapper[4922]: I1128 06:54:09.510965 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:54:09 crc kubenswrapper[4922]: I1128 06:54:09.510985 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:54:09 crc kubenswrapper[4922]: I1128 06:54:09.511042 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:54:09 crc kubenswrapper[4922]: I1128 06:54:09.511060 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:54:09Z","lastTransitionTime":"2025-11-28T06:54:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 06:54:09 crc kubenswrapper[4922]: I1128 06:54:09.616066 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:54:09 crc kubenswrapper[4922]: I1128 06:54:09.616123 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:54:09 crc kubenswrapper[4922]: I1128 06:54:09.616140 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:54:09 crc kubenswrapper[4922]: I1128 06:54:09.616164 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:54:09 crc kubenswrapper[4922]: I1128 06:54:09.616180 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:54:09Z","lastTransitionTime":"2025-11-28T06:54:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:54:09 crc kubenswrapper[4922]: I1128 06:54:09.718718 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:54:09 crc kubenswrapper[4922]: I1128 06:54:09.718789 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:54:09 crc kubenswrapper[4922]: I1128 06:54:09.718829 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:54:09 crc kubenswrapper[4922]: I1128 06:54:09.718861 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:54:09 crc kubenswrapper[4922]: I1128 06:54:09.718888 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:54:09Z","lastTransitionTime":"2025-11-28T06:54:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:54:09 crc kubenswrapper[4922]: I1128 06:54:09.821901 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:54:09 crc kubenswrapper[4922]: I1128 06:54:09.821979 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:54:09 crc kubenswrapper[4922]: I1128 06:54:09.822005 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:54:09 crc kubenswrapper[4922]: I1128 06:54:09.822037 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:54:09 crc kubenswrapper[4922]: I1128 06:54:09.822065 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:54:09Z","lastTransitionTime":"2025-11-28T06:54:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 06:54:09 crc kubenswrapper[4922]: I1128 06:54:09.924651 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:54:09 crc kubenswrapper[4922]: I1128 06:54:09.924710 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:54:09 crc kubenswrapper[4922]: I1128 06:54:09.924729 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:54:09 crc kubenswrapper[4922]: I1128 06:54:09.924754 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:54:09 crc kubenswrapper[4922]: I1128 06:54:09.924772 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:54:09Z","lastTransitionTime":"2025-11-28T06:54:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:54:09 crc kubenswrapper[4922]: I1128 06:54:09.990567 4922 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-7gdxt_ac5c6b67-2037-400e-8e03-845b47d8ca67/ovnkube-controller/2.log" Nov 28 06:54:09 crc kubenswrapper[4922]: I1128 06:54:09.992976 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-7gdxt" event={"ID":"ac5c6b67-2037-400e-8e03-845b47d8ca67","Type":"ContainerStarted","Data":"34af1a43ea6a98f41968e9d8fedf88b2886eff55e56e71236e5e9a4c181652af"} Nov 28 06:54:09 crc kubenswrapper[4922]: I1128 06:54:09.993576 4922 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-7gdxt" Nov 28 06:54:10 crc kubenswrapper[4922]: I1128 06:54:10.012096 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:54:10Z is after 2025-08-24T17:21:41Z" Nov 28 06:54:10 crc kubenswrapper[4922]: I1128 06:54:10.023272 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-h8wk6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0498340a-5e95-42bf-a0a6-8ac89a6b8858\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://19e8a18b97500977a51210206bba89633ff9939e989bfe1a6e471f5a23c83520\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zf7vq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b903fc60349ec7d5004f23ec933dce13d646a2c343819495564005dfb4813314\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":tru
e,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zf7vq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T06:52:54Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-h8wk6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:54:10Z is after 2025-08-24T17:21:41Z" Nov 28 06:54:10 crc kubenswrapper[4922]: I1128 06:54:10.027139 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:54:10 crc kubenswrapper[4922]: I1128 06:54:10.027181 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:54:10 crc kubenswrapper[4922]: I1128 06:54:10.027190 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:54:10 crc kubenswrapper[4922]: I1128 06:54:10.027206 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:54:10 crc kubenswrapper[4922]: I1128 06:54:10.027231 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:54:10Z","lastTransitionTime":"2025-11-28T06:54:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 06:54:10 crc kubenswrapper[4922]: I1128 06:54:10.037444 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"78d1b075-6126-476f-8318-483aeaa7b542\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d8727c0c5c76c4071561c95b14554063c01a2d7d826bfef19624a346f7079096\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c63926690deadbe1a0b9bd4228c1be8c9d959ae52dc13540e1d7da0ef6ce2b44\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://12e55656cf2a2cadbef853f458f347aabe75bd6b644a48a8c320144a0bbb77f8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/ku
bernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ff005273cedb4a3be19dafd07b2572733446d00c1aa3d89dec6dad79a9b37cb7\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://43f9db3464d315f5b596f15327b9e7cfc2935bb8f18492bcf4aa1b2d8e2e8951\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-28T06:52:53Z\\\",\\\"message\\\":\\\" to sync for client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file\\\\nI1128 06:52:53.805479 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\nI1128 06:52:53.805349 1 requestheader_controller.go:172] Starting RequestHeaderAuthRequestController\\\\nI1128 06:52:53.807455 1 shared_informer.go:313] Waiting for caches to sync for RequestHeaderAuthRequestController\\\\nI1128 06:52:53.805665 1 envvar.go:172] \\\\\\\"Feature gate default state\\\\\\\" feature=\\\\\\\"WatchListClient\\\\\\\" enabled=false\\\\nI1128 06:52:53.807562 1 envvar.go:172] \\\\\\\"Feature gate default state\\\\\\\" feature=\\\\\\\"InformerResourceVersion\\\\\\\" enabled=false\\\\nI1128 06:52:53.805922 1 tlsconfig.go:203] \\\\\\\"Loaded serving cert\\\\\\\" certName=\\\\\\\"serving-cert::/tmp/serving-cert-870734072/tls.crt::/tmp/serving-cert-870734072/tls.key\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"localhost\\\\\\\\\\\\\\\" [serving] validServingFor=[localhost] issuer=\\\\\\\\\\\\\\\"check-endpoints-signer@1764312757\\\\\\\\\\\\\\\" (2025-11-28 06:52:36 +0000 UTC to 2025-12-28 06:52:37 +0000 UTC (now=2025-11-28 06:52:53.805871396 +0000 UTC))\\\\\\\"\\\\nI1128 06:52:53.808356 1 named_certificates.go:53] \\\\\\\"Loaded SNI cert\\\\\\\" index=0 certName=\\\\\\\"self-signed loopback\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"apiserver-loopback-client@1764312768\\\\\\\\\\\\\\\" [serving] validServingFor=[apiserver-loopback-client] issuer=\\\\\\\\\\\\\\\"apiserver-loopback-client-ca@1764312768\\\\\\\\\\\\\\\" (2025-11-28 05:52:47 +0000 UTC to 2026-11-28 05:52:47 +0000 UTC (now=2025-11-28 06:52:53.808326937 +0000 UTC))\\\\\\\"\\\\nI1128 06:52:53.808434 1 secure_serving.go:213] Serving securely on [::]:17697\\\\nI1128 06:52:53.808491 1 genericapiserver.go:683] [graceful-termination] waiting for shutdown to be initiated\\\\nI1128 06:52:53.805962 1 dynamic_serving_content.go:135] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-870734072/tls.crt::/tmp/serving-cert-870734072/tls.key\\\\\\\"\\\\nI1128 06:52:53.808872 1 tlsconfig.go:243] \\\\\\\"Starting DynamicServingCertificateController\\\\\\\"\\\\nF1128 06:52:53.810397 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T06:52:37Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cd17aa15325fbe3a40b81cea5bfec137ef02a8dd2d5f8393be4d206a8360ba89\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:37Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9a1a9e0fb2169a4caf2986906a87573648458725e2571ec377704856d3c16b47\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9a1a9e0fb2169a4caf2986906a87573648458725e2571ec377704856d3c16b47\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T06:52:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T06:52:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T06:52:35Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:54:10Z is after 2025-08-24T17:21:41Z" Nov 28 06:54:10 crc kubenswrapper[4922]: I1128 06:54:10.049705 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"dc6fd62c-3407-4045-a223-d0b56212e7da\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d11064f96da0c5b3c10a95671667cd2643279cccaafadf1ae910cffa4a1613ec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kube\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a5f6fc7cc5c7d7923b2f63059c0cfed9ebda675222e2ca864808be06f90e2907\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a5f6fc7cc5c7d7923b2f63059c0cfed9ebda675222e2ca864808be06f90e2907\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T06:52:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T06:52:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T06:52:35Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:54:10Z is after 2025-08-24T17:21:41Z" Nov 28 06:54:10 crc kubenswrapper[4922]: I1128 06:54:10.064989 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:57Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:57Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4fb859dbed8c4c25965a739a2bd823f90f867a3bb629206994858cd62ba597ff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:54:10Z is after 2025-08-24T17:21:41Z" Nov 28 06:54:10 crc kubenswrapper[4922]: I1128 06:54:10.083519 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:54:10Z is after 2025-08-24T17:21:41Z" Nov 28 06:54:10 crc kubenswrapper[4922]: I1128 06:54:10.110872 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-7gdxt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ac5c6b67-2037-400e-8e03-845b47d8ca67\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d0f038ffe6c8ec5ec836379b827ecb71967119efca3f2cd4ce5d3006fa96a153\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dtxdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2c1a545fef7aec25b21dd112c54eeb7b94306cafaf4732a50de9ff10a409c443\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dtxdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6e9942f8e8cdf6ffd04d46b29be5a53fcc8c4cb5d79f50d7747e2fe6eb744ffd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dtxdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e91ea6957087e19d43626438d68bf92a19a6f35325de570738354edd61da5bb0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dtxdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4f56366e2355663eb896c09a6710166b89f794a7f17cb317f6c4299e8b317782\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dtxdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://403e86b4b99e8bc85be77996288705f6ea8ff5da8e9b7dfb09749f4341065b55\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dtxdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://34af1a43ea6a98f41968e9d8fedf88b2886eff55
e56e71236e5e9a4c181652af\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e48e9f3c53ca5775a9a467f9da512b7ee4d61cddcd9d7fc0765432f7a716ac96\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-28T06:53:41Z\\\",\\\"message\\\":\\\"[0a:58:0a:d9:00:3b 10.217.0.59]} options:{GoMap:map[iface-id-ver:9d751cbb-f2e2-430d-9754-c882a5e924a5 requested-chassis:crc]} port_security:{GoSet:[0a:58:0a:d9:00:3b 10.217.0.59]}] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {960d98b2-dc64-4e93-a4b6-9b19847af71e}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI1128 06:53:41.329271 6771 ovn.go:134] Ensuring zone local for Pod openshift-kube-scheduler/openshift-kube-scheduler-crc in node crc\\\\nI1128 06:53:41.329277 6771 obj_retry.go:386] Retry successful for *v1.Pod openshift-kube-scheduler/openshift-kube-scheduler-crc after 0 failed attempt(s)\\\\nI1128 06:53:41.329282 6771 default_network_controller.go:776] Recording success event on pod openshift-kube-scheduler/openshift-kube-scheduler-crc\\\\nF1128 06:53:41.329275 6771 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: 
fa\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T06:53:40Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:54:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dtxdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d31c99f96b8af9c7b9c47c4ccd0b6485b23c9eacfe293667038042df8330d8a9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dtxdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"con
tainerID\\\":\\\"cri-o://3aaa3ffb5b99838c7969b3e241ad9d7a0bc4797fac3ec52280914abbb7ca1b24\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3aaa3ffb5b99838c7969b3e241ad9d7a0bc4797fac3ec52280914abbb7ca1b24\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T06:52:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dtxdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T06:52:54Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-7gdxt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:54:10Z is after 2025-08-24T17:21:41Z" Nov 28 06:54:10 crc kubenswrapper[4922]: I1128 06:54:10.126361 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-n9b52" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"31348e3e-fe58-4426-98b7-bd9dd404283b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://07e060fc8cd65cfc5a3e2ba86edb408b32996c25dac44f6f70d2f91b837e9da1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:53:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vkhgr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://03d0e97f4ab12f9cccfefde0d5665e45e14db63343fd1273279d41a8e56b060f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:53:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vkhgr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T06:53:07Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-n9b52\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:54:10Z is after 2025-08-24T17:21:41Z" Nov 28 
06:54:10 crc kubenswrapper[4922]: I1128 06:54:10.129474 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:54:10 crc kubenswrapper[4922]: I1128 06:54:10.129536 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:54:10 crc kubenswrapper[4922]: I1128 06:54:10.129550 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:54:10 crc kubenswrapper[4922]: I1128 06:54:10.129571 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:54:10 crc kubenswrapper[4922]: I1128 06:54:10.129588 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:54:10Z","lastTransitionTime":"2025-11-28T06:54:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:54:10 crc kubenswrapper[4922]: I1128 06:54:10.140657 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-9kfr9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"709beb43-ed88-4a0a-b384-0c463e469964\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:09Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:09Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5p26f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5p26f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T06:53:09Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-9kfr9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:54:10Z is after 2025-08-24T17:21:41Z" Nov 28 06:54:10 crc kubenswrapper[4922]: I1128 06:54:10.156843 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f54a7fc3-dff2-4919-91a9-7defe17b717e\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2b39d53d3c00be07ad6ed44fb37961669118efd3f0a953a39e05c90c9fc30319\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1c84fe4f54f9b9fc76f23f13aaee875e9d127be3dec3e3952893436fb9d94936\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bf8a543b9af64068e2164c1fb7fcf8117e36a3f1a6c6e40df2a63da8fea86612\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7ef4b355167e11c82fb8067373d0d1b4ec5551157e683043c98f9249082795d2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T06:52:35Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:54:10Z is after 2025-08-24T17:21:41Z" Nov 28 06:54:10 crc kubenswrapper[4922]: I1128 06:54:10.173465 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:54:10Z is after 2025-08-24T17:21:41Z" Nov 28 06:54:10 crc kubenswrapper[4922]: I1128 06:54:10.186256 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-w9zxj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e1f29751-83a6-4469-b733-50e654026f8c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://679d64be6eca63ee652a4cb6cc5432c6ce549d1de243bdfbde115af634e770e3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ghdft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T06:52:54Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-w9zxj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 
2025-11-28T06:54:10Z is after 2025-08-24T17:21:41Z" Nov 28 06:54:10 crc kubenswrapper[4922]: I1128 06:54:10.202526 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-jgzjd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b05f16bb-1729-4fd8-883a-4fb960bf4cff\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://18ece98b3e5c71e3aa7b48051d91a0684f108688cd8a31e00422d5d0b047a76e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://099001160eba2d5545fdd42a6fac2b9842549a64b5d3fe093c274e073a156e5a\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-28T06:53:41Z\\\",\\\"message\\\":\\\"2025-11-28T06:52:55+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_23e15381-4c29-4769-a2b0-29fb61b368c2\\\\n2025-11-28T06:52:55+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_23e15381-4c29-4769-a2b0-29fb61b368c2 to /host/opt/cni/bin/\\\\n2025-11-28T06:52:55Z [verbose] multus-daemon started\\\\n2025-11-28T06:52:55Z [verbose] Readiness Indicator file check\\\\n2025-11-28T06:53:40Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T06:52:55Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:53:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fwd5b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T06:52:54Z\\\"}}\" for pod \"openshift-multus\"/\"multus-jgzjd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:54:10Z is after 2025-08-24T17:21:41Z" Nov 28 06:54:10 crc kubenswrapper[4922]: I1128 06:54:10.224360 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-xm948" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"28b50482-ffec-406c-9ff9-9604bce5d5d5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8329d62934c99c8561a76ffc873ea44b49549e6be23697ea4003dcd42798f914\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:53:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-76zhj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ea8d65e8ee7eb92aa290261e8f51b41b46819bc513aaca5c3bbdf3aa90a4acf1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ea8d65e8ee7eb92aa290261e8f51b41b46819bc513aaca5c3bbdf3aa90a4acf1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T06:52:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-76zhj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b52be6274c308e05417b43b1a405e2156837ac5b1e751cf1bf07f1820e5072c1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b52be6274c308e05417b43b1a405e2156837ac5b1e751cf1bf07f1820e5072c1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T06:52:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T06:52:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-76zhj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2e49c9118ef8a51223ca7a5ed0e966458c25e01d5fe81bffeecf85e3c958177a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2e49c9118ef8a51223ca7a5ed0e966458c25e01d5fe81bffeecf85e3c958177a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T06:52:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T06:52:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-76zhj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://10808a5202288b64a0583c474df43f7ad4846b876c071e3797e9403416dbd4e5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://10808a5202288b64a0583c474df43f7ad4846b876c071e3797e9403416dbd4e5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T06:52:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T06:52:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-76zhj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f17f7a3f2eae1a08db3ab5d237156d3555583ed11bdf121c4138312913bc2ad1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f17f7a3f2eae1a08db3ab5d237156d3555583ed11bdf121c4138312913bc2ad1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T06:53:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T06:52:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-76zhj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8c558e4eb231b58b703d70d6da8454e7128ac7fc7138897bedcbdeb9e801a08b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8c558e4eb231b58b703d70d6da8454e7128ac7fc7138897bedcbdeb9e801a08b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T06:53:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T06:53:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-76zhj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T06:52:54Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-xm948\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:54:10Z is after 2025-08-24T17:21:41Z" Nov 28 06:54:10 crc kubenswrapper[4922]: I1128 06:54:10.232795 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:54:10 crc kubenswrapper[4922]: I1128 06:54:10.232837 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:54:10 crc 
kubenswrapper[4922]: I1128 06:54:10.232847 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:54:10 crc kubenswrapper[4922]: I1128 06:54:10.232862 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:54:10 crc kubenswrapper[4922]: I1128 06:54:10.232873 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:54:10Z","lastTransitionTime":"2025-11-28T06:54:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:54:10 crc kubenswrapper[4922]: I1128 06:54:10.238680 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-z5d9x" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"21211ec8-3baf-4230-9cd8-c641f6bdc0e1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d42d2096b746c6f0a9210409101237a8b283b9baa914633aefc36971712ec634\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g4hdz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T06:52:57Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-z5d9x\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:54:10Z is after 2025-08-24T17:21:41Z" Nov 
28 06:54:10 crc kubenswrapper[4922]: I1128 06:54:10.253429 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e3415f94-87c0-4a05-9d74-02ac020c4d35\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8db4ea0b3e2badcbe285f1758b150d335c8c1f4d92478a2de587cf9d345f3640\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a5b5d5b7a735c902e218bbb524cda2f64b167fcced9cc07a47bff7e59cb55115\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://900d4f306d3a3e4386262c1ee1031ad03503f6c262e9305c581397b3bf1a6a60\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.1
68.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://73c8b650a2a0ebc55d6531c14f5c6e5e4a7ffbac511aea8400c6632289aeb275\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://73c8b650a2a0ebc55d6531c14f5c6e5e4a7ffbac511aea8400c6632289aeb275\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T06:52:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T06:52:36Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T06:52:35Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:54:10Z is after 2025-08-24T17:21:41Z" Nov 28 06:54:10 crc kubenswrapper[4922]: I1128 06:54:10.267588 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://60275d4709ecec957c37c52e762628422258288394fb23bb6190e98253977288\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d7be069e52fb9de73b7d4297a34eaab68e25c764ed60172e87146259d0d0b6ed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"
,\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:54:10Z is after 2025-08-24T17:21:41Z" Nov 28 06:54:10 crc kubenswrapper[4922]: I1128 06:54:10.283138 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8de96806bba4cb76f53ccf39f1188732d96955671049b550589a836ed21e0616\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:54:10Z is after 2025-08-24T17:21:41Z" Nov 28 06:54:10 crc 
kubenswrapper[4922]: I1128 06:54:10.335277 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:54:10 crc kubenswrapper[4922]: I1128 06:54:10.335322 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:54:10 crc kubenswrapper[4922]: I1128 06:54:10.335334 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:54:10 crc kubenswrapper[4922]: I1128 06:54:10.335351 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:54:10 crc kubenswrapper[4922]: I1128 06:54:10.335364 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:54:10Z","lastTransitionTime":"2025-11-28T06:54:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:54:10 crc kubenswrapper[4922]: I1128 06:54:10.398158 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 06:54:10 crc kubenswrapper[4922]: I1128 06:54:10.398191 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 06:54:10 crc kubenswrapper[4922]: I1128 06:54:10.398335 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-9kfr9" Nov 28 06:54:10 crc kubenswrapper[4922]: I1128 06:54:10.398586 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 06:54:10 crc kubenswrapper[4922]: E1128 06:54:10.398596 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 06:54:10 crc kubenswrapper[4922]: E1128 06:54:10.398708 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 06:54:10 crc kubenswrapper[4922]: E1128 06:54:10.398769 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 06:54:10 crc kubenswrapper[4922]: E1128 06:54:10.398867 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-9kfr9" podUID="709beb43-ed88-4a0a-b384-0c463e469964" Nov 28 06:54:10 crc kubenswrapper[4922]: I1128 06:54:10.438099 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:54:10 crc kubenswrapper[4922]: I1128 06:54:10.438150 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:54:10 crc kubenswrapper[4922]: I1128 06:54:10.438162 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:54:10 crc kubenswrapper[4922]: I1128 06:54:10.438179 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:54:10 crc kubenswrapper[4922]: I1128 06:54:10.438192 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:54:10Z","lastTransitionTime":"2025-11-28T06:54:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:54:10 crc kubenswrapper[4922]: I1128 06:54:10.541156 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:54:10 crc kubenswrapper[4922]: I1128 06:54:10.541248 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:54:10 crc kubenswrapper[4922]: I1128 06:54:10.541268 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:54:10 crc kubenswrapper[4922]: I1128 06:54:10.541292 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:54:10 crc kubenswrapper[4922]: I1128 06:54:10.541311 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:54:10Z","lastTransitionTime":"2025-11-28T06:54:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 06:54:10 crc kubenswrapper[4922]: I1128 06:54:10.644137 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:54:10 crc kubenswrapper[4922]: I1128 06:54:10.644201 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:54:10 crc kubenswrapper[4922]: I1128 06:54:10.644250 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:54:10 crc kubenswrapper[4922]: I1128 06:54:10.644279 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:54:10 crc kubenswrapper[4922]: I1128 06:54:10.644298 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:54:10Z","lastTransitionTime":"2025-11-28T06:54:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:54:10 crc kubenswrapper[4922]: I1128 06:54:10.747812 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:54:10 crc kubenswrapper[4922]: I1128 06:54:10.747895 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:54:10 crc kubenswrapper[4922]: I1128 06:54:10.747918 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:54:10 crc kubenswrapper[4922]: I1128 06:54:10.747948 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:54:10 crc kubenswrapper[4922]: I1128 06:54:10.747972 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:54:10Z","lastTransitionTime":"2025-11-28T06:54:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:54:10 crc kubenswrapper[4922]: I1128 06:54:10.851994 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:54:10 crc kubenswrapper[4922]: I1128 06:54:10.852055 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:54:10 crc kubenswrapper[4922]: I1128 06:54:10.852076 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:54:10 crc kubenswrapper[4922]: I1128 06:54:10.852105 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:54:10 crc kubenswrapper[4922]: I1128 06:54:10.852130 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:54:10Z","lastTransitionTime":"2025-11-28T06:54:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 06:54:10 crc kubenswrapper[4922]: I1128 06:54:10.954933 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:54:10 crc kubenswrapper[4922]: I1128 06:54:10.954982 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:54:10 crc kubenswrapper[4922]: I1128 06:54:10.954996 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:54:10 crc kubenswrapper[4922]: I1128 06:54:10.955014 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:54:10 crc kubenswrapper[4922]: I1128 06:54:10.955026 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:54:10Z","lastTransitionTime":"2025-11-28T06:54:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:54:10 crc kubenswrapper[4922]: I1128 06:54:10.998916 4922 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-7gdxt_ac5c6b67-2037-400e-8e03-845b47d8ca67/ovnkube-controller/3.log" Nov 28 06:54:11 crc kubenswrapper[4922]: I1128 06:54:10.999946 4922 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-7gdxt_ac5c6b67-2037-400e-8e03-845b47d8ca67/ovnkube-controller/2.log" Nov 28 06:54:11 crc kubenswrapper[4922]: I1128 06:54:11.003591 4922 generic.go:334] "Generic (PLEG): container finished" podID="ac5c6b67-2037-400e-8e03-845b47d8ca67" containerID="34af1a43ea6a98f41968e9d8fedf88b2886eff55e56e71236e5e9a4c181652af" exitCode=1 Nov 28 06:54:11 crc kubenswrapper[4922]: I1128 06:54:11.003660 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-7gdxt" event={"ID":"ac5c6b67-2037-400e-8e03-845b47d8ca67","Type":"ContainerDied","Data":"34af1a43ea6a98f41968e9d8fedf88b2886eff55e56e71236e5e9a4c181652af"} Nov 28 06:54:11 crc kubenswrapper[4922]: I1128 06:54:11.003730 4922 scope.go:117] "RemoveContainer" containerID="e48e9f3c53ca5775a9a467f9da512b7ee4d61cddcd9d7fc0765432f7a716ac96" Nov 28 06:54:11 crc kubenswrapper[4922]: I1128 06:54:11.004651 4922 scope.go:117] "RemoveContainer" containerID="34af1a43ea6a98f41968e9d8fedf88b2886eff55e56e71236e5e9a4c181652af" Nov 28 06:54:11 crc kubenswrapper[4922]: E1128 06:54:11.004942 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 40s restarting failed container=ovnkube-controller pod=ovnkube-node-7gdxt_openshift-ovn-kubernetes(ac5c6b67-2037-400e-8e03-845b47d8ca67)\"" pod="openshift-ovn-kubernetes/ovnkube-node-7gdxt" podUID="ac5c6b67-2037-400e-8e03-845b47d8ca67" Nov 28 06:54:11 crc kubenswrapper[4922]: I1128 06:54:11.024209 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:54:11Z is after 2025-08-24T17:21:41Z" Nov 28 06:54:11 crc kubenswrapper[4922]: I1128 06:54:11.043191 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-h8wk6" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0498340a-5e95-42bf-a0a6-8ac89a6b8858\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://19e8a18b97500977a51210206bba89633ff9939e989bfe1a6e471f5a23c83520\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zf7vq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b903fc60349ec7d5004f23ec933dce13d646a2c343819495564005dfb4813314\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zf7vq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T06:52:54Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-h8wk6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:54:11Z is after 2025-08-24T17:21:41Z" Nov 28 06:54:11 crc kubenswrapper[4922]: I1128 06:54:11.058575 4922 kubelet_node_status.go:724] 
"Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:54:11 crc kubenswrapper[4922]: I1128 06:54:11.058621 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:54:11 crc kubenswrapper[4922]: I1128 06:54:11.058640 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:54:11 crc kubenswrapper[4922]: I1128 06:54:11.058663 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:54:11 crc kubenswrapper[4922]: I1128 06:54:11.058682 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:54:11Z","lastTransitionTime":"2025-11-28T06:54:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:54:11 crc kubenswrapper[4922]: I1128 06:54:11.063033 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:54:11Z is after 2025-08-24T17:21:41Z" Nov 28 06:54:11 crc kubenswrapper[4922]: I1128 06:54:11.092504 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-7gdxt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ac5c6b67-2037-400e-8e03-845b47d8ca67\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d0f038ffe6c8ec5ec836379b827ecb71967119efca3f2cd4ce5d3006fa96a153\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dtxdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2c1a545fef7aec25b21dd112c54eeb7b94306cafaf4732a50de9ff10a409c443\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dtxdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6e9942f8e8cdf6ffd04d46b29be5a53fcc8c4cb5d79f50d7747e2fe6eb744ffd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dtxdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e91ea6957087e19d43626438d68bf92a19a6f35325de570738354edd61da5bb0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dtxdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4f56366e2355663eb896c09a6710166b89f794a7f17cb317f6c4299e8b317782\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dtxdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://403e86b4b99e8bc85be77996288705f6ea8ff5da8e9b7dfb09749f4341065b55\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dtxdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://34af1a43ea6a98f41968e9d8fedf88b2886eff55
e56e71236e5e9a4c181652af\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e48e9f3c53ca5775a9a467f9da512b7ee4d61cddcd9d7fc0765432f7a716ac96\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-28T06:53:41Z\\\",\\\"message\\\":\\\"[0a:58:0a:d9:00:3b 10.217.0.59]} options:{GoMap:map[iface-id-ver:9d751cbb-f2e2-430d-9754-c882a5e924a5 requested-chassis:crc]} port_security:{GoSet:[0a:58:0a:d9:00:3b 10.217.0.59]}] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {960d98b2-dc64-4e93-a4b6-9b19847af71e}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI1128 06:53:41.329271 6771 ovn.go:134] Ensuring zone local for Pod openshift-kube-scheduler/openshift-kube-scheduler-crc in node crc\\\\nI1128 06:53:41.329277 6771 obj_retry.go:386] Retry successful for *v1.Pod openshift-kube-scheduler/openshift-kube-scheduler-crc after 0 failed attempt(s)\\\\nI1128 06:53:41.329282 6771 default_network_controller.go:776] Recording success event on pod openshift-kube-scheduler/openshift-kube-scheduler-crc\\\\nF1128 06:53:41.329275 6771 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: fa\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T06:53:40Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://34af1a43ea6a98f41968e9d8fedf88b2886eff55e56e71236e5e9a4c181652af\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-28T06:54:10Z\\\",\\\"message\\\":\\\"nalversions/factory.go:140\\\\nI1128 06:54:10.438751 7071 address_set.go:302] New(bf133528-8652-4c84-85ff-881f0afe9837/default-network-controller:EgressService:egresssvc-served-pods:v4/a13607449821398607916) with []\\\\nI1128 06:54:10.437068 7071 reflector.go:311] Stopping reflector *v1alpha1.AdminNetworkPolicy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI1128 06:54:10.438959 7071 factory.go:1336] Added *v1.Node event handler 7\\\\nI1128 06:54:10.439011 7071 factory.go:1336] Added *v1.EgressIP event handler 8\\\\nI1128 06:54:10.439322 7071 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI1128 06:54:10.439347 7071 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI1128 06:54:10.439395 7071 factory.go:656] Stopping watch factory\\\\nI1128 06:54:10.439440 7071 handler.go:208] Removed *v1.Node event handler 7\\\\nI1128 06:54:10.439479 7071 factory.go:1336] Added *v1.EgressFirewall event handler 9\\\\nI1128 06:54:10.439486 7071 handler.go:208] Removed *v1.Node event handler 2\\\\nI1128 06:54:10.440044 7071 controller.go:132] Adding controller ef_node_controller event 
handlers\\\\nI1128 06:54:10.440095 7071 ovnkube.go:599] Stopped ovnkube\\\\nI1128 06:54:10.440133 7071 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nF1128 06:54:10.440350 7071 ovnkube.go:\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T06:54:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dtxdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d31c99f96b8af9c7b9c47c4ccd0b6485b23c9eacfe293667038042df8330d8a9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dtxdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}
],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3aaa3ffb5b99838c7969b3e241ad9d7a0bc4797fac3ec52280914abbb7ca1b24\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3aaa3ffb5b99838c7969b3e241ad9d7a0bc4797fac3ec52280914abbb7ca1b24\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T06:52:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dtxdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T06:52:54Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-7gdxt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:54:11Z is after 2025-08-24T17:21:41Z" Nov 28 06:54:11 crc kubenswrapper[4922]: I1128 06:54:11.109519 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-n9b52" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"31348e3e-fe58-4426-98b7-bd9dd404283b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://07e060fc8cd65cfc5a3e2ba86edb408b32996c25dac44f6f70d2f91b837e9da1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:53:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vkhgr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://03d0e97f4ab12f9cccfefde0d5665e45e14db63343fd1273279d41a8e56b060f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:53:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vkhgr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T06:53:07Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-n9b52\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:54:11Z is after 2025-08-24T17:21:41Z" Nov 28 
06:54:11 crc kubenswrapper[4922]: I1128 06:54:11.130792 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"78d1b075-6126-476f-8318-483aeaa7b542\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d8727c0c5c76c4071561c95b14554063c01a2d7d826bfef19624a346f7079096\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c63926690deadbe1a0b9bd4228c1be8c9d959ae52dc13540e1d7da0ef6ce2b44\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://12e55656cf2a2cadbef853f458f347aabe75bd6b644a48a8c320144a0bbb77f8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\
\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ff005273cedb4a3be19dafd07b2572733446d00c1aa3d89dec6dad79a9b37cb7\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://43f9db3464d315f5b596f15327b9e7cfc2935bb8f18492bcf4aa1b2d8e2e8951\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-28T06:52:53Z\\\",\\\"message\\\":\\\" to sync for client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file\\\\nI1128 06:52:53.805479 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\nI1128 06:52:53.805349 1 requestheader_controller.go:172] Starting RequestHeaderAuthRequestController\\\\nI1128 06:52:53.807455 1 shared_informer.go:313] Waiting for caches to sync for RequestHeaderAuthRequestController\\\\nI1128 06:52:53.805665 1 envvar.go:172] \\\\\\\"Feature gate default state\\\\\\\" feature=\\\\\\\"WatchListClient\\\\\\\" enabled=false\\\\nI1128 06:52:53.807562 1 envvar.go:172] \\\\\\\"Feature gate default state\\\\\\\" feature=\\\\\\\"InformerResourceVersion\\\\\\\" enabled=false\\\\nI1128 06:52:53.805922 1 tlsconfig.go:203] \\\\\\\"Loaded serving cert\\\\\\\" certName=\\\\\\\"serving-cert::/tmp/serving-cert-870734072/tls.crt::/tmp/serving-cert-870734072/tls.key\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"localhost\\\\\\\\\\\\\\\" [serving] validServingFor=[localhost] issuer=\\\\\\\\\\\\\\\"check-endpoints-signer@1764312757\\\\\\\\\\\\\\\" (2025-11-28 06:52:36 +0000 UTC to 2025-12-28 06:52:37 +0000 UTC (now=2025-11-28 06:52:53.805871396 +0000 UTC))\\\\\\\"\\\\nI1128 06:52:53.808356 1 named_certificates.go:53] \\\\\\\"Loaded SNI cert\\\\\\\" index=0 certName=\\\\\\\"self-signed loopback\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"apiserver-loopback-client@1764312768\\\\\\\\\\\\\\\" [serving] validServingFor=[apiserver-loopback-client] issuer=\\\\\\\\\\\\\\\"apiserver-loopback-client-ca@1764312768\\\\\\\\\\\\\\\" (2025-11-28 05:52:47 +0000 UTC to 2026-11-28 05:52:47 +0000 UTC (now=2025-11-28 06:52:53.808326937 +0000 UTC))\\\\\\\"\\\\nI1128 06:52:53.808434 1 secure_serving.go:213] Serving securely on [::]:17697\\\\nI1128 06:52:53.808491 1 genericapiserver.go:683] [graceful-termination] waiting for shutdown to be initiated\\\\nI1128 06:52:53.805962 1 dynamic_serving_content.go:135] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-870734072/tls.crt::/tmp/serving-cert-870734072/tls.key\\\\\\\"\\\\nI1128 06:52:53.808872 1 tlsconfig.go:243] \\\\\\\"Starting DynamicServingCertificateController\\\\\\\"\\\\nF1128 06:52:53.810397 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T06:52:37Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cd17aa15325fbe3a40b81cea5bfec137ef02a8dd2d5f8393be4d206a8360ba89\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:37Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9a1a9e0fb2169a4caf2986906a87573648458725e2571ec377704856d3c16b47\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9a1a9e0fb2169a4caf2986906a87573648458725e2571ec377704856d3c16b47\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T06:52:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T06:52:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T06:52:35Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:54:11Z is after 2025-08-24T17:21:41Z" Nov 28 06:54:11 crc kubenswrapper[4922]: I1128 06:54:11.145440 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"dc6fd62c-3407-4045-a223-d0b56212e7da\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d11064f96da0c5b3c10a95671667cd2643279cccaafadf1ae910cffa4a1613ec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kube\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a5f6fc7cc5c7d7923b2f63059c0cfed9ebda675222e2ca864808be06f90e2907\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a5f6fc7cc5c7d7923b2f63059c0cfed9ebda675222e2ca864808be06f90e2907\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T06:52:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T06:52:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T06:52:35Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:54:11Z is after 2025-08-24T17:21:41Z" Nov 28 06:54:11 crc kubenswrapper[4922]: I1128 06:54:11.163000 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:54:11 crc kubenswrapper[4922]: I1128 06:54:11.163056 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 28 06:54:11 crc kubenswrapper[4922]: I1128 06:54:11.163073 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:54:11 crc kubenswrapper[4922]: I1128 06:54:11.163096 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:54:11 crc kubenswrapper[4922]: I1128 06:54:11.163361 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:54:11Z","lastTransitionTime":"2025-11-28T06:54:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:54:11 crc kubenswrapper[4922]: I1128 06:54:11.164317 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:57Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:57Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4fb859dbed8c4c25965a739a2bd823f90f867a3bb629206994858cd62ba597ff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:54:11Z is after 2025-08-24T17:21:41Z" Nov 28 06:54:11 crc kubenswrapper[4922]: I1128 06:54:11.184251 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-jgzjd" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b05f16bb-1729-4fd8-883a-4fb960bf4cff\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://18ece98b3e5c71e3aa7b48051d91a0684f108688cd8a31e00422d5d0b047a76e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://099001160eba2d5545fdd42a6fac2b9842549a64b5d3fe093c274e073a156e5a\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-28T06:53:41Z\\\",\\\"message\\\":\\\"2025-11-28T06:52:55+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_23e15381-4c29-4769-a2b0-29fb61b368c2\\\\n2025-11-28T06:52:55+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_23e15381-4c29-4769-a2b0-29fb61b368c2 to /host/opt/cni/bin/\\\\n2025-11-28T06:52:55Z [verbose] multus-daemon started\\\\n2025-11-28T06:52:55Z [verbose] Readiness Indicator file check\\\\n2025-11-28T06:53:40Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T06:52:55Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:53:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fwd5b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T06:52:54Z\\\"}}\" for pod \"openshift-multus\"/\"multus-jgzjd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:54:11Z is after 2025-08-24T17:21:41Z" Nov 28 06:54:11 crc kubenswrapper[4922]: I1128 06:54:11.208711 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-xm948" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"28b50482-ffec-406c-9ff9-9604bce5d5d5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8329d62934c99c8561a76ffc873ea44b49549e6be23697ea4003dcd42798f914\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:53:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-76zhj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ea8d65e8ee7eb92aa290261e8f51b41b46819bc513aaca5c3bbdf3aa90a4acf1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ea8d65e8ee7eb92aa290261e8f51b41b46819bc513aaca5c3bbdf3aa90a4acf1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T06:52:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-76zhj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b52be6274c308e05417b43b1a405e2156837ac5b1e751cf1bf07f1820e5072c1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b52be6274c308e05417b43b1a405e2156837ac5b1e751cf1bf07f1820e5072c1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T06:52:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T06:52:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-76zhj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2e49c9118ef8a51223ca7a5ed0e966458c25e01d5fe81bffeecf85e3c958177a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2e49c9118ef8a51223ca7a5ed0e966458c25e01d5fe81bffeecf85e3c958177a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T06:52:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T06:52:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-76zhj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://10808a5202288b64a0583c474df43f7ad4846b876c071e3797e9403416dbd4e5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://10808a5202288b64a0583c474df43f7ad4846b876c071e3797e9403416dbd4e5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T06:52:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T06:52:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-76zhj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f17f7a3f2eae1a08db3ab5d237156d3555583ed11bdf121c4138312913bc2ad1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f17f7a3f2eae1a08db3ab5d237156d3555583ed11bdf121c4138312913bc2ad1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T06:53:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T06:52:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-76zhj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8c558e4eb231b58b703d70d6da8454e7128ac7fc7138897bedcbdeb9e801a08b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8c558e4eb231b58b703d70d6da8454e7128ac7fc7138897bedcbdeb9e801a08b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T06:53:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T06:53:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-76zhj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T06:52:54Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-xm948\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:54:11Z is after 2025-08-24T17:21:41Z" Nov 28 06:54:11 crc kubenswrapper[4922]: I1128 06:54:11.224827 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-z5d9x" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"21211ec8-3baf-4230-9cd8-c641f6bdc0e1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d42d2096b746c6f0a9210409101237a8b283b9baa914633aefc36971712ec634\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g4hdz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T06:52:57Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-z5d9x\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:54:11Z is after 2025-08-24T17:21:41Z" Nov 28 06:54:11 crc kubenswrapper[4922]: I1128 06:54:11.242826 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-9kfr9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"709beb43-ed88-4a0a-b384-0c463e469964\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:09Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:09Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5p26f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5p26f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T06:53:09Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-9kfr9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:54:11Z is after 2025-08-24T17:21:41Z" Nov 28 06:54:11 crc kubenswrapper[4922]: I1128 06:54:11.262604 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f54a7fc3-dff2-4919-91a9-7defe17b717e\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2b39d53d3c00be07ad6ed44fb37961669118efd3f0a953a39e05c90c9fc30319\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1c84fe4f54f9b9fc76f23f13aaee875e9d127be3dec3e3952893436fb9d94936\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bf8a543b9af64068e2164c1fb7fcf8117e36a3f1a6c6e40df2a63da8fea86612\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7ef4b355167e11c82fb8067373d0d1b4ec5551157e683043c98f9249082795d2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T06:52:35Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:54:11Z is after 2025-08-24T17:21:41Z" Nov 28 06:54:11 crc kubenswrapper[4922]: I1128 06:54:11.267656 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:54:11 crc kubenswrapper[4922]: I1128 06:54:11.267751 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:54:11 crc kubenswrapper[4922]: I1128 06:54:11.267771 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:54:11 crc kubenswrapper[4922]: I1128 06:54:11.267794 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:54:11 crc kubenswrapper[4922]: I1128 06:54:11.267874 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:54:11Z","lastTransitionTime":"2025-11-28T06:54:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 06:54:11 crc kubenswrapper[4922]: I1128 06:54:11.283001 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:54:11Z is after 2025-08-24T17:21:41Z" Nov 28 06:54:11 crc kubenswrapper[4922]: I1128 06:54:11.299417 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-w9zxj" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"e1f29751-83a6-4469-b733-50e654026f8c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://679d64be6eca63ee652a4cb6cc5432c6ce549d1de243bdfbde115af634e770e3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ghdft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T06:52:54Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-w9zxj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:54:11Z is after 2025-08-24T17:21:41Z" Nov 28 06:54:11 crc kubenswrapper[4922]: I1128 06:54:11.317746 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"e3415f94-87c0-4a05-9d74-02ac020c4d35\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8db4ea0b3e2badcbe285f1758b150d335c8c1f4d92478a2de587cf9d345f3640\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a5b5d5b7a735c902e218bbb524cda2f64b167fcced9cc07a47bff7e59cb55115\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://900d4f306d3a3e4386262c1ee1031ad03503f6c262e9305c581397b3bf1a6a60\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://73c8b650a2a0ebc55d6531c14f5c6e5e4a7ffbac511aea8400c6632289aeb275\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://73c8b650a2a0ebc55d6531c14f5c6e5e4a7ffbac511aea8400c6632289aeb275\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T06:52:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T06:52:36Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T06:52:35Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:54:11Z is after 2025-08-24T17:21:41Z" Nov 28 06:54:11 crc kubenswrapper[4922]: I1128 06:54:11.339161 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://60275d4709ecec957c37c52e762628422258288394fb23bb6190e98253977288\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d7be069e52fb9de73b7d4297a34eaab68e25c764ed60172e87146259d0d0b6ed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":t
rue,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:54:11Z is after 2025-08-24T17:21:41Z" Nov 28 06:54:11 crc kubenswrapper[4922]: I1128 06:54:11.358760 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8de96806bba4cb76f53ccf39f1188732d96955671049b550589a836ed21e0616\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:54:11Z is after 2025-08-24T17:21:41Z" Nov 28 06:54:11 crc kubenswrapper[4922]: I1128 06:54:11.371364 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:54:11 crc kubenswrapper[4922]: I1128 06:54:11.371438 
4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:54:11 crc kubenswrapper[4922]: I1128 06:54:11.371460 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:54:11 crc kubenswrapper[4922]: I1128 06:54:11.371488 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:54:11 crc kubenswrapper[4922]: I1128 06:54:11.371509 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:54:11Z","lastTransitionTime":"2025-11-28T06:54:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
[The same five-entry sequence (NodeHasSufficientMemory, NodeHasNoDiskPressure, NodeHasSufficientPID, NodeNotReady, and "Node became not ready" with the identical KubeletNotReady/CNI message) repeats verbatim at 06:54:11.475, 06:54:11.580, 06:54:11.684, 06:54:11.789, 06:54:11.892, and 06:54:11.994; condensed here.]
Nov 28 06:54:12 crc kubenswrapper[4922]: I1128 06:54:12.009638 4922 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-7gdxt_ac5c6b67-2037-400e-8e03-845b47d8ca67/ovnkube-controller/3.log"
Nov 28 06:54:12 crc kubenswrapper[4922]: I1128 06:54:12.015841 4922 scope.go:117] "RemoveContainer" containerID="34af1a43ea6a98f41968e9d8fedf88b2886eff55e56e71236e5e9a4c181652af"
Nov 28 06:54:12 crc kubenswrapper[4922]: E1128 06:54:12.016275 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 40s restarting failed container=ovnkube-controller pod=ovnkube-node-7gdxt_openshift-ovn-kubernetes(ac5c6b67-2037-400e-8e03-845b47d8ca67)\"" pod="openshift-ovn-kubernetes/ovnkube-node-7gdxt" podUID="ac5c6b67-2037-400e-8e03-845b47d8ca67" Nov 28 06:54:12 crc kubenswrapper[4922]: I1128 06:54:12.035014 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"e3415f94-87c0-4a05-9d74-02ac020c4d35\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8db4ea0b3e2badcbe285f1758b150d335c8c1f4d92478a2de587cf9d345f3640\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a5b5d5b7a735c902e218bbb524cda2f64b167fcced9cc07a47bff7e59cb55115\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://900d4f306d3a3e4386262c1ee1031ad03503f6c262e9305c581397b3bf1a6a60\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://73c8b650a2a0ebc55d6531c14f5c6e5e4a7ffbac511aea8400c6632289aeb275\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://73c8b650a2a0ebc55d6531c14f5c6e5e4a7ffbac511aea8400c6632289aeb275\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T06:52:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T06:52:36Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T06:52:35Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:54:12Z is after 2025-08-24T17:21:41Z" Nov 28 06:54:12 crc kubenswrapper[4922]: I1128 06:54:12.053553 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://60275d4709ecec957c37c52e762628422258288394fb23bb6190e98253977288\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d7be069e52fb9de73b7d4297a34eaab68e25c764ed60172e87146259d0d0b6ed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":t
rue,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:54:12Z is after 2025-08-24T17:21:41Z" Nov 28 06:54:12 crc kubenswrapper[4922]: I1128 06:54:12.074014 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8de96806bba4cb76f53ccf39f1188732d96955671049b550589a836ed21e0616\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:54:12Z is after 2025-08-24T17:21:41Z" Nov 28 06:54:12 crc kubenswrapper[4922]: I1128 06:54:12.093401 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:54:12Z is after 2025-08-24T17:21:41Z" Nov 28 06:54:12 crc kubenswrapper[4922]: I1128 06:54:12.097886 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:54:12 crc kubenswrapper[4922]: I1128 06:54:12.097943 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:54:12 crc kubenswrapper[4922]: I1128 06:54:12.097960 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:54:12 crc kubenswrapper[4922]: I1128 06:54:12.097988 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:54:12 crc kubenswrapper[4922]: I1128 06:54:12.098006 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:54:12Z","lastTransitionTime":"2025-11-28T06:54:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 06:54:12 crc kubenswrapper[4922]: I1128 06:54:12.111290 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-h8wk6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0498340a-5e95-42bf-a0a6-8ac89a6b8858\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://19e8a18b97500977a51210206bba89633ff9939e989bfe1a6e471f5a23c83520\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zf7vq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b903fc60349ec7d5004f23ec933dce13d646a2c343819495564005dfb4813314\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zf7vq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T06:52:54Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-h8wk6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:54:12Z is after 2025-08-24T17:21:41Z" Nov 28 06:54:12 crc kubenswrapper[4922]: I1128 06:54:12.129175 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-n9b52" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"31348e3e-fe58-4426-98b7-bd9dd404283b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://07e060fc8cd65cfc5a3e2ba86edb408b32996c25dac44f6f70d2f91b837e9da1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:53:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vkhgr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://03d0e97f4ab12f9cccfefde0d5665e45e14db63343fd1273279d41a8e56b060f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:53:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vkhgr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T06:53:
07Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-n9b52\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:54:12Z is after 2025-08-24T17:21:41Z" Nov 28 06:54:12 crc kubenswrapper[4922]: I1128 06:54:12.149190 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"78d1b075-6126-476f-8318-483aeaa7b542\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d8727c0c5c76c4071561c95b14554063c01a2d7d826bfef19624a346f7079096\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c63926690deadbe1a0b9bd4228c1be8c9d959ae52dc13540e1d7da0ef6ce2b44\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://12e55656cf2a2cadbef853f458f347aabe75bd6b644a48a8c320144a0bbb77f8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7
462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ff005273cedb4a3be19dafd07b2572733446d00c1aa3d89dec6dad79a9b37cb7\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://43f9db3464d315f5b596f15327b9e7cfc2935bb8f18492bcf4aa1b2d8e2e8951\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-28T06:52:53Z\\\",\\\"message\\\":\\\" to sync for client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file\\\\nI1128 06:52:53.805479 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\nI1128 06:52:53.805349 1 requestheader_controller.go:172] Starting RequestHeaderAuthRequestController\\\\nI1128 06:52:53.807455 1 shared_informer.go:313] Waiting for caches to sync for RequestHeaderAuthRequestController\\\\nI1128 06:52:53.805665 1 envvar.go:172] \\\\\\\"Feature gate default state\\\\\\\" feature=\\\\\\\"WatchListClient\\\\\\\" enabled=false\\\\nI1128 06:52:53.807562 1 envvar.go:172] \\\\\\\"Feature gate default state\\\\\\\" feature=\\\\\\\"InformerResourceVersion\\\\\\\" enabled=false\\\\nI1128 06:52:53.805922 1 tlsconfig.go:203] \\\\\\\"Loaded serving cert\\\\\\\" certName=\\\\\\\"serving-cert::/tmp/serving-cert-870734072/tls.crt::/tmp/serving-cert-870734072/tls.key\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"localhost\\\\\\\\\\\\\\\" [serving] validServingFor=[localhost] issuer=\\\\\\\\\\\\\\\"check-endpoints-signer@1764312757\\\\\\\\\\\\\\\" (2025-11-28 06:52:36 +0000 UTC to 2025-12-28 06:52:37 +0000 UTC (now=2025-11-28 06:52:53.805871396 +0000 UTC))\\\\\\\"\\\\nI1128 06:52:53.808356 1 named_certificates.go:53] \\\\\\\"Loaded SNI cert\\\\\\\" index=0 certName=\\\\\\\"self-signed loopback\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"apiserver-loopback-client@1764312768\\\\\\\\\\\\\\\" [serving] validServingFor=[apiserver-loopback-client] issuer=\\\\\\\\\\\\\\\"apiserver-loopback-client-ca@1764312768\\\\\\\\\\\\\\\" (2025-11-28 05:52:47 +0000 UTC to 2026-11-28 05:52:47 +0000 UTC (now=2025-11-28 06:52:53.808326937 +0000 UTC))\\\\\\\"\\\\nI1128 06:52:53.808434 1 secure_serving.go:213] Serving securely on [::]:17697\\\\nI1128 06:52:53.808491 1 genericapiserver.go:683] [graceful-termination] waiting for shutdown to be initiated\\\\nI1128 06:52:53.805962 1 dynamic_serving_content.go:135] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-870734072/tls.crt::/tmp/serving-cert-870734072/tls.key\\\\\\\"\\\\nI1128 06:52:53.808872 1 tlsconfig.go:243] \\\\\\\"Starting DynamicServingCertificateController\\\\\\\"\\\\nF1128 06:52:53.810397 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T06:52:37Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cd17aa15325fbe3a40b81cea5bfec137ef02a8dd2d5f8393be4d206a8360ba89\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:37Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9a1a9e0fb2169a4caf2986906a87573648458725e2571ec377704856d3c16b47\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9a1a9e0fb2169a4caf2986906a87573648458725e2571ec377704856d3c16b47\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T06:52:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T06:52:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T06:52:35Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:54:12Z is after 2025-08-24T17:21:41Z" Nov 28 06:54:12 crc kubenswrapper[4922]: I1128 06:54:12.165277 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"dc6fd62c-3407-4045-a223-d0b56212e7da\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d11064f96da0c5b3c10a95671667cd2643279cccaafadf1ae910cffa4a1613ec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kube\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a5f6fc7cc5c7d7923b2f63059c0cfed9ebda675222e2ca864808be06f90e2907\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a5f6fc7cc5c7d7923b2f63059c0cfed9ebda675222e2ca864808be06f90e2907\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T06:52:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T06:52:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T06:52:35Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:54:12Z is after 2025-08-24T17:21:41Z" Nov 28 06:54:12 crc kubenswrapper[4922]: I1128 06:54:12.185141 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:57Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:57Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4fb859dbed8c4c25965a739a2bd823f90f867a3bb629206994858cd62ba597ff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:54:12Z is after 2025-08-24T17:21:41Z" Nov 28 06:54:12 crc kubenswrapper[4922]: I1128 06:54:12.201704 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:54:12 crc kubenswrapper[4922]: I1128 06:54:12.201763 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:54:12 crc kubenswrapper[4922]: I1128 06:54:12.201785 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:54:12 crc kubenswrapper[4922]: I1128 06:54:12.201814 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:54:12 crc kubenswrapper[4922]: I1128 06:54:12.201836 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:54:12Z","lastTransitionTime":"2025-11-28T06:54:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 06:54:12 crc kubenswrapper[4922]: I1128 06:54:12.205125 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:54:12Z is after 2025-08-24T17:21:41Z" Nov 28 06:54:12 crc kubenswrapper[4922]: I1128 06:54:12.237797 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-7gdxt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ac5c6b67-2037-400e-8e03-845b47d8ca67\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"message\\\":\\\"containers with unready 
status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d0f038ffe6c8ec5ec836379b827ecb71967119efca3f2cd4ce5d3006fa96a153\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dtxdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2c1a545fef7aec25b21dd112c54eeb7b94306cafaf4732a50de9ff10a409c443\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dtxdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6e9942f8e8cdf6ffd04d46b29be5a53fcc8c4cb5d79f50d7747e2fe6eb744ffd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",
\\\"name\\\":\\\"kube-api-access-dtxdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e91ea6957087e19d43626438d68bf92a19a6f35325de570738354edd61da5bb0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dtxdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4f56366e2355663eb896c09a6710166b89f794a7f17cb317f6c4299e8b317782\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dtxdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://403e86b4b99e8bc85be77996288705f6ea8ff5da8e9b7dfb09749f4341065b55\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-s
ocket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dtxdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://34af1a43ea6a98f41968e9d8fedf88b2886eff55e56e71236e5e9a4c181652af\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://34af1a43ea6a98f41968e9d8fedf88b2886eff55e56e71236e5e9a4c181652af\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-28T06:54:10Z\\\",\\\"message\\\":\\\"nalversions/factory.go:140\\\\nI1128 06:54:10.438751 7071 address_set.go:302] New(bf133528-8652-4c84-85ff-881f0afe9837/default-network-controller:EgressService:egresssvc-served-pods:v4/a13607449821398607916) with []\\\\nI1128 06:54:10.437068 7071 reflector.go:311] Stopping reflector *v1alpha1.AdminNetworkPolicy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI1128 06:54:10.438959 7071 factory.go:1336] Added *v1.Node event handler 7\\\\nI1128 06:54:10.439011 7071 factory.go:1336] Added *v1.EgressIP event handler 8\\\\nI1128 06:54:10.439322 7071 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI1128 06:54:10.439347 7071 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI1128 06:54:10.439395 7071 factory.go:656] Stopping watch factory\\\\nI1128 06:54:10.439440 7071 handler.go:208] Removed *v1.Node event handler 7\\\\nI1128 06:54:10.439479 7071 factory.go:1336] Added *v1.EgressFirewall event handler 9\\\\nI1128 06:54:10.439486 7071 handler.go:208] Removed *v1.Node event handler 2\\\\nI1128 06:54:10.440044 7071 controller.go:132] Adding controller ef_node_controller event handlers\\\\nI1128 06:54:10.440095 7071 ovnkube.go:599] Stopped ovnkube\\\\nI1128 06:54:10.440133 7071 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nF1128 06:54:10.440350 7071 ovnkube.go:\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T06:54:09Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 40s restarting failed container=ovnkube-controller 
pod=ovnkube-node-7gdxt_openshift-ovn-kubernetes(ac5c6b67-2037-400e-8e03-845b47d8ca67)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dtxdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d31c99f96b8af9c7b9c47c4ccd0b6485b23c9eacfe293667038042df8330d8a9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dtxdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3aaa3ffb5b99838c7969b3e241ad9d7a0bc4797fac3ec52280914abbb7ca1b24\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3aaa3ffb5b99838c7969b3e241ad9d7a0bc4797fac3ec52280914abbb7ca1b24\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T06:52:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dtxdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T06:52:54Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-7gdxt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:54:12Z is after 2025-08-24T17:21:41Z" Nov 28 06:54:12 crc kubenswrapper[4922]: I1128 06:54:12.253568 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-z5d9x" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"21211ec8-3baf-4230-9cd8-c641f6bdc0e1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d42d2096b746c6f0a9210409101237a8b283b9baa914633aefc36971712ec634\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g4hdz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\
"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T06:52:57Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-z5d9x\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:54:12Z is after 2025-08-24T17:21:41Z" Nov 28 06:54:12 crc kubenswrapper[4922]: I1128 06:54:12.268716 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-9kfr9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"709beb43-ed88-4a0a-b384-0c463e469964\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:09Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:09Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5p26f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5p26f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T06:53:09Z\\\"}}\" for pod 
\"openshift-multus\"/\"network-metrics-daemon-9kfr9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:54:12Z is after 2025-08-24T17:21:41Z" Nov 28 06:54:12 crc kubenswrapper[4922]: I1128 06:54:12.289215 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f54a7fc3-dff2-4919-91a9-7defe17b717e\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2b39d53d3c00be07ad6ed44fb37961669118efd3f0a953a39e05c90c9fc30319\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1c84fe4f54f9b9fc76f23f13aaee875e9d127be3dec3e3952893436fb9d94936\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bf8a543b9af64068e2164c1fb7fcf8117e36a3f1a6c6e40df2a63da8fea86612\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\
\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7ef4b355167e11c82fb8067373d0d1b4ec5551157e683043c98f9249082795d2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T06:52:35Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:54:12Z is after 2025-08-24T17:21:41Z" Nov 28 06:54:12 crc kubenswrapper[4922]: I1128 06:54:12.304442 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:54:12 crc kubenswrapper[4922]: I1128 06:54:12.304501 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:54:12 crc kubenswrapper[4922]: I1128 06:54:12.304519 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:54:12 crc kubenswrapper[4922]: I1128 06:54:12.304544 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:54:12 crc kubenswrapper[4922]: I1128 06:54:12.304561 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:54:12Z","lastTransitionTime":"2025-11-28T06:54:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 06:54:12 crc kubenswrapper[4922]: I1128 06:54:12.308304 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:54:12Z is after 2025-08-24T17:21:41Z" Nov 28 06:54:12 crc kubenswrapper[4922]: I1128 06:54:12.324556 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-w9zxj" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"e1f29751-83a6-4469-b733-50e654026f8c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://679d64be6eca63ee652a4cb6cc5432c6ce549d1de243bdfbde115af634e770e3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ghdft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T06:52:54Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-w9zxj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:54:12Z is after 2025-08-24T17:21:41Z" Nov 28 06:54:12 crc kubenswrapper[4922]: I1128 06:54:12.346576 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-jgzjd" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b05f16bb-1729-4fd8-883a-4fb960bf4cff\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://18ece98b3e5c71e3aa7b48051d91a0684f108688cd8a31e00422d5d0b047a76e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://099001160eba2d5545fdd42a6fac2b9842549a64b5d3fe093c274e073a156e5a\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-28T06:53:41Z\\\",\\\"message\\\":\\\"2025-11-28T06:52:55+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_23e15381-4c29-4769-a2b0-29fb61b368c2\\\\n2025-11-28T06:52:55+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_23e15381-4c29-4769-a2b0-29fb61b368c2 to /host/opt/cni/bin/\\\\n2025-11-28T06:52:55Z [verbose] multus-daemon started\\\\n2025-11-28T06:52:55Z [verbose] Readiness Indicator file check\\\\n2025-11-28T06:53:40Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T06:52:55Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:53:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fwd5b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T06:52:54Z\\\"}}\" for pod \"openshift-multus\"/\"multus-jgzjd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:54:12Z is after 2025-08-24T17:21:41Z" Nov 28 06:54:12 crc kubenswrapper[4922]: I1128 06:54:12.366651 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-xm948" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"28b50482-ffec-406c-9ff9-9604bce5d5d5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8329d62934c99c8561a76ffc873ea44b49549e6be23697ea4003dcd42798f914\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:53:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-76zhj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ea8d65e8ee7eb92aa290261e8f51b41b46819bc513aaca5c3bbdf3aa90a4acf1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ea8d65e8ee7eb92aa290261e8f51b41b46819bc513aaca5c3bbdf3aa90a4acf1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T06:52:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-76zhj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b52be6274c308e05417b43b1a405e2156837ac5b1e751cf1bf07f1820e5072c1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b52be6274c308e05417b43b1a405e2156837ac5b1e751cf1bf07f1820e5072c1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T06:52:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T06:52:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-76zhj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2e49c9118ef8a51223ca7a5ed0e966458c25e01d5fe81bffeecf85e3c958177a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2e49c9118ef8a51223ca7a5ed0e966458c25e01d5fe81bffeecf85e3c958177a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T06:52:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T06:52:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-76zhj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://10808a5202288b64a0583c474df43f7ad4846b876c071e3797e9403416dbd4e5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://10808a5202288b64a0583c474df43f7ad4846b876c071e3797e9403416dbd4e5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T06:52:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T06:52:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-76zhj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f17f7a3f2eae1a08db3ab5d237156d3555583ed11bdf121c4138312913bc2ad1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f17f7a3f2eae1a08db3ab5d237156d3555583ed11bdf121c4138312913bc2ad1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T06:53:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T06:52:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-76zhj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8c558e4eb231b58b703d70d6da8454e7128ac7fc7138897bedcbdeb9e801a08b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8c558e4eb231b58b703d70d6da8454e7128ac7fc7138897bedcbdeb9e801a08b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T06:53:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T06:53:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-76zhj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T06:52:54Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-xm948\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:54:12Z is after 2025-08-24T17:21:41Z" Nov 28 06:54:12 crc kubenswrapper[4922]: I1128 06:54:12.397934 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 06:54:12 crc kubenswrapper[4922]: I1128 06:54:12.397995 4922 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/network-metrics-daemon-9kfr9" Nov 28 06:54:12 crc kubenswrapper[4922]: E1128 06:54:12.398116 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 06:54:12 crc kubenswrapper[4922]: I1128 06:54:12.398164 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 06:54:12 crc kubenswrapper[4922]: I1128 06:54:12.398292 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 06:54:12 crc kubenswrapper[4922]: E1128 06:54:12.398718 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-9kfr9" podUID="709beb43-ed88-4a0a-b384-0c463e469964" Nov 28 06:54:12 crc kubenswrapper[4922]: E1128 06:54:12.398958 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 06:54:12 crc kubenswrapper[4922]: E1128 06:54:12.399136 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 06:54:12 crc kubenswrapper[4922]: I1128 06:54:12.408596 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:54:12 crc kubenswrapper[4922]: I1128 06:54:12.408676 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:54:12 crc kubenswrapper[4922]: I1128 06:54:12.408703 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:54:12 crc kubenswrapper[4922]: I1128 06:54:12.408734 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:54:12 crc kubenswrapper[4922]: I1128 06:54:12.408821 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:54:12Z","lastTransitionTime":"2025-11-28T06:54:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Nov 28 06:54:12 crc kubenswrapper[4922]: I1128 06:54:12.417641 4922 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-etcd/etcd-crc"]
Nov 28 06:54:12 crc kubenswrapper[4922]: I1128 06:54:12.511325 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 06:54:12 crc kubenswrapper[4922]: I1128 06:54:12.511380 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 06:54:12 crc kubenswrapper[4922]: I1128 06:54:12.511394 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 06:54:12 crc kubenswrapper[4922]: I1128 06:54:12.511412 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 06:54:12 crc kubenswrapper[4922]: I1128 06:54:12.511424 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:54:12Z","lastTransitionTime":"2025-11-28T06:54:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
[the preceding five node-status entries repeat roughly every 100 ms through 06:54:15.323 with only the timestamps changing; those duplicate blocks are omitted here, and the interleaved non-duplicate entries follow]
Nov 28 06:54:13 crc kubenswrapper[4922]: I1128 06:54:13.020168 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/709beb43-ed88-4a0a-b384-0c463e469964-metrics-certs\") pod \"network-metrics-daemon-9kfr9\" (UID: \"709beb43-ed88-4a0a-b384-0c463e469964\") " pod="openshift-multus/network-metrics-daemon-9kfr9"
Nov 28 06:54:13 crc kubenswrapper[4922]: E1128 06:54:13.020355 4922 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered
Nov 28 06:54:13 crc kubenswrapper[4922]: E1128 06:54:13.020433 4922 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/709beb43-ed88-4a0a-b384-0c463e469964-metrics-certs podName:709beb43-ed88-4a0a-b384-0c463e469964 nodeName:}" failed. No retries permitted until 2025-11-28 06:55:17.020411348 +0000 UTC m=+161.940806970 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/709beb43-ed88-4a0a-b384-0c463e469964-metrics-certs") pod "network-metrics-daemon-9kfr9" (UID: "709beb43-ed88-4a0a-b384-0c463e469964") : object "openshift-multus"/"metrics-daemon-secret" not registered
Nov 28 06:54:14 crc kubenswrapper[4922]: I1128 06:54:14.398174 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 28 06:54:14 crc kubenswrapper[4922]: I1128 06:54:14.398212 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 28 06:54:14 crc kubenswrapper[4922]: I1128 06:54:14.398395 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 28 06:54:14 crc kubenswrapper[4922]: E1128 06:54:14.398583 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 28 06:54:14 crc kubenswrapper[4922]: I1128 06:54:14.398638 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-9kfr9"
Nov 28 06:54:14 crc kubenswrapper[4922]: E1128 06:54:14.398872 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-9kfr9" podUID="709beb43-ed88-4a0a-b384-0c463e469964"
Nov 28 06:54:14 crc kubenswrapper[4922]: E1128 06:54:14.399001 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 28 06:54:14 crc kubenswrapper[4922]: E1128 06:54:14.399102 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 28 06:54:15 crc kubenswrapper[4922]: I1128 06:54:15.414858 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e3415f94-87c0-4a05-9d74-02ac020c4d35\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8db4ea0b3e2badcbe285f1758b150d335c8c1f4d92478a2de587cf9d345f3640\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a5b5d5b7a735c902e218bbb524cda2f64b167fcced9cc07a47bff7e59cb55115\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-sched
uler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://900d4f306d3a3e4386262c1ee1031ad03503f6c262e9305c581397b3bf1a6a60\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://73c8b650a2a0ebc55d6531c14f5c6e5e4a7ffbac511aea8400c6632289aeb275\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://73c8b650a2a0ebc55d6531c14f5c6e5e4a7ffbac511aea8400c6632289aeb275\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T06:52:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T06:52:36Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T06:52:35Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:54:15Z is after 2025-08-24T17:21:41Z" Nov 28 06:54:15 crc kubenswrapper[4922]: I1128 06:54:15.426970 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:54:15 crc kubenswrapper[4922]: I1128 06:54:15.427037 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:54:15 crc kubenswrapper[4922]: I1128 06:54:15.427055 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:54:15 crc kubenswrapper[4922]: I1128 06:54:15.427080 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:54:15 crc kubenswrapper[4922]: I1128 06:54:15.427100 4922 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:54:15Z","lastTransitionTime":"2025-11-28T06:54:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:54:15 crc kubenswrapper[4922]: I1128 06:54:15.435891 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://60275d4709ecec957c37c52e762628422258288394fb23bb6190e98253977288\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d7be069e52fb9de73b7d4297a34eaab68e25c764ed60172e87146259d0d0b6ed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify 
certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:54:15Z is after 2025-08-24T17:21:41Z" Nov 28 06:54:15 crc kubenswrapper[4922]: I1128 06:54:15.455843 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8de96806bba4cb76f53ccf39f1188732d96955671049b550589a836ed21e0616\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:54:15Z is after 2025-08-24T17:21:41Z" Nov 28 06:54:15 crc kubenswrapper[4922]: I1128 06:54:15.489785 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c2b668a2-712d-4ac2-b0a3-3891a7c1b9d6\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3091c94fa0eee49ec805396d2a8e082a8428e92abd834f80670173611dea16e2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e2f748fca556d4adf2ab60a532744f8ae67edf55eb3804c03200b6d8ff4fa40b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c57d800639e5d3da6ddbaaa824296a41cb81406479f20297d32e46702be47e17\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9ed8f1d210b89e31af569d93f2678c02160323a
61022fbfc705b18e078d1c900\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://81c4d4bc41526ec84fd23ce57c1aec1457d145ee86ba65f785e699bf5234e498\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://74e297c046ab6458dc1922bc05089d9fbba4972e952fefb845a81e8852217743\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://74e297c046ab6458dc1922bc05089d9fbba4972e952fefb845a81e8852217743\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T06:52:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T06:52:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6bdbf05d3df8ac497778f79f94f805b63a9a879353e4bc7e670054ae4224ed8a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6bdbf05d3df8ac497778f79f94f805b63a9a879353e4bc7e670054ae4224ed8a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T06:52:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T06:52:36Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://9dd6a4216b760f04b166ad14dafd51d365a0e3d70bffd901a8832595314de337\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9dd6a4216b760f04b166ad14dafd51d365a0e3d70bffd901a8832595314de337\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T06:52:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T06:52:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T06:52:35Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:54:15Z is after 2025-08-24T17:21:41Z" Nov 28 06:54:15 crc kubenswrapper[4922]: I1128 06:54:15.512961 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:54:15Z is after 2025-08-24T17:21:41Z" Nov 28 06:54:15 crc kubenswrapper[4922]: I1128 06:54:15.530487 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:54:15 crc kubenswrapper[4922]: I1128 06:54:15.530603 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:54:15 crc kubenswrapper[4922]: I1128 06:54:15.530621 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:54:15 crc kubenswrapper[4922]: I1128 06:54:15.530646 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:54:15 crc kubenswrapper[4922]: I1128 06:54:15.530666 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:54:15Z","lastTransitionTime":"2025-11-28T06:54:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 06:54:15 crc kubenswrapper[4922]: I1128 06:54:15.532989 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-h8wk6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0498340a-5e95-42bf-a0a6-8ac89a6b8858\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://19e8a18b97500977a51210206bba89633ff9939e989bfe1a6e471f5a23c83520\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zf7vq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b903fc60349ec7d5004f23ec933dce13d646a2c343819495564005dfb4813314\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zf7vq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T06:52:54Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-h8wk6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:54:15Z is after 2025-08-24T17:21:41Z" Nov 28 06:54:15 crc kubenswrapper[4922]: I1128 06:54:15.555728 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"78d1b075-6126-476f-8318-483aeaa7b542\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d8727c0c5c76c4071561c95b14554063c01a2d7d826bfef19624a346f7079096\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c63926690deadbe1a0b9bd4228c1be8c9d959ae52dc13540e1d7da0ef6ce2b44\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://12e55656cf2a2cadbef853f458f347aabe75bd6b644a48a8c320144a0bbb77f8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:36Z
\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ff005273cedb4a3be19dafd07b2572733446d00c1aa3d89dec6dad79a9b37cb7\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://43f9db3464d315f5b596f15327b9e7cfc2935bb8f18492bcf4aa1b2d8e2e8951\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-28T06:52:53Z\\\",\\\"message\\\":\\\" to sync for client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file\\\\nI1128 06:52:53.805479 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\nI1128 06:52:53.805349 1 requestheader_controller.go:172] Starting RequestHeaderAuthRequestController\\\\nI1128 06:52:53.807455 1 shared_informer.go:313] Waiting for caches to sync for RequestHeaderAuthRequestController\\\\nI1128 06:52:53.805665 1 envvar.go:172] \\\\\\\"Feature gate default state\\\\\\\" feature=\\\\\\\"WatchListClient\\\\\\\" enabled=false\\\\nI1128 06:52:53.807562 1 envvar.go:172] \\\\\\\"Feature gate default state\\\\\\\" feature=\\\\\\\"InformerResourceVersion\\\\\\\" enabled=false\\\\nI1128 06:52:53.805922 1 tlsconfig.go:203] \\\\\\\"Loaded serving cert\\\\\\\" certName=\\\\\\\"serving-cert::/tmp/serving-cert-870734072/tls.crt::/tmp/serving-cert-870734072/tls.key\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"localhost\\\\\\\\\\\\\\\" [serving] validServingFor=[localhost] issuer=\\\\\\\\\\\\\\\"check-endpoints-signer@1764312757\\\\\\\\\\\\\\\" (2025-11-28 06:52:36 +0000 UTC to 2025-12-28 06:52:37 +0000 UTC (now=2025-11-28 06:52:53.805871396 +0000 UTC))\\\\\\\"\\\\nI1128 06:52:53.808356 1 named_certificates.go:53] \\\\\\\"Loaded SNI cert\\\\\\\" index=0 certName=\\\\\\\"self-signed loopback\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"apiserver-loopback-client@1764312768\\\\\\\\\\\\\\\" [serving] validServingFor=[apiserver-loopback-client] issuer=\\\\\\\\\\\\\\\"apiserver-loopback-client-ca@1764312768\\\\\\\\\\\\\\\" (2025-11-28 05:52:47 +0000 UTC to 2026-11-28 05:52:47 +0000 UTC (now=2025-11-28 06:52:53.808326937 +0000 UTC))\\\\\\\"\\\\nI1128 06:52:53.808434 1 secure_serving.go:213] Serving securely on [::]:17697\\\\nI1128 06:52:53.808491 1 genericapiserver.go:683] [graceful-termination] waiting for shutdown to be initiated\\\\nI1128 06:52:53.805962 1 dynamic_serving_content.go:135] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-870734072/tls.crt::/tmp/serving-cert-870734072/tls.key\\\\\\\"\\\\nI1128 06:52:53.808872 1 tlsconfig.go:243] \\\\\\\"Starting DynamicServingCertificateController\\\\\\\"\\\\nF1128 06:52:53.810397 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T06:52:37Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cd17aa15325fbe3a40b81cea5bfec137ef02a8dd2d5f8393be4d206a8360ba89\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:37Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9a1a9e0fb2169a4caf2986906a87573648458725e2571ec377704856d3c16b47\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9a1a9e0fb2169a4caf2986906a87573648458725e2571ec377704856d3c16b47\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T06:52:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T06:52:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T06:52:35Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:54:15Z is after 2025-08-24T17:21:41Z" Nov 28 06:54:15 crc kubenswrapper[4922]: I1128 06:54:15.573063 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"dc6fd62c-3407-4045-a223-d0b56212e7da\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d11064f96da0c5b3c10a95671667cd2643279cccaafadf1ae910cffa4a1613ec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kube\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a5f6fc7cc5c7d7923b2f63059c0cfed9ebda675222e2ca864808be06f90e2907\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a5f6fc7cc5c7d7923b2f63059c0cfed9ebda675222e2ca864808be06f90e2907\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T06:52:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T06:52:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T06:52:35Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:54:15Z is after 2025-08-24T17:21:41Z" Nov 28 06:54:15 crc kubenswrapper[4922]: I1128 06:54:15.592664 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:57Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:57Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4fb859dbed8c4c25965a739a2bd823f90f867a3bb629206994858cd62ba597ff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:54:15Z is after 2025-08-24T17:21:41Z" Nov 28 06:54:15 crc kubenswrapper[4922]: I1128 06:54:15.612476 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:54:15Z is after 2025-08-24T17:21:41Z" Nov 28 06:54:15 crc kubenswrapper[4922]: I1128 06:54:15.635272 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:54:15 crc kubenswrapper[4922]: I1128 06:54:15.635332 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:54:15 crc kubenswrapper[4922]: I1128 06:54:15.635350 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:54:15 crc kubenswrapper[4922]: I1128 06:54:15.635377 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:54:15 crc kubenswrapper[4922]: I1128 06:54:15.635396 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:54:15Z","lastTransitionTime":"2025-11-28T06:54:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
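Every one of these patch failures shares a single root cause: the pod.network-node-identity.openshift.io webhook at 127.0.0.1:9743 is serving a certificate that expired on 2025-08-24, while the node clock reads 2025-11-28. A quick stdlib Go probe to confirm what the kubelet's TLS verification is rejecting; the address is taken from the log line, and verification is deliberately skipped so the expired leaf can be inspected rather than failing the same way the kubelet does:

// certcheck.go - minimal sketch: dial the webhook endpoint and print the
// leaf certificate's validity window against the current time.
package main

import (
	"crypto/tls"
	"fmt"
	"time"
)

func main() {
	addr := "127.0.0.1:9743" // webhook endpoint from the log

	conn, err := tls.Dial("tcp", addr, &tls.Config{InsecureSkipVerify: true})
	if err != nil {
		fmt.Println("dial:", err)
		return
	}
	defer conn.Close()

	now := time.Now().UTC()
	for _, cert := range conn.ConnectionState().PeerCertificates {
		fmt.Printf("subject=%s notBefore=%s notAfter=%s expired=%t\n",
			cert.Subject,
			cert.NotBefore.UTC().Format(time.RFC3339),
			cert.NotAfter.UTC().Format(time.RFC3339),
			now.After(cert.NotAfter))
	}
}

On CRC this situation typically resolves itself once the cluster's cert-regeneration controllers catch up after start-up; the sketch only confirms the expired validity window, it does not rotate anything.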
Has your network provider started?"} Nov 28 06:54:15 crc kubenswrapper[4922]: I1128 06:54:15.645931 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-7gdxt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ac5c6b67-2037-400e-8e03-845b47d8ca67\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d0f038ffe6c8ec5ec836379b827ecb71967119efca3f2cd4ce5d3006fa96a153\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dtxdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2c1a545fef7aec25b21dd112c54eeb7b94306cafaf4732a50de9ff10a409c443\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dtxdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\
":\\\"cri-o://6e9942f8e8cdf6ffd04d46b29be5a53fcc8c4cb5d79f50d7747e2fe6eb744ffd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dtxdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e91ea6957087e19d43626438d68bf92a19a6f35325de570738354edd61da5bb0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dtxdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4f56366e2355663eb896c09a6710166b89f794a7f17cb317f6c4299e8b317782\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dtxdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://403e86b4b99e8bc85be77996288705f6ea8ff5da8e9b7dfb09749f4341065b55\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.i
o/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dtxdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://34af1a43ea6a98f41968e9d8fedf88b2886eff55e56e71236e5e9a4c181652af\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://34af1a43ea6a98f41968e9d8fedf88b2886eff55e56e71236e5e9a4c181652af\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-28T06:54:10Z\\\",\\\"message\\\":\\\"nalversions/factory.go:140\\\\nI1128 06:54:10.438751 7071 address_set.go:302] New(bf133528-8652-4c84-85ff-881f0afe9837/default-network-controller:EgressService:egresssvc-served-pods:v4/a13607449821398607916) with []\\\\nI1128 06:54:10.437068 7071 reflector.go:311] Stopping reflector *v1alpha1.AdminNetworkPolicy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI1128 06:54:10.438959 7071 factory.go:1336] Added *v1.Node event handler 7\\\\nI1128 06:54:10.439011 7071 factory.go:1336] Added *v1.EgressIP event handler 8\\\\nI1128 06:54:10.439322 7071 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI1128 06:54:10.439347 7071 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI1128 06:54:10.439395 7071 factory.go:656] Stopping watch factory\\\\nI1128 06:54:10.439440 7071 handler.go:208] Removed *v1.Node event handler 7\\\\nI1128 06:54:10.439479 7071 factory.go:1336] Added *v1.EgressFirewall event handler 9\\\\nI1128 06:54:10.439486 7071 handler.go:208] Removed *v1.Node event handler 2\\\\nI1128 06:54:10.440044 7071 controller.go:132] Adding controller ef_node_controller event handlers\\\\nI1128 06:54:10.440095 7071 ovnkube.go:599] Stopped ovnkube\\\\nI1128 06:54:10.440133 7071 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nF1128 06:54:10.440350 7071 ovnkube.go:\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T06:54:09Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 40s restarting failed container=ovnkube-controller 
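The "back-off 40s" figure in this CrashLoopBackOff message follows the kubelet's doubling schedule: delays start at 10s and double on each failed restart, capped at 5m, so restartCount 3 gives 10s * 2^2 = 40s. A sketch of that arithmetic; the constants mirror kubelet defaults and are assumptions here, not values read from this cluster:

// backoff.go - sketch of the crash-loop back-off arithmetic as it shows
// up above. crashLoopDelay(3) returns 40s, matching the logged message.
package main

import (
	"fmt"
	"time"
)

func crashLoopDelay(restarts int) time.Duration {
	d := 10 * time.Second // assumed initial delay
	for i := 1; i < restarts; i++ {
		d *= 2
		if d > 5*time.Minute { // assumed cap
			return 5 * time.Minute
		}
	}
	return d
}

func main() {
	for r := 1; r <= 6; r++ {
		fmt.Printf("restart %d -> back-off %s\n", r, crashLoopDelay(r))
	}
}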
pod=ovnkube-node-7gdxt_openshift-ovn-kubernetes(ac5c6b67-2037-400e-8e03-845b47d8ca67)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dtxdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d31c99f96b8af9c7b9c47c4ccd0b6485b23c9eacfe293667038042df8330d8a9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dtxdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3aaa3ffb5b99838c7969b3e241ad9d7a0bc4797fac3ec52280914abbb7ca1b24\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3aaa3ffb5b99838c7969b3e241ad9d7a0bc4797fac3ec52280914abbb7ca1b24\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T06:52:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dtxdn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T06:52:54Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-7gdxt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:54:15Z is after 2025-08-24T17:21:41Z" Nov 28 06:54:15 crc kubenswrapper[4922]: I1128 06:54:15.664783 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-n9b52" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"31348e3e-fe58-4426-98b7-bd9dd404283b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://07e060fc8cd65cfc5a3e2ba86edb408b32996c25dac44f6f70d2f91b837e9da1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:53:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vkhgr
\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://03d0e97f4ab12f9cccfefde0d5665e45e14db63343fd1273279d41a8e56b060f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:53:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vkhgr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T06:53:07Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-n9b52\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:54:15Z is after 2025-08-24T17:21:41Z" Nov 28 06:54:15 crc kubenswrapper[4922]: I1128 06:54:15.684467 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f54a7fc3-dff2-4919-91a9-7defe17b717e\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2b39d53d3c00be07ad6ed44fb37961669118efd3f0a953a39e05c90c9fc30319\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1c84fe4f54f9b9fc76f23f13aaee875e9d127be3dec3e3952893436fb9d94936\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bf8a543b9af64068e2164c1fb7fcf8117e36a3f1a6c6e40df2a63da8fea86612\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7ef4b355167e11c82fb8067373d0d1b4ec5551157e683043c98f9249082795d2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T06:52:35Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:54:15Z is after 2025-08-24T17:21:41Z" Nov 28 06:54:15 crc kubenswrapper[4922]: I1128 06:54:15.703893 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:54:15Z is after 2025-08-24T17:21:41Z" Nov 28 06:54:15 crc kubenswrapper[4922]: I1128 06:54:15.718698 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-w9zxj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e1f29751-83a6-4469-b733-50e654026f8c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://679d64be6eca63ee652a4cb6cc5432c6ce549d1de243bdfbde115af634e770e3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ghdft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T06:52:54Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-w9zxj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 
2025-11-28T06:54:15Z is after 2025-08-24T17:21:41Z" Nov 28 06:54:15 crc kubenswrapper[4922]: I1128 06:54:15.739300 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-jgzjd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b05f16bb-1729-4fd8-883a-4fb960bf4cff\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://18ece98b3e5c71e3aa7b48051d91a0684f108688cd8a31e00422d5d0b047a76e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://099001160eba2d5545fdd42a6fac2b9842549a64b5d3fe093c274e073a156e5a\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-28T06:53:41Z\\\",\\\"message\\\":\\\"2025-11-28T06:52:55+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_23e15381-4c29-4769-a2b0-29fb61b368c2\\\\n2025-11-28T06:52:55+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_23e15381-4c29-4769-a2b0-29fb61b368c2 to /host/opt/cni/bin/\\\\n2025-11-28T06:52:55Z [verbose] multus-daemon started\\\\n2025-11-28T06:52:55Z [verbose] Readiness Indicator file check\\\\n2025-11-28T06:53:40Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
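The kube-multus termination message above shows why that container restarted: multus polls for the ovn-kubernetes readiness indicator file and gives up when it never appears (the timestamps in the message span roughly 45 seconds, 06:52:55 to 06:53:40). A sketch of that wait loop, assuming an illustrative 1s interval and 45s timeout; the real daemon uses wait.PollImmediate with its own settings, which is where the logged "timed out waiting for the condition" text comes from:

// readiness.go - sketch of the readiness-indicator-file wait that the
// multus entry above logs. Interval and timeout are illustrative.
package main

import (
	"fmt"
	"os"
	"time"
)

func waitForFile(path string, interval, timeout time.Duration) error {
	deadline := time.Now().Add(timeout)
	for {
		if _, err := os.Stat(path); err == nil {
			return nil // indicator file appeared; default network is ready
		}
		if time.Now().After(deadline) {
			return fmt.Errorf("timed out waiting for %s", path)
		}
		time.Sleep(interval)
	}
}

func main() {
	path := "/host/run/multus/cni/net.d/10-ovn-kubernetes.conf" // from the log
	if err := waitForFile(path, time.Second, 45*time.Second); err != nil {
		fmt.Println("error:", err) // mirrors the logged poll timeout
	}
}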
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T06:52:55Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:53:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fwd5b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T06:52:54Z\\\"}}\" for pod \"openshift-multus\"/\"multus-jgzjd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:54:15Z is after 2025-08-24T17:21:41Z" Nov 28 06:54:15 crc kubenswrapper[4922]: I1128 06:54:15.740326 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:54:15 crc kubenswrapper[4922]: I1128 06:54:15.740478 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:54:15 crc kubenswrapper[4922]: I1128 06:54:15.740514 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:54:15 crc kubenswrapper[4922]: I1128 06:54:15.740632 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:54:15 crc kubenswrapper[4922]: I1128 06:54:15.740666 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:54:15Z","lastTransitionTime":"2025-11-28T06:54:15Z","reason":"KubeletNotReady","message":"container 
runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:54:15 crc kubenswrapper[4922]: I1128 06:54:15.761111 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-xm948" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"28b50482-ffec-406c-9ff9-9604bce5d5d5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8329d62934c99c8561a76ffc873ea44b49549e6be23697ea4003dcd42798f914\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:53:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-76zhj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ea8d65e8ee7eb92aa290261e8f51b41b46819bc513aaca5c3bbdf3aa90a4acf1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ea8d65e8ee7eb92aa290261e8f51b41b46819bc513aaca5c3bbdf3aa90a4acf1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T06:52:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T06:52:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-76zhj\
\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b52be6274c308e05417b43b1a405e2156837ac5b1e751cf1bf07f1820e5072c1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b52be6274c308e05417b43b1a405e2156837ac5b1e751cf1bf07f1820e5072c1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T06:52:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T06:52:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-76zhj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2e49c9118ef8a51223ca7a5ed0e966458c25e01d5fe81bffeecf85e3c958177a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2e49c9118ef8a51223ca7a5ed0e966458c25e01d5fe81bffeecf85e3c958177a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T06:52:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T06:52:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-76zhj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://10808a5202288b64a0583c474df43f7ad4846b876c071e3797e9403416dbd4e5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://10808a5202288b64a0583c474df43f7ad4846b876c071e3797e9403416dbd4e5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T06:52:58Z\\\",\\\"reason\\\":\
\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T06:52:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-76zhj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f17f7a3f2eae1a08db3ab5d237156d3555583ed11bdf121c4138312913bc2ad1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f17f7a3f2eae1a08db3ab5d237156d3555583ed11bdf121c4138312913bc2ad1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T06:53:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T06:52:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-76zhj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8c558e4eb231b58b703d70d6da8454e7128ac7fc7138897bedcbdeb9e801a08b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8c558e4eb231b58b703d70d6da8454e7128ac7fc7138897bedcbdeb9e801a08b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T06:53:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T06:53:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-76zhj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T06:52:54Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-xm948\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 
2025-11-28T06:54:15Z is after 2025-08-24T17:21:41Z" Nov 28 06:54:15 crc kubenswrapper[4922]: I1128 06:54:15.776962 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-z5d9x" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"21211ec8-3baf-4230-9cd8-c641f6bdc0e1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:52:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d42d2096b746c6f0a9210409101237a8b283b9baa914633aefc36971712ec634\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T06:52:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g4hdz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T06:52:57Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-z5d9x\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:54:15Z is after 2025-08-24T17:21:41Z" Nov 28 06:54:15 crc kubenswrapper[4922]: I1128 06:54:15.794771 4922 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-9kfr9" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"709beb43-ed88-4a0a-b384-0c463e469964\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:09Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:09Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T06:53:09Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5p26f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5p26f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T06:53:09Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-9kfr9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:54:15Z is after 2025-08-24T17:21:41Z" Nov 28 06:54:15 crc kubenswrapper[4922]: I1128 06:54:15.843751 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:54:15 crc kubenswrapper[4922]: I1128 06:54:15.843816 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:54:15 crc kubenswrapper[4922]: I1128 06:54:15.843833 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientPID" Nov 28 06:54:15 crc kubenswrapper[4922]: I1128 06:54:15.843857 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:54:15 crc kubenswrapper[4922]: I1128 06:54:15.843877 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:54:15Z","lastTransitionTime":"2025-11-28T06:54:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:54:15 crc kubenswrapper[4922]: I1128 06:54:15.947375 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:54:15 crc kubenswrapper[4922]: I1128 06:54:15.947441 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:54:15 crc kubenswrapper[4922]: I1128 06:54:15.947463 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:54:15 crc kubenswrapper[4922]: I1128 06:54:15.947495 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:54:15 crc kubenswrapper[4922]: I1128 06:54:15.947518 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:54:15Z","lastTransitionTime":"2025-11-28T06:54:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:54:16 crc kubenswrapper[4922]: I1128 06:54:16.049997 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:54:16 crc kubenswrapper[4922]: I1128 06:54:16.050074 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:54:16 crc kubenswrapper[4922]: I1128 06:54:16.050098 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:54:16 crc kubenswrapper[4922]: I1128 06:54:16.050129 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:54:16 crc kubenswrapper[4922]: I1128 06:54:16.050152 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:54:16Z","lastTransitionTime":"2025-11-28T06:54:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 06:54:16 crc kubenswrapper[4922]: I1128 06:54:16.152421 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:54:16 crc kubenswrapper[4922]: I1128 06:54:16.152497 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:54:16 crc kubenswrapper[4922]: I1128 06:54:16.152519 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:54:16 crc kubenswrapper[4922]: I1128 06:54:16.152549 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:54:16 crc kubenswrapper[4922]: I1128 06:54:16.152572 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:54:16Z","lastTransitionTime":"2025-11-28T06:54:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:54:16 crc kubenswrapper[4922]: I1128 06:54:16.256057 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:54:16 crc kubenswrapper[4922]: I1128 06:54:16.256115 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:54:16 crc kubenswrapper[4922]: I1128 06:54:16.256134 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:54:16 crc kubenswrapper[4922]: I1128 06:54:16.256156 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:54:16 crc kubenswrapper[4922]: I1128 06:54:16.256173 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:54:16Z","lastTransitionTime":"2025-11-28T06:54:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:54:16 crc kubenswrapper[4922]: I1128 06:54:16.359783 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:54:16 crc kubenswrapper[4922]: I1128 06:54:16.359841 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:54:16 crc kubenswrapper[4922]: I1128 06:54:16.359859 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:54:16 crc kubenswrapper[4922]: I1128 06:54:16.359890 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:54:16 crc kubenswrapper[4922]: I1128 06:54:16.359913 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:54:16Z","lastTransitionTime":"2025-11-28T06:54:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 06:54:16 crc kubenswrapper[4922]: I1128 06:54:16.398758 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 06:54:16 crc kubenswrapper[4922]: I1128 06:54:16.398959 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 06:54:16 crc kubenswrapper[4922]: E1128 06:54:16.399277 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 06:54:16 crc kubenswrapper[4922]: I1128 06:54:16.399303 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-9kfr9" Nov 28 06:54:16 crc kubenswrapper[4922]: I1128 06:54:16.399371 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 06:54:16 crc kubenswrapper[4922]: E1128 06:54:16.399531 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 06:54:16 crc kubenswrapper[4922]: E1128 06:54:16.399810 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-9kfr9" podUID="709beb43-ed88-4a0a-b384-0c463e469964" Nov 28 06:54:16 crc kubenswrapper[4922]: E1128 06:54:16.400028 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 06:54:16 crc kubenswrapper[4922]: I1128 06:54:16.462925 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:54:16 crc kubenswrapper[4922]: I1128 06:54:16.462994 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:54:16 crc kubenswrapper[4922]: I1128 06:54:16.463025 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:54:16 crc kubenswrapper[4922]: I1128 06:54:16.463094 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:54:16 crc kubenswrapper[4922]: I1128 06:54:16.463116 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:54:16Z","lastTransitionTime":"2025-11-28T06:54:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:54:16 crc kubenswrapper[4922]: I1128 06:54:16.565339 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:54:16 crc kubenswrapper[4922]: I1128 06:54:16.565437 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:54:16 crc kubenswrapper[4922]: I1128 06:54:16.565471 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:54:16 crc kubenswrapper[4922]: I1128 06:54:16.565485 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:54:16 crc kubenswrapper[4922]: I1128 06:54:16.565494 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:54:16Z","lastTransitionTime":"2025-11-28T06:54:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 06:54:16 crc kubenswrapper[4922]: I1128 06:54:16.668165 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:54:16 crc kubenswrapper[4922]: I1128 06:54:16.668190 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:54:16 crc kubenswrapper[4922]: I1128 06:54:16.668198 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:54:16 crc kubenswrapper[4922]: I1128 06:54:16.668208 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:54:16 crc kubenswrapper[4922]: I1128 06:54:16.668230 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:54:16Z","lastTransitionTime":"2025-11-28T06:54:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:54:16 crc kubenswrapper[4922]: I1128 06:54:16.771264 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:54:16 crc kubenswrapper[4922]: I1128 06:54:16.771342 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:54:16 crc kubenswrapper[4922]: I1128 06:54:16.771361 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:54:16 crc kubenswrapper[4922]: I1128 06:54:16.771387 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:54:16 crc kubenswrapper[4922]: I1128 06:54:16.771408 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:54:16Z","lastTransitionTime":"2025-11-28T06:54:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:54:16 crc kubenswrapper[4922]: I1128 06:54:16.874572 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:54:16 crc kubenswrapper[4922]: I1128 06:54:16.874727 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:54:16 crc kubenswrapper[4922]: I1128 06:54:16.874756 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:54:16 crc kubenswrapper[4922]: I1128 06:54:16.874781 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:54:16 crc kubenswrapper[4922]: I1128 06:54:16.874799 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:54:16Z","lastTransitionTime":"2025-11-28T06:54:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 06:54:16 crc kubenswrapper[4922]: I1128 06:54:16.978018 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:54:16 crc kubenswrapper[4922]: I1128 06:54:16.978071 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:54:16 crc kubenswrapper[4922]: I1128 06:54:16.978090 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:54:16 crc kubenswrapper[4922]: I1128 06:54:16.978204 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:54:16 crc kubenswrapper[4922]: I1128 06:54:16.978288 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:54:16Z","lastTransitionTime":"2025-11-28T06:54:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:54:17 crc kubenswrapper[4922]: I1128 06:54:17.082492 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:54:17 crc kubenswrapper[4922]: I1128 06:54:17.082569 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:54:17 crc kubenswrapper[4922]: I1128 06:54:17.082592 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:54:17 crc kubenswrapper[4922]: I1128 06:54:17.082621 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:54:17 crc kubenswrapper[4922]: I1128 06:54:17.082640 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:54:17Z","lastTransitionTime":"2025-11-28T06:54:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:54:17 crc kubenswrapper[4922]: I1128 06:54:17.185287 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:54:17 crc kubenswrapper[4922]: I1128 06:54:17.185348 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:54:17 crc kubenswrapper[4922]: I1128 06:54:17.185384 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:54:17 crc kubenswrapper[4922]: I1128 06:54:17.185417 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:54:17 crc kubenswrapper[4922]: I1128 06:54:17.185440 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:54:17Z","lastTransitionTime":"2025-11-28T06:54:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 06:54:17 crc kubenswrapper[4922]: I1128 06:54:17.288004 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:54:17 crc kubenswrapper[4922]: I1128 06:54:17.288060 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:54:17 crc kubenswrapper[4922]: I1128 06:54:17.288077 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:54:17 crc kubenswrapper[4922]: I1128 06:54:17.288098 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:54:17 crc kubenswrapper[4922]: I1128 06:54:17.288116 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:54:17Z","lastTransitionTime":"2025-11-28T06:54:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:54:17 crc kubenswrapper[4922]: I1128 06:54:17.391177 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:54:17 crc kubenswrapper[4922]: I1128 06:54:17.391350 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:54:17 crc kubenswrapper[4922]: I1128 06:54:17.391408 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:54:17 crc kubenswrapper[4922]: I1128 06:54:17.391448 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:54:17 crc kubenswrapper[4922]: I1128 06:54:17.391489 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:54:17Z","lastTransitionTime":"2025-11-28T06:54:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:54:17 crc kubenswrapper[4922]: I1128 06:54:17.494981 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:54:17 crc kubenswrapper[4922]: I1128 06:54:17.495050 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:54:17 crc kubenswrapper[4922]: I1128 06:54:17.495068 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:54:17 crc kubenswrapper[4922]: I1128 06:54:17.495097 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:54:17 crc kubenswrapper[4922]: I1128 06:54:17.495115 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:54:17Z","lastTransitionTime":"2025-11-28T06:54:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 06:54:17 crc kubenswrapper[4922]: I1128 06:54:17.598690 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:54:17 crc kubenswrapper[4922]: I1128 06:54:17.598750 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:54:17 crc kubenswrapper[4922]: I1128 06:54:17.598767 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:54:17 crc kubenswrapper[4922]: I1128 06:54:17.598792 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:54:17 crc kubenswrapper[4922]: I1128 06:54:17.598812 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:54:17Z","lastTransitionTime":"2025-11-28T06:54:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:54:17 crc kubenswrapper[4922]: I1128 06:54:17.702550 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:54:17 crc kubenswrapper[4922]: I1128 06:54:17.702619 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:54:17 crc kubenswrapper[4922]: I1128 06:54:17.702637 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:54:17 crc kubenswrapper[4922]: I1128 06:54:17.702663 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:54:17 crc kubenswrapper[4922]: I1128 06:54:17.702683 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:54:17Z","lastTransitionTime":"2025-11-28T06:54:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:54:17 crc kubenswrapper[4922]: I1128 06:54:17.805760 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:54:17 crc kubenswrapper[4922]: I1128 06:54:17.805827 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:54:17 crc kubenswrapper[4922]: I1128 06:54:17.805849 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:54:17 crc kubenswrapper[4922]: I1128 06:54:17.805876 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:54:17 crc kubenswrapper[4922]: I1128 06:54:17.805894 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:54:17Z","lastTransitionTime":"2025-11-28T06:54:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 06:54:17 crc kubenswrapper[4922]: I1128 06:54:17.908864 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:54:17 crc kubenswrapper[4922]: I1128 06:54:17.908933 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:54:17 crc kubenswrapper[4922]: I1128 06:54:17.908949 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:54:17 crc kubenswrapper[4922]: I1128 06:54:17.908976 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:54:17 crc kubenswrapper[4922]: I1128 06:54:17.908997 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:54:17Z","lastTransitionTime":"2025-11-28T06:54:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:54:18 crc kubenswrapper[4922]: I1128 06:54:18.012322 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:54:18 crc kubenswrapper[4922]: I1128 06:54:18.012388 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:54:18 crc kubenswrapper[4922]: I1128 06:54:18.012406 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:54:18 crc kubenswrapper[4922]: I1128 06:54:18.012431 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:54:18 crc kubenswrapper[4922]: I1128 06:54:18.012448 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:54:18Z","lastTransitionTime":"2025-11-28T06:54:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:54:18 crc kubenswrapper[4922]: I1128 06:54:18.115174 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:54:18 crc kubenswrapper[4922]: I1128 06:54:18.115279 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:54:18 crc kubenswrapper[4922]: I1128 06:54:18.115301 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:54:18 crc kubenswrapper[4922]: I1128 06:54:18.115326 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:54:18 crc kubenswrapper[4922]: I1128 06:54:18.115344 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:54:18Z","lastTransitionTime":"2025-11-28T06:54:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 06:54:18 crc kubenswrapper[4922]: I1128 06:54:18.218747 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:54:18 crc kubenswrapper[4922]: I1128 06:54:18.218834 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:54:18 crc kubenswrapper[4922]: I1128 06:54:18.218860 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:54:18 crc kubenswrapper[4922]: I1128 06:54:18.218893 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:54:18 crc kubenswrapper[4922]: I1128 06:54:18.218918 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:54:18Z","lastTransitionTime":"2025-11-28T06:54:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:54:18 crc kubenswrapper[4922]: I1128 06:54:18.321784 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:54:18 crc kubenswrapper[4922]: I1128 06:54:18.321863 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:54:18 crc kubenswrapper[4922]: I1128 06:54:18.321882 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:54:18 crc kubenswrapper[4922]: I1128 06:54:18.321912 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:54:18 crc kubenswrapper[4922]: I1128 06:54:18.321931 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:54:18Z","lastTransitionTime":"2025-11-28T06:54:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:54:18 crc kubenswrapper[4922]: I1128 06:54:18.397795 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 06:54:18 crc kubenswrapper[4922]: I1128 06:54:18.397850 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 06:54:18 crc kubenswrapper[4922]: I1128 06:54:18.397872 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-9kfr9" Nov 28 06:54:18 crc kubenswrapper[4922]: I1128 06:54:18.397801 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 06:54:18 crc kubenswrapper[4922]: E1128 06:54:18.397990 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 06:54:18 crc kubenswrapper[4922]: E1128 06:54:18.398141 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 06:54:18 crc kubenswrapper[4922]: E1128 06:54:18.398336 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-9kfr9" podUID="709beb43-ed88-4a0a-b384-0c463e469964" Nov 28 06:54:18 crc kubenswrapper[4922]: E1128 06:54:18.398448 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 06:54:18 crc kubenswrapper[4922]: I1128 06:54:18.424463 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:54:18 crc kubenswrapper[4922]: I1128 06:54:18.424519 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:54:18 crc kubenswrapper[4922]: I1128 06:54:18.424537 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:54:18 crc kubenswrapper[4922]: I1128 06:54:18.424564 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:54:18 crc kubenswrapper[4922]: I1128 06:54:18.424582 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:54:18Z","lastTransitionTime":"2025-11-28T06:54:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 06:54:18 crc kubenswrapper[4922]: I1128 06:54:18.528134 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:54:18 crc kubenswrapper[4922]: I1128 06:54:18.528208 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:54:18 crc kubenswrapper[4922]: I1128 06:54:18.528247 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:54:18 crc kubenswrapper[4922]: I1128 06:54:18.528280 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:54:18 crc kubenswrapper[4922]: I1128 06:54:18.528296 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:54:18Z","lastTransitionTime":"2025-11-28T06:54:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:54:18 crc kubenswrapper[4922]: I1128 06:54:18.631567 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:54:18 crc kubenswrapper[4922]: I1128 06:54:18.631626 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:54:18 crc kubenswrapper[4922]: I1128 06:54:18.631636 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:54:18 crc kubenswrapper[4922]: I1128 06:54:18.631657 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:54:18 crc kubenswrapper[4922]: I1128 06:54:18.631669 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:54:18Z","lastTransitionTime":"2025-11-28T06:54:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:54:18 crc kubenswrapper[4922]: I1128 06:54:18.735346 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:54:18 crc kubenswrapper[4922]: I1128 06:54:18.735435 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:54:18 crc kubenswrapper[4922]: I1128 06:54:18.735462 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:54:18 crc kubenswrapper[4922]: I1128 06:54:18.735498 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:54:18 crc kubenswrapper[4922]: I1128 06:54:18.735526 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:54:18Z","lastTransitionTime":"2025-11-28T06:54:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 06:54:18 crc kubenswrapper[4922]: I1128 06:54:18.838365 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:54:18 crc kubenswrapper[4922]: I1128 06:54:18.838433 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:54:18 crc kubenswrapper[4922]: I1128 06:54:18.838448 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:54:18 crc kubenswrapper[4922]: I1128 06:54:18.838477 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:54:18 crc kubenswrapper[4922]: I1128 06:54:18.838491 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:54:18Z","lastTransitionTime":"2025-11-28T06:54:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:54:18 crc kubenswrapper[4922]: I1128 06:54:18.941411 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:54:18 crc kubenswrapper[4922]: I1128 06:54:18.941472 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:54:18 crc kubenswrapper[4922]: I1128 06:54:18.941483 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:54:18 crc kubenswrapper[4922]: I1128 06:54:18.941510 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:54:18 crc kubenswrapper[4922]: I1128 06:54:18.941522 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:54:18Z","lastTransitionTime":"2025-11-28T06:54:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:54:19 crc kubenswrapper[4922]: I1128 06:54:19.045094 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:54:19 crc kubenswrapper[4922]: I1128 06:54:19.045164 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:54:19 crc kubenswrapper[4922]: I1128 06:54:19.045184 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:54:19 crc kubenswrapper[4922]: I1128 06:54:19.045213 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:54:19 crc kubenswrapper[4922]: I1128 06:54:19.045273 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:54:19Z","lastTransitionTime":"2025-11-28T06:54:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 06:54:19 crc kubenswrapper[4922]: I1128 06:54:19.148595 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:54:19 crc kubenswrapper[4922]: I1128 06:54:19.148692 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:54:19 crc kubenswrapper[4922]: I1128 06:54:19.148717 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:54:19 crc kubenswrapper[4922]: I1128 06:54:19.148746 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:54:19 crc kubenswrapper[4922]: I1128 06:54:19.148770 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:54:19Z","lastTransitionTime":"2025-11-28T06:54:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:54:19 crc kubenswrapper[4922]: I1128 06:54:19.170788 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:54:19 crc kubenswrapper[4922]: I1128 06:54:19.170902 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:54:19 crc kubenswrapper[4922]: I1128 06:54:19.170928 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:54:19 crc kubenswrapper[4922]: I1128 06:54:19.170949 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:54:19 crc kubenswrapper[4922]: I1128 06:54:19.170966 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:54:19Z","lastTransitionTime":"2025-11-28T06:54:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 06:54:19 crc kubenswrapper[4922]: E1128 06:54:19.192505 4922 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T06:54:19Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T06:54:19Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T06:54:19Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T06:54:19Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T06:54:19Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T06:54:19Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T06:54:19Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T06:54:19Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"3ea1e6bb-61b7-453d-b9cb-290e4e3cdf54\\\",\\\"systemUUID\\\":\\\"a83ea3b2-2af6-4e19-83ef-b63bfe4faed4\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:54:19Z is after 2025-08-24T17:21:41Z" Nov 28 06:54:19 crc kubenswrapper[4922]: I1128 06:54:19.198083 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:54:19 crc kubenswrapper[4922]: I1128 06:54:19.198146 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 28 06:54:19 crc kubenswrapper[4922]: I1128 06:54:19.198163 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:54:19 crc kubenswrapper[4922]: I1128 06:54:19.198192 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:54:19 crc kubenswrapper[4922]: I1128 06:54:19.198211 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:54:19Z","lastTransitionTime":"2025-11-28T06:54:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:54:19 crc kubenswrapper[4922]: E1128 06:54:19.219991 4922 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T06:54:19Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T06:54:19Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T06:54:19Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T06:54:19Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T06:54:19Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T06:54:19Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T06:54:19Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T06:54:19Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"3ea1e6bb-61b7-453d-b9cb-290e4e3cdf54\\\",\\\"systemUUID\\\":\\\"a83ea3b2-2af6-4e19-83ef-b63bfe4faed4\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:54:19Z is after 2025-08-24T17:21:41Z" Nov 28 06:54:19 crc kubenswrapper[4922]: I1128 06:54:19.226067 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:54:19 crc kubenswrapper[4922]: I1128 06:54:19.226147 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 28 06:54:19 crc kubenswrapper[4922]: I1128 06:54:19.226168 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:54:19 crc kubenswrapper[4922]: I1128 06:54:19.226196 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:54:19 crc kubenswrapper[4922]: I1128 06:54:19.226256 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:54:19Z","lastTransitionTime":"2025-11-28T06:54:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:54:19 crc kubenswrapper[4922]: E1128 06:54:19.248346 4922 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T06:54:19Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T06:54:19Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T06:54:19Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T06:54:19Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T06:54:19Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T06:54:19Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T06:54:19Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T06:54:19Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"3ea1e6bb-61b7-453d-b9cb-290e4e3cdf54\\\",\\\"systemUUID\\\":\\\"a83ea3b2-2af6-4e19-83ef-b63bfe4faed4\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:54:19Z is after 2025-08-24T17:21:41Z" Nov 28 06:54:19 crc kubenswrapper[4922]: I1128 06:54:19.255063 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:54:19 crc kubenswrapper[4922]: I1128 06:54:19.255124 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 28 06:54:19 crc kubenswrapper[4922]: I1128 06:54:19.255144 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:54:19 crc kubenswrapper[4922]: I1128 06:54:19.255167 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:54:19 crc kubenswrapper[4922]: I1128 06:54:19.255205 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:54:19Z","lastTransitionTime":"2025-11-28T06:54:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:54:19 crc kubenswrapper[4922]: E1128 06:54:19.278636 4922 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T06:54:19Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T06:54:19Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T06:54:19Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T06:54:19Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T06:54:19Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T06:54:19Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T06:54:19Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T06:54:19Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"3ea1e6bb-61b7-453d-b9cb-290e4e3cdf54\\\",\\\"systemUUID\\\":\\\"a83ea3b2-2af6-4e19-83ef-b63bfe4faed4\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:54:19Z is after 2025-08-24T17:21:41Z" Nov 28 06:54:19 crc kubenswrapper[4922]: I1128 06:54:19.283805 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:54:19 crc kubenswrapper[4922]: I1128 06:54:19.283865 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 28 06:54:19 crc kubenswrapper[4922]: I1128 06:54:19.283884 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:54:19 crc kubenswrapper[4922]: I1128 06:54:19.283911 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:54:19 crc kubenswrapper[4922]: I1128 06:54:19.283930 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:54:19Z","lastTransitionTime":"2025-11-28T06:54:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:54:19 crc kubenswrapper[4922]: E1128 06:54:19.306537 4922 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T06:54:19Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T06:54:19Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T06:54:19Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T06:54:19Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T06:54:19Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T06:54:19Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T06:54:19Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T06:54:19Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"3ea1e6bb-61b7-453d-b9cb-290e4e3cdf54\\\",\\\"systemUUID\\\":\\\"a83ea3b2-2af6-4e19-83ef-b63bfe4faed4\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T06:54:19Z is after 2025-08-24T17:21:41Z" Nov 28 06:54:19 crc kubenswrapper[4922]: E1128 06:54:19.306761 4922 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Nov 28 06:54:19 crc kubenswrapper[4922]: I1128 06:54:19.309411 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Nov 28 06:54:19 crc kubenswrapper[4922]: I1128 06:54:19.309507 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:54:19 crc kubenswrapper[4922]: I1128 06:54:19.309572 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:54:19 crc kubenswrapper[4922]: I1128 06:54:19.309602 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:54:19 crc kubenswrapper[4922]: I1128 06:54:19.309677 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:54:19Z","lastTransitionTime":"2025-11-28T06:54:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:54:19 crc kubenswrapper[4922]: I1128 06:54:19.412124 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:54:19 crc kubenswrapper[4922]: I1128 06:54:19.412188 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:54:19 crc kubenswrapper[4922]: I1128 06:54:19.412208 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:54:19 crc kubenswrapper[4922]: I1128 06:54:19.412256 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:54:19 crc kubenswrapper[4922]: I1128 06:54:19.412277 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:54:19Z","lastTransitionTime":"2025-11-28T06:54:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:54:19 crc kubenswrapper[4922]: I1128 06:54:19.515440 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:54:19 crc kubenswrapper[4922]: I1128 06:54:19.515517 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:54:19 crc kubenswrapper[4922]: I1128 06:54:19.515535 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:54:19 crc kubenswrapper[4922]: I1128 06:54:19.515560 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:54:19 crc kubenswrapper[4922]: I1128 06:54:19.515581 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:54:19Z","lastTransitionTime":"2025-11-28T06:54:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 06:54:19 crc kubenswrapper[4922]: I1128 06:54:19.619672 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:54:19 crc kubenswrapper[4922]: I1128 06:54:19.619727 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:54:19 crc kubenswrapper[4922]: I1128 06:54:19.619744 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:54:19 crc kubenswrapper[4922]: I1128 06:54:19.619766 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:54:19 crc kubenswrapper[4922]: I1128 06:54:19.619783 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:54:19Z","lastTransitionTime":"2025-11-28T06:54:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:54:19 crc kubenswrapper[4922]: I1128 06:54:19.722720 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:54:19 crc kubenswrapper[4922]: I1128 06:54:19.722767 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:54:19 crc kubenswrapper[4922]: I1128 06:54:19.722821 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:54:19 crc kubenswrapper[4922]: I1128 06:54:19.722848 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:54:19 crc kubenswrapper[4922]: I1128 06:54:19.722875 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:54:19Z","lastTransitionTime":"2025-11-28T06:54:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:54:19 crc kubenswrapper[4922]: I1128 06:54:19.825370 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:54:19 crc kubenswrapper[4922]: I1128 06:54:19.825478 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:54:19 crc kubenswrapper[4922]: I1128 06:54:19.825495 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:54:19 crc kubenswrapper[4922]: I1128 06:54:19.825520 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:54:19 crc kubenswrapper[4922]: I1128 06:54:19.825539 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:54:19Z","lastTransitionTime":"2025-11-28T06:54:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 06:54:19 crc kubenswrapper[4922]: I1128 06:54:19.928404 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:54:19 crc kubenswrapper[4922]: I1128 06:54:19.928464 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:54:19 crc kubenswrapper[4922]: I1128 06:54:19.928482 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:54:19 crc kubenswrapper[4922]: I1128 06:54:19.928504 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:54:19 crc kubenswrapper[4922]: I1128 06:54:19.928521 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:54:19Z","lastTransitionTime":"2025-11-28T06:54:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:54:20 crc kubenswrapper[4922]: I1128 06:54:20.031438 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:54:20 crc kubenswrapper[4922]: I1128 06:54:20.031508 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:54:20 crc kubenswrapper[4922]: I1128 06:54:20.031526 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:54:20 crc kubenswrapper[4922]: I1128 06:54:20.031548 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:54:20 crc kubenswrapper[4922]: I1128 06:54:20.031570 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:54:20Z","lastTransitionTime":"2025-11-28T06:54:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:54:20 crc kubenswrapper[4922]: I1128 06:54:20.135259 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:54:20 crc kubenswrapper[4922]: I1128 06:54:20.135338 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:54:20 crc kubenswrapper[4922]: I1128 06:54:20.135356 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:54:20 crc kubenswrapper[4922]: I1128 06:54:20.135383 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:54:20 crc kubenswrapper[4922]: I1128 06:54:20.135402 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:54:20Z","lastTransitionTime":"2025-11-28T06:54:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 06:54:20 crc kubenswrapper[4922]: I1128 06:54:20.238413 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:54:20 crc kubenswrapper[4922]: I1128 06:54:20.238494 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:54:20 crc kubenswrapper[4922]: I1128 06:54:20.238520 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:54:20 crc kubenswrapper[4922]: I1128 06:54:20.238549 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:54:20 crc kubenswrapper[4922]: I1128 06:54:20.238569 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:54:20Z","lastTransitionTime":"2025-11-28T06:54:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:54:20 crc kubenswrapper[4922]: I1128 06:54:20.341606 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:54:20 crc kubenswrapper[4922]: I1128 06:54:20.341673 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:54:20 crc kubenswrapper[4922]: I1128 06:54:20.341691 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:54:20 crc kubenswrapper[4922]: I1128 06:54:20.341722 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:54:20 crc kubenswrapper[4922]: I1128 06:54:20.341741 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:54:20Z","lastTransitionTime":"2025-11-28T06:54:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:54:20 crc kubenswrapper[4922]: I1128 06:54:20.397678 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 06:54:20 crc kubenswrapper[4922]: I1128 06:54:20.397744 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 06:54:20 crc kubenswrapper[4922]: E1128 06:54:20.397860 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 06:54:20 crc kubenswrapper[4922]: I1128 06:54:20.397945 4922 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/network-metrics-daemon-9kfr9" Nov 28 06:54:20 crc kubenswrapper[4922]: I1128 06:54:20.397988 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 06:54:20 crc kubenswrapper[4922]: E1128 06:54:20.398116 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-9kfr9" podUID="709beb43-ed88-4a0a-b384-0c463e469964" Nov 28 06:54:20 crc kubenswrapper[4922]: E1128 06:54:20.398093 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 06:54:20 crc kubenswrapper[4922]: E1128 06:54:20.398212 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 06:54:20 crc kubenswrapper[4922]: I1128 06:54:20.444894 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:54:20 crc kubenswrapper[4922]: I1128 06:54:20.444966 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:54:20 crc kubenswrapper[4922]: I1128 06:54:20.444985 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:54:20 crc kubenswrapper[4922]: I1128 06:54:20.445012 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:54:20 crc kubenswrapper[4922]: I1128 06:54:20.445031 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:54:20Z","lastTransitionTime":"2025-11-28T06:54:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 06:54:20 crc kubenswrapper[4922]: I1128 06:54:20.548148 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:54:20 crc kubenswrapper[4922]: I1128 06:54:20.548205 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:54:20 crc kubenswrapper[4922]: I1128 06:54:20.548254 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:54:20 crc kubenswrapper[4922]: I1128 06:54:20.548281 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:54:20 crc kubenswrapper[4922]: I1128 06:54:20.548300 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:54:20Z","lastTransitionTime":"2025-11-28T06:54:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:54:20 crc kubenswrapper[4922]: I1128 06:54:20.651533 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:54:20 crc kubenswrapper[4922]: I1128 06:54:20.651595 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:54:20 crc kubenswrapper[4922]: I1128 06:54:20.651618 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:54:20 crc kubenswrapper[4922]: I1128 06:54:20.651645 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:54:20 crc kubenswrapper[4922]: I1128 06:54:20.651666 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:54:20Z","lastTransitionTime":"2025-11-28T06:54:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:54:20 crc kubenswrapper[4922]: I1128 06:54:20.755424 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:54:20 crc kubenswrapper[4922]: I1128 06:54:20.755492 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:54:20 crc kubenswrapper[4922]: I1128 06:54:20.755510 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:54:20 crc kubenswrapper[4922]: I1128 06:54:20.755538 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:54:20 crc kubenswrapper[4922]: I1128 06:54:20.755556 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:54:20Z","lastTransitionTime":"2025-11-28T06:54:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 06:54:20 crc kubenswrapper[4922]: I1128 06:54:20.858681 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:54:20 crc kubenswrapper[4922]: I1128 06:54:20.858753 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:54:20 crc kubenswrapper[4922]: I1128 06:54:20.858788 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:54:20 crc kubenswrapper[4922]: I1128 06:54:20.858821 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:54:20 crc kubenswrapper[4922]: I1128 06:54:20.858844 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:54:20Z","lastTransitionTime":"2025-11-28T06:54:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:54:20 crc kubenswrapper[4922]: I1128 06:54:20.961762 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:54:20 crc kubenswrapper[4922]: I1128 06:54:20.961840 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:54:20 crc kubenswrapper[4922]: I1128 06:54:20.961860 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:54:20 crc kubenswrapper[4922]: I1128 06:54:20.961884 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:54:20 crc kubenswrapper[4922]: I1128 06:54:20.961903 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:54:20Z","lastTransitionTime":"2025-11-28T06:54:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:54:21 crc kubenswrapper[4922]: I1128 06:54:21.064848 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:54:21 crc kubenswrapper[4922]: I1128 06:54:21.064900 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:54:21 crc kubenswrapper[4922]: I1128 06:54:21.064916 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:54:21 crc kubenswrapper[4922]: I1128 06:54:21.064938 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:54:21 crc kubenswrapper[4922]: I1128 06:54:21.064956 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:54:21Z","lastTransitionTime":"2025-11-28T06:54:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 06:54:21 crc kubenswrapper[4922]: I1128 06:54:21.168289 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:54:21 crc kubenswrapper[4922]: I1128 06:54:21.168399 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:54:21 crc kubenswrapper[4922]: I1128 06:54:21.168420 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:54:21 crc kubenswrapper[4922]: I1128 06:54:21.168449 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:54:21 crc kubenswrapper[4922]: I1128 06:54:21.168466 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:54:21Z","lastTransitionTime":"2025-11-28T06:54:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:54:21 crc kubenswrapper[4922]: I1128 06:54:21.272516 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:54:21 crc kubenswrapper[4922]: I1128 06:54:21.272590 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:54:21 crc kubenswrapper[4922]: I1128 06:54:21.272613 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:54:21 crc kubenswrapper[4922]: I1128 06:54:21.272648 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:54:21 crc kubenswrapper[4922]: I1128 06:54:21.272672 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:54:21Z","lastTransitionTime":"2025-11-28T06:54:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:54:21 crc kubenswrapper[4922]: I1128 06:54:21.376437 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:54:21 crc kubenswrapper[4922]: I1128 06:54:21.376539 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:54:21 crc kubenswrapper[4922]: I1128 06:54:21.376574 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:54:21 crc kubenswrapper[4922]: I1128 06:54:21.376627 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:54:21 crc kubenswrapper[4922]: I1128 06:54:21.376651 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:54:21Z","lastTransitionTime":"2025-11-28T06:54:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 06:54:21 crc kubenswrapper[4922]: I1128 06:54:21.480255 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:54:21 crc kubenswrapper[4922]: I1128 06:54:21.480355 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:54:21 crc kubenswrapper[4922]: I1128 06:54:21.480372 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:54:21 crc kubenswrapper[4922]: I1128 06:54:21.480397 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:54:21 crc kubenswrapper[4922]: I1128 06:54:21.480432 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:54:21Z","lastTransitionTime":"2025-11-28T06:54:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:54:21 crc kubenswrapper[4922]: I1128 06:54:21.583416 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:54:21 crc kubenswrapper[4922]: I1128 06:54:21.583494 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:54:21 crc kubenswrapper[4922]: I1128 06:54:21.583515 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:54:21 crc kubenswrapper[4922]: I1128 06:54:21.583541 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:54:21 crc kubenswrapper[4922]: I1128 06:54:21.583560 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:54:21Z","lastTransitionTime":"2025-11-28T06:54:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:54:21 crc kubenswrapper[4922]: I1128 06:54:21.687287 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:54:21 crc kubenswrapper[4922]: I1128 06:54:21.687360 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:54:21 crc kubenswrapper[4922]: I1128 06:54:21.687382 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:54:21 crc kubenswrapper[4922]: I1128 06:54:21.687434 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:54:21 crc kubenswrapper[4922]: I1128 06:54:21.687452 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:54:21Z","lastTransitionTime":"2025-11-28T06:54:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 06:54:21 crc kubenswrapper[4922]: I1128 06:54:21.790780 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:54:21 crc kubenswrapper[4922]: I1128 06:54:21.790839 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:54:21 crc kubenswrapper[4922]: I1128 06:54:21.790856 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:54:21 crc kubenswrapper[4922]: I1128 06:54:21.790881 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:54:21 crc kubenswrapper[4922]: I1128 06:54:21.790897 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:54:21Z","lastTransitionTime":"2025-11-28T06:54:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:54:21 crc kubenswrapper[4922]: I1128 06:54:21.894215 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:54:21 crc kubenswrapper[4922]: I1128 06:54:21.894316 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:54:21 crc kubenswrapper[4922]: I1128 06:54:21.894334 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:54:21 crc kubenswrapper[4922]: I1128 06:54:21.894361 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:54:21 crc kubenswrapper[4922]: I1128 06:54:21.894378 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:54:21Z","lastTransitionTime":"2025-11-28T06:54:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:54:21 crc kubenswrapper[4922]: I1128 06:54:21.997350 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:54:21 crc kubenswrapper[4922]: I1128 06:54:21.997400 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:54:21 crc kubenswrapper[4922]: I1128 06:54:21.997420 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:54:21 crc kubenswrapper[4922]: I1128 06:54:21.997450 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:54:21 crc kubenswrapper[4922]: I1128 06:54:21.997470 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:54:21Z","lastTransitionTime":"2025-11-28T06:54:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 06:54:22 crc kubenswrapper[4922]: I1128 06:54:22.101335 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:54:22 crc kubenswrapper[4922]: I1128 06:54:22.101406 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:54:22 crc kubenswrapper[4922]: I1128 06:54:22.101427 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:54:22 crc kubenswrapper[4922]: I1128 06:54:22.101461 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:54:22 crc kubenswrapper[4922]: I1128 06:54:22.101489 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:54:22Z","lastTransitionTime":"2025-11-28T06:54:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:54:22 crc kubenswrapper[4922]: I1128 06:54:22.204743 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:54:22 crc kubenswrapper[4922]: I1128 06:54:22.204820 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:54:22 crc kubenswrapper[4922]: I1128 06:54:22.204846 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:54:22 crc kubenswrapper[4922]: I1128 06:54:22.204877 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:54:22 crc kubenswrapper[4922]: I1128 06:54:22.204901 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:54:22Z","lastTransitionTime":"2025-11-28T06:54:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:54:22 crc kubenswrapper[4922]: I1128 06:54:22.308615 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:54:22 crc kubenswrapper[4922]: I1128 06:54:22.308675 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:54:22 crc kubenswrapper[4922]: I1128 06:54:22.308685 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:54:22 crc kubenswrapper[4922]: I1128 06:54:22.308705 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:54:22 crc kubenswrapper[4922]: I1128 06:54:22.308717 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:54:22Z","lastTransitionTime":"2025-11-28T06:54:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 06:54:22 crc kubenswrapper[4922]: I1128 06:54:22.398094 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 06:54:22 crc kubenswrapper[4922]: I1128 06:54:22.398126 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 06:54:22 crc kubenswrapper[4922]: I1128 06:54:22.398130 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-9kfr9" Nov 28 06:54:22 crc kubenswrapper[4922]: E1128 06:54:22.398366 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 06:54:22 crc kubenswrapper[4922]: I1128 06:54:22.398434 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 06:54:22 crc kubenswrapper[4922]: E1128 06:54:22.398722 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 06:54:22 crc kubenswrapper[4922]: E1128 06:54:22.398884 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-9kfr9" podUID="709beb43-ed88-4a0a-b384-0c463e469964" Nov 28 06:54:22 crc kubenswrapper[4922]: E1128 06:54:22.398566 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 06:54:22 crc kubenswrapper[4922]: I1128 06:54:22.412379 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:54:22 crc kubenswrapper[4922]: I1128 06:54:22.412434 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:54:22 crc kubenswrapper[4922]: I1128 06:54:22.412445 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:54:22 crc kubenswrapper[4922]: I1128 06:54:22.412468 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:54:22 crc kubenswrapper[4922]: I1128 06:54:22.412481 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:54:22Z","lastTransitionTime":"2025-11-28T06:54:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:54:22 crc kubenswrapper[4922]: I1128 06:54:22.514954 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:54:22 crc kubenswrapper[4922]: I1128 06:54:22.515014 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:54:22 crc kubenswrapper[4922]: I1128 06:54:22.515032 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:54:22 crc kubenswrapper[4922]: I1128 06:54:22.515057 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:54:22 crc kubenswrapper[4922]: I1128 06:54:22.515075 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:54:22Z","lastTransitionTime":"2025-11-28T06:54:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 06:54:22 crc kubenswrapper[4922]: I1128 06:54:22.617938 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:54:22 crc kubenswrapper[4922]: I1128 06:54:22.618017 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:54:22 crc kubenswrapper[4922]: I1128 06:54:22.618036 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:54:22 crc kubenswrapper[4922]: I1128 06:54:22.618060 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:54:22 crc kubenswrapper[4922]: I1128 06:54:22.618077 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:54:22Z","lastTransitionTime":"2025-11-28T06:54:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:54:22 crc kubenswrapper[4922]: I1128 06:54:22.720344 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:54:22 crc kubenswrapper[4922]: I1128 06:54:22.720401 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:54:22 crc kubenswrapper[4922]: I1128 06:54:22.720420 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:54:22 crc kubenswrapper[4922]: I1128 06:54:22.720445 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:54:22 crc kubenswrapper[4922]: I1128 06:54:22.720461 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:54:22Z","lastTransitionTime":"2025-11-28T06:54:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:54:22 crc kubenswrapper[4922]: I1128 06:54:22.822866 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:54:22 crc kubenswrapper[4922]: I1128 06:54:22.822920 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:54:22 crc kubenswrapper[4922]: I1128 06:54:22.822938 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:54:22 crc kubenswrapper[4922]: I1128 06:54:22.822960 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:54:22 crc kubenswrapper[4922]: I1128 06:54:22.822977 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:54:22Z","lastTransitionTime":"2025-11-28T06:54:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 06:54:22 crc kubenswrapper[4922]: I1128 06:54:22.925210 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:54:22 crc kubenswrapper[4922]: I1128 06:54:22.925297 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:54:22 crc kubenswrapper[4922]: I1128 06:54:22.925320 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:54:22 crc kubenswrapper[4922]: I1128 06:54:22.925350 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:54:22 crc kubenswrapper[4922]: I1128 06:54:22.925372 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:54:22Z","lastTransitionTime":"2025-11-28T06:54:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:54:23 crc kubenswrapper[4922]: I1128 06:54:23.028159 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:54:23 crc kubenswrapper[4922]: I1128 06:54:23.028263 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:54:23 crc kubenswrapper[4922]: I1128 06:54:23.028287 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:54:23 crc kubenswrapper[4922]: I1128 06:54:23.028317 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:54:23 crc kubenswrapper[4922]: I1128 06:54:23.028338 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:54:23Z","lastTransitionTime":"2025-11-28T06:54:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:54:23 crc kubenswrapper[4922]: I1128 06:54:23.131280 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:54:23 crc kubenswrapper[4922]: I1128 06:54:23.131354 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:54:23 crc kubenswrapper[4922]: I1128 06:54:23.131378 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:54:23 crc kubenswrapper[4922]: I1128 06:54:23.131406 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:54:23 crc kubenswrapper[4922]: I1128 06:54:23.131424 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:54:23Z","lastTransitionTime":"2025-11-28T06:54:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 06:54:23 crc kubenswrapper[4922]: I1128 06:54:23.234763 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:54:23 crc kubenswrapper[4922]: I1128 06:54:23.234838 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:54:23 crc kubenswrapper[4922]: I1128 06:54:23.234873 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:54:23 crc kubenswrapper[4922]: I1128 06:54:23.234908 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:54:23 crc kubenswrapper[4922]: I1128 06:54:23.234929 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:54:23Z","lastTransitionTime":"2025-11-28T06:54:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:54:23 crc kubenswrapper[4922]: I1128 06:54:23.338461 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:54:23 crc kubenswrapper[4922]: I1128 06:54:23.338526 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:54:23 crc kubenswrapper[4922]: I1128 06:54:23.338542 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:54:23 crc kubenswrapper[4922]: I1128 06:54:23.338569 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:54:23 crc kubenswrapper[4922]: I1128 06:54:23.338586 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:54:23Z","lastTransitionTime":"2025-11-28T06:54:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Nov 28 06:54:23 crc kubenswrapper[4922]: I1128 06:54:23.400863 4922 scope.go:117] "RemoveContainer" containerID="34af1a43ea6a98f41968e9d8fedf88b2886eff55e56e71236e5e9a4c181652af"
Nov 28 06:54:23 crc kubenswrapper[4922]: E1128 06:54:23.401291 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 40s restarting failed container=ovnkube-controller pod=ovnkube-node-7gdxt_openshift-ovn-kubernetes(ac5c6b67-2037-400e-8e03-845b47d8ca67)\"" pod="openshift-ovn-kubernetes/ovnkube-node-7gdxt" podUID="ac5c6b67-2037-400e-8e03-845b47d8ca67"
Nov 28 06:54:23 crc kubenswrapper[4922]: I1128 06:54:23.441272 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 06:54:23 crc kubenswrapper[4922]: I1128 06:54:23.441323 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 06:54:23 crc kubenswrapper[4922]: I1128 06:54:23.441339 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 06:54:23 crc kubenswrapper[4922]: I1128 06:54:23.441360 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 06:54:23 crc kubenswrapper[4922]: I1128 06:54:23.441375 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:54:23Z","lastTransitionTime":"2025-11-28T06:54:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 06:54:23 crc kubenswrapper[4922]: I1128 06:54:23.545094 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 06:54:23 crc kubenswrapper[4922]: I1128 06:54:23.545160 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 06:54:23 crc kubenswrapper[4922]: I1128 06:54:23.545184 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 06:54:23 crc kubenswrapper[4922]: I1128 06:54:23.545301 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 06:54:23 crc kubenswrapper[4922]: I1128 06:54:23.545327 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:54:23Z","lastTransitionTime":"2025-11-28T06:54:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/.
Has your network provider started?"} Nov 28 06:54:23 crc kubenswrapper[4922]: I1128 06:54:23.648803 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:54:23 crc kubenswrapper[4922]: I1128 06:54:23.648863 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:54:23 crc kubenswrapper[4922]: I1128 06:54:23.648880 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:54:23 crc kubenswrapper[4922]: I1128 06:54:23.648909 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:54:23 crc kubenswrapper[4922]: I1128 06:54:23.648926 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:54:23Z","lastTransitionTime":"2025-11-28T06:54:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:54:23 crc kubenswrapper[4922]: I1128 06:54:23.751894 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:54:23 crc kubenswrapper[4922]: I1128 06:54:23.751963 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:54:23 crc kubenswrapper[4922]: I1128 06:54:23.751981 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:54:23 crc kubenswrapper[4922]: I1128 06:54:23.752009 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:54:23 crc kubenswrapper[4922]: I1128 06:54:23.752028 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:54:23Z","lastTransitionTime":"2025-11-28T06:54:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:54:23 crc kubenswrapper[4922]: I1128 06:54:23.855171 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:54:23 crc kubenswrapper[4922]: I1128 06:54:23.855281 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:54:23 crc kubenswrapper[4922]: I1128 06:54:23.855300 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:54:23 crc kubenswrapper[4922]: I1128 06:54:23.855327 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:54:23 crc kubenswrapper[4922]: I1128 06:54:23.855345 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:54:23Z","lastTransitionTime":"2025-11-28T06:54:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 06:54:23 crc kubenswrapper[4922]: I1128 06:54:23.958183 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:54:23 crc kubenswrapper[4922]: I1128 06:54:23.958304 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:54:23 crc kubenswrapper[4922]: I1128 06:54:23.958331 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:54:23 crc kubenswrapper[4922]: I1128 06:54:23.958361 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:54:23 crc kubenswrapper[4922]: I1128 06:54:23.958382 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:54:23Z","lastTransitionTime":"2025-11-28T06:54:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:54:24 crc kubenswrapper[4922]: I1128 06:54:24.060591 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:54:24 crc kubenswrapper[4922]: I1128 06:54:24.060644 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:54:24 crc kubenswrapper[4922]: I1128 06:54:24.060666 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:54:24 crc kubenswrapper[4922]: I1128 06:54:24.060697 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:54:24 crc kubenswrapper[4922]: I1128 06:54:24.060721 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:54:24Z","lastTransitionTime":"2025-11-28T06:54:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:54:24 crc kubenswrapper[4922]: I1128 06:54:24.164181 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:54:24 crc kubenswrapper[4922]: I1128 06:54:24.164282 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:54:24 crc kubenswrapper[4922]: I1128 06:54:24.164301 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:54:24 crc kubenswrapper[4922]: I1128 06:54:24.164325 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:54:24 crc kubenswrapper[4922]: I1128 06:54:24.164343 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:54:24Z","lastTransitionTime":"2025-11-28T06:54:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Nov 28 06:54:24 crc kubenswrapper[4922]: I1128 06:54:24.266623 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 06:54:24 crc kubenswrapper[4922]: I1128 06:54:24.266677 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 06:54:24 crc kubenswrapper[4922]: I1128 06:54:24.266690 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 06:54:24 crc kubenswrapper[4922]: I1128 06:54:24.266707 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 06:54:24 crc kubenswrapper[4922]: I1128 06:54:24.266718 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:54:24Z","lastTransitionTime":"2025-11-28T06:54:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 06:54:24 crc kubenswrapper[4922]: I1128 06:54:24.374090 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 06:54:24 crc kubenswrapper[4922]: I1128 06:54:24.374152 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 06:54:24 crc kubenswrapper[4922]: I1128 06:54:24.374171 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 06:54:24 crc kubenswrapper[4922]: I1128 06:54:24.374196 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 06:54:24 crc kubenswrapper[4922]: I1128 06:54:24.374214 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:54:24Z","lastTransitionTime":"2025-11-28T06:54:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 06:54:24 crc kubenswrapper[4922]: I1128 06:54:24.398371 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 28 06:54:24 crc kubenswrapper[4922]: I1128 06:54:24.398411 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 28 06:54:24 crc kubenswrapper[4922]: E1128 06:54:24.398563 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 28 06:54:24 crc kubenswrapper[4922]: I1128 06:54:24.398622 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-9kfr9"
Nov 28 06:54:24 crc kubenswrapper[4922]: I1128 06:54:24.398712 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 28 06:54:24 crc kubenswrapper[4922]: E1128 06:54:24.398925 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 28 06:54:24 crc kubenswrapper[4922]: E1128 06:54:24.399024 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-9kfr9" podUID="709beb43-ed88-4a0a-b384-0c463e469964"
Nov 28 06:54:24 crc kubenswrapper[4922]: E1128 06:54:24.399554 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 28 06:54:24 crc kubenswrapper[4922]: I1128 06:54:24.476774 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 06:54:24 crc kubenswrapper[4922]: I1128 06:54:24.476844 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 06:54:24 crc kubenswrapper[4922]: I1128 06:54:24.476863 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 06:54:24 crc kubenswrapper[4922]: I1128 06:54:24.476890 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 06:54:24 crc kubenswrapper[4922]: I1128 06:54:24.476909 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:54:24Z","lastTransitionTime":"2025-11-28T06:54:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/.
Has your network provider started?"} Nov 28 06:54:24 crc kubenswrapper[4922]: I1128 06:54:24.579826 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:54:24 crc kubenswrapper[4922]: I1128 06:54:24.579896 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:54:24 crc kubenswrapper[4922]: I1128 06:54:24.579908 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:54:24 crc kubenswrapper[4922]: I1128 06:54:24.579926 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:54:24 crc kubenswrapper[4922]: I1128 06:54:24.580422 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:54:24Z","lastTransitionTime":"2025-11-28T06:54:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:54:24 crc kubenswrapper[4922]: I1128 06:54:24.683004 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:54:24 crc kubenswrapper[4922]: I1128 06:54:24.683048 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:54:24 crc kubenswrapper[4922]: I1128 06:54:24.683058 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:54:24 crc kubenswrapper[4922]: I1128 06:54:24.683075 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:54:24 crc kubenswrapper[4922]: I1128 06:54:24.683087 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:54:24Z","lastTransitionTime":"2025-11-28T06:54:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:54:24 crc kubenswrapper[4922]: I1128 06:54:24.787642 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:54:24 crc kubenswrapper[4922]: I1128 06:54:24.787703 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:54:24 crc kubenswrapper[4922]: I1128 06:54:24.787720 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:54:24 crc kubenswrapper[4922]: I1128 06:54:24.787750 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:54:24 crc kubenswrapper[4922]: I1128 06:54:24.787768 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:54:24Z","lastTransitionTime":"2025-11-28T06:54:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 06:54:24 crc kubenswrapper[4922]: I1128 06:54:24.890737 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:54:24 crc kubenswrapper[4922]: I1128 06:54:24.890799 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:54:24 crc kubenswrapper[4922]: I1128 06:54:24.890816 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:54:24 crc kubenswrapper[4922]: I1128 06:54:24.890840 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:54:24 crc kubenswrapper[4922]: I1128 06:54:24.890859 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:54:24Z","lastTransitionTime":"2025-11-28T06:54:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:54:24 crc kubenswrapper[4922]: I1128 06:54:24.993535 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:54:24 crc kubenswrapper[4922]: I1128 06:54:24.993586 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:54:24 crc kubenswrapper[4922]: I1128 06:54:24.993601 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:54:24 crc kubenswrapper[4922]: I1128 06:54:24.993623 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:54:24 crc kubenswrapper[4922]: I1128 06:54:24.993640 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:54:24Z","lastTransitionTime":"2025-11-28T06:54:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:54:25 crc kubenswrapper[4922]: I1128 06:54:25.096623 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:54:25 crc kubenswrapper[4922]: I1128 06:54:25.096668 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:54:25 crc kubenswrapper[4922]: I1128 06:54:25.096679 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:54:25 crc kubenswrapper[4922]: I1128 06:54:25.096695 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:54:25 crc kubenswrapper[4922]: I1128 06:54:25.096707 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:54:25Z","lastTransitionTime":"2025-11-28T06:54:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 06:54:25 crc kubenswrapper[4922]: I1128 06:54:25.199309 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:54:25 crc kubenswrapper[4922]: I1128 06:54:25.199362 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:54:25 crc kubenswrapper[4922]: I1128 06:54:25.199379 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:54:25 crc kubenswrapper[4922]: I1128 06:54:25.199404 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:54:25 crc kubenswrapper[4922]: I1128 06:54:25.199422 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:54:25Z","lastTransitionTime":"2025-11-28T06:54:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:54:25 crc kubenswrapper[4922]: I1128 06:54:25.302901 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:54:25 crc kubenswrapper[4922]: I1128 06:54:25.302956 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:54:25 crc kubenswrapper[4922]: I1128 06:54:25.302972 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:54:25 crc kubenswrapper[4922]: I1128 06:54:25.302995 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:54:25 crc kubenswrapper[4922]: I1128 06:54:25.303012 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:54:25Z","lastTransitionTime":"2025-11-28T06:54:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:54:25 crc kubenswrapper[4922]: I1128 06:54:25.405927 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:54:25 crc kubenswrapper[4922]: I1128 06:54:25.406044 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:54:25 crc kubenswrapper[4922]: I1128 06:54:25.406063 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:54:25 crc kubenswrapper[4922]: I1128 06:54:25.406126 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:54:25 crc kubenswrapper[4922]: I1128 06:54:25.406145 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:54:25Z","lastTransitionTime":"2025-11-28T06:54:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Nov 28 06:54:25 crc kubenswrapper[4922]: I1128 06:54:25.477592 4922 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-etcd/etcd-crc" podStartSLOduration=13.477559285 podStartE2EDuration="13.477559285s" podCreationTimestamp="2025-11-28 06:54:12 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 06:54:25.454278207 +0000 UTC m=+110.374673869" watchObservedRunningTime="2025-11-28 06:54:25.477559285 +0000 UTC m=+110.397954897"
Nov 28 06:54:25 crc kubenswrapper[4922]: I1128 06:54:25.497317 4922 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/machine-config-daemon-h8wk6" podStartSLOduration=91.497283365 podStartE2EDuration="1m31.497283365s" podCreationTimestamp="2025-11-28 06:52:54 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 06:54:25.496498533 +0000 UTC m=+110.416894135" watchObservedRunningTime="2025-11-28 06:54:25.497283365 +0000 UTC m=+110.417679017"
Nov 28 06:54:25 crc kubenswrapper[4922]: I1128 06:54:25.508866 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 06:54:25 crc kubenswrapper[4922]: I1128 06:54:25.508908 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 06:54:25 crc kubenswrapper[4922]: I1128 06:54:25.508920 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 06:54:25 crc kubenswrapper[4922]: I1128 06:54:25.508938 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 06:54:25 crc kubenswrapper[4922]: I1128 06:54:25.508951 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:54:25Z","lastTransitionTime":"2025-11-28T06:54:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 06:54:25 crc kubenswrapper[4922]: I1128 06:54:25.554158 4922 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-n9b52" podStartSLOduration=91.554134269 podStartE2EDuration="1m31.554134269s" podCreationTimestamp="2025-11-28 06:52:54 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 06:54:25.553411209 +0000 UTC m=+110.473806821" watchObservedRunningTime="2025-11-28 06:54:25.554134269 +0000 UTC m=+110.474529871"
Nov 28 06:54:25 crc kubenswrapper[4922]: I1128 06:54:25.578784 4922 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver/kube-apiserver-crc" podStartSLOduration=91.578755325 podStartE2EDuration="1m31.578755325s" podCreationTimestamp="2025-11-28 06:52:54 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 06:54:25.578066186 +0000 UTC m=+110.498461788" watchObservedRunningTime="2025-11-28 06:54:25.578755325 +0000 UTC m=+110.499150947"
Nov 28 06:54:25 crc kubenswrapper[4922]: I1128 06:54:25.595098 4922 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" podStartSLOduration=38.59506976 podStartE2EDuration="38.59506976s" podCreationTimestamp="2025-11-28 06:53:47 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 06:54:25.593071354 +0000 UTC m=+110.513466966" watchObservedRunningTime="2025-11-28 06:54:25.59506976 +0000 UTC m=+110.515465382"
Nov 28 06:54:25 crc kubenswrapper[4922]: I1128 06:54:25.622007 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 06:54:25 crc kubenswrapper[4922]: I1128 06:54:25.622048 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 06:54:25 crc kubenswrapper[4922]: I1128 06:54:25.622071 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 06:54:25 crc kubenswrapper[4922]: I1128 06:54:25.622090 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 06:54:25 crc kubenswrapper[4922]: I1128 06:54:25.622103 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:54:25Z","lastTransitionTime":"2025-11-28T06:54:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 06:54:25 crc kubenswrapper[4922]: I1128 06:54:25.668000 4922 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-multus/multus-additional-cni-plugins-xm948" podStartSLOduration=91.667981302 podStartE2EDuration="1m31.667981302s" podCreationTimestamp="2025-11-28 06:52:54 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 06:54:25.667516199 +0000 UTC m=+110.587911791" watchObservedRunningTime="2025-11-28 06:54:25.667981302 +0000 UTC m=+110.588376884"
Nov 28 06:54:25 crc kubenswrapper[4922]: I1128 06:54:25.688867 4922 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-image-registry/node-ca-z5d9x" podStartSLOduration=91.688846724 podStartE2EDuration="1m31.688846724s" podCreationTimestamp="2025-11-28 06:52:54 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 06:54:25.688563095 +0000 UTC m=+110.608958687" watchObservedRunningTime="2025-11-28 06:54:25.688846724 +0000 UTC m=+110.609242306"
Nov 28 06:54:25 crc kubenswrapper[4922]: I1128 06:54:25.725000 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 06:54:25 crc kubenswrapper[4922]: I1128 06:54:25.725051 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 06:54:25 crc kubenswrapper[4922]: I1128 06:54:25.725064 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 06:54:25 crc kubenswrapper[4922]: I1128 06:54:25.725083 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 06:54:25 crc kubenswrapper[4922]: I1128 06:54:25.725143 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:54:25Z","lastTransitionTime":"2025-11-28T06:54:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 06:54:25 crc kubenswrapper[4922]: I1128 06:54:25.725742 4922 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podStartSLOduration=92.725729731 podStartE2EDuration="1m32.725729731s" podCreationTimestamp="2025-11-28 06:52:53 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 06:54:25.724297921 +0000 UTC m=+110.644693503" watchObservedRunningTime="2025-11-28 06:54:25.725729731 +0000 UTC m=+110.646125313"
Nov 28 06:54:25 crc kubenswrapper[4922]: I1128 06:54:25.756270 4922 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-dns/node-resolver-w9zxj" podStartSLOduration=91.756248292 podStartE2EDuration="1m31.756248292s" podCreationTimestamp="2025-11-28 06:52:54 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 06:54:25.756027986 +0000 UTC m=+110.676423568" watchObservedRunningTime="2025-11-28 06:54:25.756248292 +0000 UTC m=+110.676643874"
Nov 28 06:54:25 crc kubenswrapper[4922]: I1128 06:54:25.770000 4922 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-multus/multus-jgzjd" podStartSLOduration=91.769978335 podStartE2EDuration="1m31.769978335s" podCreationTimestamp="2025-11-28 06:52:54 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 06:54:25.768838452 +0000 UTC m=+110.689234074" watchObservedRunningTime="2025-11-28 06:54:25.769978335 +0000 UTC m=+110.690373917"
Nov 28 06:54:25 crc kubenswrapper[4922]: I1128 06:54:25.783467 4922 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" podStartSLOduration=62.783428219 podStartE2EDuration="1m2.783428219s" podCreationTimestamp="2025-11-28 06:53:23 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 06:54:25.781618999 +0000 UTC m=+110.702014581" watchObservedRunningTime="2025-11-28 06:54:25.783428219 +0000 UTC m=+110.703823811"
Nov 28 06:54:25 crc kubenswrapper[4922]: I1128 06:54:25.827798 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 06:54:25 crc kubenswrapper[4922]: I1128 06:54:25.827835 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 06:54:25 crc kubenswrapper[4922]: I1128 06:54:25.827844 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 06:54:25 crc kubenswrapper[4922]: I1128 06:54:25.827859 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 06:54:25 crc kubenswrapper[4922]: I1128 06:54:25.827869 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:54:25Z","lastTransitionTime":"2025-11-28T06:54:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/.
Has your network provider started?"} Nov 28 06:54:25 crc kubenswrapper[4922]: I1128 06:54:25.931254 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:54:25 crc kubenswrapper[4922]: I1128 06:54:25.931315 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:54:25 crc kubenswrapper[4922]: I1128 06:54:25.931333 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:54:25 crc kubenswrapper[4922]: I1128 06:54:25.931360 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:54:25 crc kubenswrapper[4922]: I1128 06:54:25.931377 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:54:25Z","lastTransitionTime":"2025-11-28T06:54:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:54:26 crc kubenswrapper[4922]: I1128 06:54:26.033812 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:54:26 crc kubenswrapper[4922]: I1128 06:54:26.033871 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:54:26 crc kubenswrapper[4922]: I1128 06:54:26.033890 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:54:26 crc kubenswrapper[4922]: I1128 06:54:26.033914 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:54:26 crc kubenswrapper[4922]: I1128 06:54:26.033932 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:54:26Z","lastTransitionTime":"2025-11-28T06:54:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:54:26 crc kubenswrapper[4922]: I1128 06:54:26.136919 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:54:26 crc kubenswrapper[4922]: I1128 06:54:26.136980 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:54:26 crc kubenswrapper[4922]: I1128 06:54:26.136999 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:54:26 crc kubenswrapper[4922]: I1128 06:54:26.137022 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:54:26 crc kubenswrapper[4922]: I1128 06:54:26.137039 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:54:26Z","lastTransitionTime":"2025-11-28T06:54:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Nov 28 06:54:26 crc kubenswrapper[4922]: I1128 06:54:26.239906 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 06:54:26 crc kubenswrapper[4922]: I1128 06:54:26.239972 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 06:54:26 crc kubenswrapper[4922]: I1128 06:54:26.239991 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 06:54:26 crc kubenswrapper[4922]: I1128 06:54:26.240019 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 06:54:26 crc kubenswrapper[4922]: I1128 06:54:26.240038 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:54:26Z","lastTransitionTime":"2025-11-28T06:54:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 06:54:26 crc kubenswrapper[4922]: I1128 06:54:26.343075 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 06:54:26 crc kubenswrapper[4922]: I1128 06:54:26.343122 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 06:54:26 crc kubenswrapper[4922]: I1128 06:54:26.343139 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 06:54:26 crc kubenswrapper[4922]: I1128 06:54:26.343163 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 06:54:26 crc kubenswrapper[4922]: I1128 06:54:26.343180 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:54:26Z","lastTransitionTime":"2025-11-28T06:54:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 06:54:26 crc kubenswrapper[4922]: I1128 06:54:26.398809 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 28 06:54:26 crc kubenswrapper[4922]: I1128 06:54:26.398906 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-9kfr9"
Nov 28 06:54:26 crc kubenswrapper[4922]: I1128 06:54:26.398814 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 28 06:54:26 crc kubenswrapper[4922]: E1128 06:54:26.399000 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 28 06:54:26 crc kubenswrapper[4922]: I1128 06:54:26.398906 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 28 06:54:26 crc kubenswrapper[4922]: E1128 06:54:26.399095 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-9kfr9" podUID="709beb43-ed88-4a0a-b384-0c463e469964"
Nov 28 06:54:26 crc kubenswrapper[4922]: E1128 06:54:26.399262 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 28 06:54:26 crc kubenswrapper[4922]: E1128 06:54:26.399357 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 28 06:54:26 crc kubenswrapper[4922]: I1128 06:54:26.445484 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 06:54:26 crc kubenswrapper[4922]: I1128 06:54:26.445540 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 06:54:26 crc kubenswrapper[4922]: I1128 06:54:26.445557 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 06:54:26 crc kubenswrapper[4922]: I1128 06:54:26.445583 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 06:54:26 crc kubenswrapper[4922]: I1128 06:54:26.445601 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:54:26Z","lastTransitionTime":"2025-11-28T06:54:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/.
Has your network provider started?"} Nov 28 06:54:26 crc kubenswrapper[4922]: I1128 06:54:26.548755 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:54:26 crc kubenswrapper[4922]: I1128 06:54:26.548799 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:54:26 crc kubenswrapper[4922]: I1128 06:54:26.548808 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:54:26 crc kubenswrapper[4922]: I1128 06:54:26.548824 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:54:26 crc kubenswrapper[4922]: I1128 06:54:26.548836 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:54:26Z","lastTransitionTime":"2025-11-28T06:54:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:54:26 crc kubenswrapper[4922]: I1128 06:54:26.659038 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:54:26 crc kubenswrapper[4922]: I1128 06:54:26.659128 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:54:26 crc kubenswrapper[4922]: I1128 06:54:26.659155 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:54:26 crc kubenswrapper[4922]: I1128 06:54:26.659202 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:54:26 crc kubenswrapper[4922]: I1128 06:54:26.659260 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:54:26Z","lastTransitionTime":"2025-11-28T06:54:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:54:26 crc kubenswrapper[4922]: I1128 06:54:26.763750 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:54:26 crc kubenswrapper[4922]: I1128 06:54:26.763821 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:54:26 crc kubenswrapper[4922]: I1128 06:54:26.763839 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:54:26 crc kubenswrapper[4922]: I1128 06:54:26.763864 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:54:26 crc kubenswrapper[4922]: I1128 06:54:26.763884 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:54:26Z","lastTransitionTime":"2025-11-28T06:54:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 06:54:26 crc kubenswrapper[4922]: I1128 06:54:26.867523 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:54:26 crc kubenswrapper[4922]: I1128 06:54:26.867578 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:54:26 crc kubenswrapper[4922]: I1128 06:54:26.867595 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:54:26 crc kubenswrapper[4922]: I1128 06:54:26.867620 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:54:26 crc kubenswrapper[4922]: I1128 06:54:26.867638 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:54:26Z","lastTransitionTime":"2025-11-28T06:54:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:54:26 crc kubenswrapper[4922]: I1128 06:54:26.971413 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:54:26 crc kubenswrapper[4922]: I1128 06:54:26.971475 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:54:26 crc kubenswrapper[4922]: I1128 06:54:26.971491 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:54:26 crc kubenswrapper[4922]: I1128 06:54:26.971519 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:54:26 crc kubenswrapper[4922]: I1128 06:54:26.971536 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:54:26Z","lastTransitionTime":"2025-11-28T06:54:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:54:27 crc kubenswrapper[4922]: I1128 06:54:27.073637 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:54:27 crc kubenswrapper[4922]: I1128 06:54:27.073684 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:54:27 crc kubenswrapper[4922]: I1128 06:54:27.073700 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:54:27 crc kubenswrapper[4922]: I1128 06:54:27.073724 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:54:27 crc kubenswrapper[4922]: I1128 06:54:27.073741 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:54:27Z","lastTransitionTime":"2025-11-28T06:54:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 06:54:27 crc kubenswrapper[4922]: I1128 06:54:27.177409 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:54:27 crc kubenswrapper[4922]: I1128 06:54:27.177495 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:54:27 crc kubenswrapper[4922]: I1128 06:54:27.177517 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:54:27 crc kubenswrapper[4922]: I1128 06:54:27.177542 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:54:27 crc kubenswrapper[4922]: I1128 06:54:27.177566 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:54:27Z","lastTransitionTime":"2025-11-28T06:54:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:54:27 crc kubenswrapper[4922]: I1128 06:54:27.281143 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:54:27 crc kubenswrapper[4922]: I1128 06:54:27.281244 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:54:27 crc kubenswrapper[4922]: I1128 06:54:27.281265 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:54:27 crc kubenswrapper[4922]: I1128 06:54:27.281293 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:54:27 crc kubenswrapper[4922]: I1128 06:54:27.281312 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:54:27Z","lastTransitionTime":"2025-11-28T06:54:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:54:27 crc kubenswrapper[4922]: I1128 06:54:27.385059 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:54:27 crc kubenswrapper[4922]: I1128 06:54:27.385114 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:54:27 crc kubenswrapper[4922]: I1128 06:54:27.385132 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:54:27 crc kubenswrapper[4922]: I1128 06:54:27.385158 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:54:27 crc kubenswrapper[4922]: I1128 06:54:27.385177 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:54:27Z","lastTransitionTime":"2025-11-28T06:54:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 06:54:27 crc kubenswrapper[4922]: I1128 06:54:27.488089 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:54:27 crc kubenswrapper[4922]: I1128 06:54:27.488148 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:54:27 crc kubenswrapper[4922]: I1128 06:54:27.488165 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:54:27 crc kubenswrapper[4922]: I1128 06:54:27.488188 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:54:27 crc kubenswrapper[4922]: I1128 06:54:27.488207 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:54:27Z","lastTransitionTime":"2025-11-28T06:54:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:54:27 crc kubenswrapper[4922]: I1128 06:54:27.591785 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:54:27 crc kubenswrapper[4922]: I1128 06:54:27.591865 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:54:27 crc kubenswrapper[4922]: I1128 06:54:27.591888 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:54:27 crc kubenswrapper[4922]: I1128 06:54:27.591925 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:54:27 crc kubenswrapper[4922]: I1128 06:54:27.591955 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:54:27Z","lastTransitionTime":"2025-11-28T06:54:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:54:27 crc kubenswrapper[4922]: I1128 06:54:27.695473 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:54:27 crc kubenswrapper[4922]: I1128 06:54:27.695542 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:54:27 crc kubenswrapper[4922]: I1128 06:54:27.695562 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:54:27 crc kubenswrapper[4922]: I1128 06:54:27.695591 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:54:27 crc kubenswrapper[4922]: I1128 06:54:27.695611 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:54:27Z","lastTransitionTime":"2025-11-28T06:54:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 06:54:27 crc kubenswrapper[4922]: I1128 06:54:27.798800 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:54:27 crc kubenswrapper[4922]: I1128 06:54:27.798859 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:54:27 crc kubenswrapper[4922]: I1128 06:54:27.798876 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:54:27 crc kubenswrapper[4922]: I1128 06:54:27.798901 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:54:27 crc kubenswrapper[4922]: I1128 06:54:27.798919 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:54:27Z","lastTransitionTime":"2025-11-28T06:54:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:54:27 crc kubenswrapper[4922]: I1128 06:54:27.902099 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:54:27 crc kubenswrapper[4922]: I1128 06:54:27.902157 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:54:27 crc kubenswrapper[4922]: I1128 06:54:27.902173 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:54:27 crc kubenswrapper[4922]: I1128 06:54:27.902197 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:54:27 crc kubenswrapper[4922]: I1128 06:54:27.902215 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:54:27Z","lastTransitionTime":"2025-11-28T06:54:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:54:28 crc kubenswrapper[4922]: I1128 06:54:28.005595 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:54:28 crc kubenswrapper[4922]: I1128 06:54:28.005673 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:54:28 crc kubenswrapper[4922]: I1128 06:54:28.005697 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:54:28 crc kubenswrapper[4922]: I1128 06:54:28.005726 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:54:28 crc kubenswrapper[4922]: I1128 06:54:28.005746 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:54:28Z","lastTransitionTime":"2025-11-28T06:54:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 06:54:28 crc kubenswrapper[4922]: I1128 06:54:28.070979 4922 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-jgzjd_b05f16bb-1729-4fd8-883a-4fb960bf4cff/kube-multus/1.log" Nov 28 06:54:28 crc kubenswrapper[4922]: I1128 06:54:28.071730 4922 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-jgzjd_b05f16bb-1729-4fd8-883a-4fb960bf4cff/kube-multus/0.log" Nov 28 06:54:28 crc kubenswrapper[4922]: I1128 06:54:28.071818 4922 generic.go:334] "Generic (PLEG): container finished" podID="b05f16bb-1729-4fd8-883a-4fb960bf4cff" containerID="18ece98b3e5c71e3aa7b48051d91a0684f108688cd8a31e00422d5d0b047a76e" exitCode=1 Nov 28 06:54:28 crc kubenswrapper[4922]: I1128 06:54:28.071866 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-jgzjd" event={"ID":"b05f16bb-1729-4fd8-883a-4fb960bf4cff","Type":"ContainerDied","Data":"18ece98b3e5c71e3aa7b48051d91a0684f108688cd8a31e00422d5d0b047a76e"} Nov 28 06:54:28 crc kubenswrapper[4922]: I1128 06:54:28.071914 4922 scope.go:117] "RemoveContainer" containerID="099001160eba2d5545fdd42a6fac2b9842549a64b5d3fe093c274e073a156e5a" Nov 28 06:54:28 crc kubenswrapper[4922]: I1128 06:54:28.072487 4922 scope.go:117] "RemoveContainer" containerID="18ece98b3e5c71e3aa7b48051d91a0684f108688cd8a31e00422d5d0b047a76e" Nov 28 06:54:28 crc kubenswrapper[4922]: E1128 06:54:28.072759 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-multus\" with CrashLoopBackOff: \"back-off 10s restarting failed container=kube-multus pod=multus-jgzjd_openshift-multus(b05f16bb-1729-4fd8-883a-4fb960bf4cff)\"" pod="openshift-multus/multus-jgzjd" podUID="b05f16bb-1729-4fd8-883a-4fb960bf4cff" Nov 28 06:54:28 crc kubenswrapper[4922]: I1128 06:54:28.109301 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:54:28 crc kubenswrapper[4922]: I1128 06:54:28.109410 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:54:28 crc kubenswrapper[4922]: I1128 06:54:28.109429 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:54:28 crc kubenswrapper[4922]: I1128 06:54:28.109459 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:54:28 crc kubenswrapper[4922]: I1128 06:54:28.109478 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:54:28Z","lastTransitionTime":"2025-11-28T06:54:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 06:54:28 crc kubenswrapper[4922]: I1128 06:54:28.212596 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:54:28 crc kubenswrapper[4922]: I1128 06:54:28.212662 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:54:28 crc kubenswrapper[4922]: I1128 06:54:28.212680 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:54:28 crc kubenswrapper[4922]: I1128 06:54:28.212704 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:54:28 crc kubenswrapper[4922]: I1128 06:54:28.212722 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:54:28Z","lastTransitionTime":"2025-11-28T06:54:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:54:28 crc kubenswrapper[4922]: I1128 06:54:28.315536 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:54:28 crc kubenswrapper[4922]: I1128 06:54:28.315621 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:54:28 crc kubenswrapper[4922]: I1128 06:54:28.315647 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:54:28 crc kubenswrapper[4922]: I1128 06:54:28.315682 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:54:28 crc kubenswrapper[4922]: I1128 06:54:28.315708 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:54:28Z","lastTransitionTime":"2025-11-28T06:54:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:54:28 crc kubenswrapper[4922]: I1128 06:54:28.397467 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 06:54:28 crc kubenswrapper[4922]: E1128 06:54:28.397679 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 06:54:28 crc kubenswrapper[4922]: I1128 06:54:28.397942 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 06:54:28 crc kubenswrapper[4922]: I1128 06:54:28.398095 4922 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/network-metrics-daemon-9kfr9" Nov 28 06:54:28 crc kubenswrapper[4922]: I1128 06:54:28.398060 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 06:54:28 crc kubenswrapper[4922]: E1128 06:54:28.398656 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 06:54:28 crc kubenswrapper[4922]: E1128 06:54:28.398825 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-9kfr9" podUID="709beb43-ed88-4a0a-b384-0c463e469964" Nov 28 06:54:28 crc kubenswrapper[4922]: E1128 06:54:28.398908 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 06:54:28 crc kubenswrapper[4922]: I1128 06:54:28.419299 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:54:28 crc kubenswrapper[4922]: I1128 06:54:28.419359 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:54:28 crc kubenswrapper[4922]: I1128 06:54:28.419378 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:54:28 crc kubenswrapper[4922]: I1128 06:54:28.419401 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:54:28 crc kubenswrapper[4922]: I1128 06:54:28.419419 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:54:28Z","lastTransitionTime":"2025-11-28T06:54:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 06:54:28 crc kubenswrapper[4922]: I1128 06:54:28.522909 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:54:28 crc kubenswrapper[4922]: I1128 06:54:28.522979 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:54:28 crc kubenswrapper[4922]: I1128 06:54:28.523006 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:54:28 crc kubenswrapper[4922]: I1128 06:54:28.523046 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:54:28 crc kubenswrapper[4922]: I1128 06:54:28.523080 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:54:28Z","lastTransitionTime":"2025-11-28T06:54:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:54:28 crc kubenswrapper[4922]: I1128 06:54:28.627084 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:54:28 crc kubenswrapper[4922]: I1128 06:54:28.627175 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:54:28 crc kubenswrapper[4922]: I1128 06:54:28.627200 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:54:28 crc kubenswrapper[4922]: I1128 06:54:28.627320 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:54:28 crc kubenswrapper[4922]: I1128 06:54:28.627359 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:54:28Z","lastTransitionTime":"2025-11-28T06:54:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:54:28 crc kubenswrapper[4922]: I1128 06:54:28.730852 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:54:28 crc kubenswrapper[4922]: I1128 06:54:28.730915 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:54:28 crc kubenswrapper[4922]: I1128 06:54:28.730937 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:54:28 crc kubenswrapper[4922]: I1128 06:54:28.730968 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:54:28 crc kubenswrapper[4922]: I1128 06:54:28.730990 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:54:28Z","lastTransitionTime":"2025-11-28T06:54:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 06:54:28 crc kubenswrapper[4922]: I1128 06:54:28.833699 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:54:28 crc kubenswrapper[4922]: I1128 06:54:28.833752 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:54:28 crc kubenswrapper[4922]: I1128 06:54:28.833768 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:54:28 crc kubenswrapper[4922]: I1128 06:54:28.833789 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:54:28 crc kubenswrapper[4922]: I1128 06:54:28.833809 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:54:28Z","lastTransitionTime":"2025-11-28T06:54:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:54:28 crc kubenswrapper[4922]: I1128 06:54:28.936866 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:54:28 crc kubenswrapper[4922]: I1128 06:54:28.936964 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:54:28 crc kubenswrapper[4922]: I1128 06:54:28.936989 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:54:28 crc kubenswrapper[4922]: I1128 06:54:28.937017 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:54:28 crc kubenswrapper[4922]: I1128 06:54:28.937045 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:54:28Z","lastTransitionTime":"2025-11-28T06:54:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:54:29 crc kubenswrapper[4922]: I1128 06:54:29.040375 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:54:29 crc kubenswrapper[4922]: I1128 06:54:29.040432 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:54:29 crc kubenswrapper[4922]: I1128 06:54:29.040449 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:54:29 crc kubenswrapper[4922]: I1128 06:54:29.040472 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:54:29 crc kubenswrapper[4922]: I1128 06:54:29.040490 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:54:29Z","lastTransitionTime":"2025-11-28T06:54:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 06:54:29 crc kubenswrapper[4922]: I1128 06:54:29.078308 4922 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-jgzjd_b05f16bb-1729-4fd8-883a-4fb960bf4cff/kube-multus/1.log" Nov 28 06:54:29 crc kubenswrapper[4922]: I1128 06:54:29.144421 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:54:29 crc kubenswrapper[4922]: I1128 06:54:29.144492 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:54:29 crc kubenswrapper[4922]: I1128 06:54:29.144513 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:54:29 crc kubenswrapper[4922]: I1128 06:54:29.144546 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:54:29 crc kubenswrapper[4922]: I1128 06:54:29.144569 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:54:29Z","lastTransitionTime":"2025-11-28T06:54:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:54:29 crc kubenswrapper[4922]: I1128 06:54:29.248783 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:54:29 crc kubenswrapper[4922]: I1128 06:54:29.248865 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:54:29 crc kubenswrapper[4922]: I1128 06:54:29.248882 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:54:29 crc kubenswrapper[4922]: I1128 06:54:29.248908 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:54:29 crc kubenswrapper[4922]: I1128 06:54:29.248925 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:54:29Z","lastTransitionTime":"2025-11-28T06:54:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 06:54:29 crc kubenswrapper[4922]: I1128 06:54:29.351912 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:54:29 crc kubenswrapper[4922]: I1128 06:54:29.352026 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:54:29 crc kubenswrapper[4922]: I1128 06:54:29.352051 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:54:29 crc kubenswrapper[4922]: I1128 06:54:29.352082 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:54:29 crc kubenswrapper[4922]: I1128 06:54:29.352102 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:54:29Z","lastTransitionTime":"2025-11-28T06:54:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:54:29 crc kubenswrapper[4922]: I1128 06:54:29.455285 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:54:29 crc kubenswrapper[4922]: I1128 06:54:29.455390 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:54:29 crc kubenswrapper[4922]: I1128 06:54:29.455411 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:54:29 crc kubenswrapper[4922]: I1128 06:54:29.455487 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:54:29 crc kubenswrapper[4922]: I1128 06:54:29.455511 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:54:29Z","lastTransitionTime":"2025-11-28T06:54:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:54:29 crc kubenswrapper[4922]: I1128 06:54:29.558768 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:54:29 crc kubenswrapper[4922]: I1128 06:54:29.558832 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:54:29 crc kubenswrapper[4922]: I1128 06:54:29.558850 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:54:29 crc kubenswrapper[4922]: I1128 06:54:29.558873 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:54:29 crc kubenswrapper[4922]: I1128 06:54:29.558889 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:54:29Z","lastTransitionTime":"2025-11-28T06:54:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 06:54:29 crc kubenswrapper[4922]: I1128 06:54:29.575034 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 06:54:29 crc kubenswrapper[4922]: I1128 06:54:29.575121 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 06:54:29 crc kubenswrapper[4922]: I1128 06:54:29.575148 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 06:54:29 crc kubenswrapper[4922]: I1128 06:54:29.575183 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 06:54:29 crc kubenswrapper[4922]: I1128 06:54:29.575206 4922 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T06:54:29Z","lastTransitionTime":"2025-11-28T06:54:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 06:54:29 crc kubenswrapper[4922]: I1128 06:54:29.643533 4922 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-cluster-version/cluster-version-operator-5c965bbfc6-xdjdn"] Nov 28 06:54:29 crc kubenswrapper[4922]: I1128 06:54:29.644057 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-xdjdn" Nov 28 06:54:29 crc kubenswrapper[4922]: I1128 06:54:29.646959 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-version"/"cluster-version-operator-serving-cert" Nov 28 06:54:29 crc kubenswrapper[4922]: I1128 06:54:29.646920 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-version"/"default-dockercfg-gxtc4" Nov 28 06:54:29 crc kubenswrapper[4922]: I1128 06:54:29.648001 4922 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-version"/"kube-root-ca.crt" Nov 28 06:54:29 crc kubenswrapper[4922]: I1128 06:54:29.649264 4922 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-version"/"openshift-service-ca.crt" Nov 28 06:54:29 crc kubenswrapper[4922]: I1128 06:54:29.716976 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/fe41ee9b-17f6-471e-8b24-e0ccefe47d4d-serving-cert\") pod \"cluster-version-operator-5c965bbfc6-xdjdn\" (UID: \"fe41ee9b-17f6-471e-8b24-e0ccefe47d4d\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-xdjdn" Nov 28 06:54:29 crc kubenswrapper[4922]: I1128 06:54:29.717111 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-ssl-certs\" (UniqueName: \"kubernetes.io/host-path/fe41ee9b-17f6-471e-8b24-e0ccefe47d4d-etc-ssl-certs\") pod \"cluster-version-operator-5c965bbfc6-xdjdn\" (UID: \"fe41ee9b-17f6-471e-8b24-e0ccefe47d4d\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-xdjdn" Nov 28 06:54:29 crc kubenswrapper[4922]: I1128 06:54:29.717144 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/fe41ee9b-17f6-471e-8b24-e0ccefe47d4d-service-ca\") pod 
\"cluster-version-operator-5c965bbfc6-xdjdn\" (UID: \"fe41ee9b-17f6-471e-8b24-e0ccefe47d4d\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-xdjdn" Nov 28 06:54:29 crc kubenswrapper[4922]: I1128 06:54:29.717175 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/fe41ee9b-17f6-471e-8b24-e0ccefe47d4d-kube-api-access\") pod \"cluster-version-operator-5c965bbfc6-xdjdn\" (UID: \"fe41ee9b-17f6-471e-8b24-e0ccefe47d4d\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-xdjdn" Nov 28 06:54:29 crc kubenswrapper[4922]: I1128 06:54:29.717206 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-cvo-updatepayloads\" (UniqueName: \"kubernetes.io/host-path/fe41ee9b-17f6-471e-8b24-e0ccefe47d4d-etc-cvo-updatepayloads\") pod \"cluster-version-operator-5c965bbfc6-xdjdn\" (UID: \"fe41ee9b-17f6-471e-8b24-e0ccefe47d4d\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-xdjdn" Nov 28 06:54:29 crc kubenswrapper[4922]: I1128 06:54:29.817993 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-ssl-certs\" (UniqueName: \"kubernetes.io/host-path/fe41ee9b-17f6-471e-8b24-e0ccefe47d4d-etc-ssl-certs\") pod \"cluster-version-operator-5c965bbfc6-xdjdn\" (UID: \"fe41ee9b-17f6-471e-8b24-e0ccefe47d4d\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-xdjdn" Nov 28 06:54:29 crc kubenswrapper[4922]: I1128 06:54:29.818071 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/fe41ee9b-17f6-471e-8b24-e0ccefe47d4d-service-ca\") pod \"cluster-version-operator-5c965bbfc6-xdjdn\" (UID: \"fe41ee9b-17f6-471e-8b24-e0ccefe47d4d\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-xdjdn" Nov 28 06:54:29 crc kubenswrapper[4922]: I1128 06:54:29.818131 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/fe41ee9b-17f6-471e-8b24-e0ccefe47d4d-kube-api-access\") pod \"cluster-version-operator-5c965bbfc6-xdjdn\" (UID: \"fe41ee9b-17f6-471e-8b24-e0ccefe47d4d\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-xdjdn" Nov 28 06:54:29 crc kubenswrapper[4922]: I1128 06:54:29.818180 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-cvo-updatepayloads\" (UniqueName: \"kubernetes.io/host-path/fe41ee9b-17f6-471e-8b24-e0ccefe47d4d-etc-cvo-updatepayloads\") pod \"cluster-version-operator-5c965bbfc6-xdjdn\" (UID: \"fe41ee9b-17f6-471e-8b24-e0ccefe47d4d\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-xdjdn" Nov 28 06:54:29 crc kubenswrapper[4922]: I1128 06:54:29.818245 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-ssl-certs\" (UniqueName: \"kubernetes.io/host-path/fe41ee9b-17f6-471e-8b24-e0ccefe47d4d-etc-ssl-certs\") pod \"cluster-version-operator-5c965bbfc6-xdjdn\" (UID: \"fe41ee9b-17f6-471e-8b24-e0ccefe47d4d\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-xdjdn" Nov 28 06:54:29 crc kubenswrapper[4922]: I1128 06:54:29.818447 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-cvo-updatepayloads\" (UniqueName: \"kubernetes.io/host-path/fe41ee9b-17f6-471e-8b24-e0ccefe47d4d-etc-cvo-updatepayloads\") pod 
\"cluster-version-operator-5c965bbfc6-xdjdn\" (UID: \"fe41ee9b-17f6-471e-8b24-e0ccefe47d4d\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-xdjdn" Nov 28 06:54:29 crc kubenswrapper[4922]: I1128 06:54:29.818672 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/fe41ee9b-17f6-471e-8b24-e0ccefe47d4d-serving-cert\") pod \"cluster-version-operator-5c965bbfc6-xdjdn\" (UID: \"fe41ee9b-17f6-471e-8b24-e0ccefe47d4d\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-xdjdn" Nov 28 06:54:29 crc kubenswrapper[4922]: I1128 06:54:29.819875 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/fe41ee9b-17f6-471e-8b24-e0ccefe47d4d-service-ca\") pod \"cluster-version-operator-5c965bbfc6-xdjdn\" (UID: \"fe41ee9b-17f6-471e-8b24-e0ccefe47d4d\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-xdjdn" Nov 28 06:54:29 crc kubenswrapper[4922]: I1128 06:54:29.832274 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/fe41ee9b-17f6-471e-8b24-e0ccefe47d4d-serving-cert\") pod \"cluster-version-operator-5c965bbfc6-xdjdn\" (UID: \"fe41ee9b-17f6-471e-8b24-e0ccefe47d4d\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-xdjdn" Nov 28 06:54:29 crc kubenswrapper[4922]: I1128 06:54:29.847499 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/fe41ee9b-17f6-471e-8b24-e0ccefe47d4d-kube-api-access\") pod \"cluster-version-operator-5c965bbfc6-xdjdn\" (UID: \"fe41ee9b-17f6-471e-8b24-e0ccefe47d4d\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-xdjdn" Nov 28 06:54:29 crc kubenswrapper[4922]: I1128 06:54:29.967071 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-xdjdn" Nov 28 06:54:30 crc kubenswrapper[4922]: I1128 06:54:30.083967 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-xdjdn" event={"ID":"fe41ee9b-17f6-471e-8b24-e0ccefe47d4d","Type":"ContainerStarted","Data":"d1a9d6ae26d2a4037b539a137cd85362e4d58b929a7df6996b2f8a7d0b69a40a"} Nov 28 06:54:30 crc kubenswrapper[4922]: I1128 06:54:30.398028 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-9kfr9" Nov 28 06:54:30 crc kubenswrapper[4922]: E1128 06:54:30.398495 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-9kfr9" podUID="709beb43-ed88-4a0a-b384-0c463e469964" Nov 28 06:54:30 crc kubenswrapper[4922]: I1128 06:54:30.398103 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 06:54:30 crc kubenswrapper[4922]: I1128 06:54:30.398045 4922 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 06:54:30 crc kubenswrapper[4922]: E1128 06:54:30.398575 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 06:54:30 crc kubenswrapper[4922]: I1128 06:54:30.398175 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 06:54:30 crc kubenswrapper[4922]: E1128 06:54:30.398748 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 06:54:30 crc kubenswrapper[4922]: E1128 06:54:30.398908 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 06:54:31 crc kubenswrapper[4922]: I1128 06:54:31.091327 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-xdjdn" event={"ID":"fe41ee9b-17f6-471e-8b24-e0ccefe47d4d","Type":"ContainerStarted","Data":"d4926e8e2764786a986cac9acf4afd217dfadf2ad5aea2592df537046c8edd47"} Nov 28 06:54:31 crc kubenswrapper[4922]: I1128 06:54:31.116387 4922 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-xdjdn" podStartSLOduration=97.116359425 podStartE2EDuration="1m37.116359425s" podCreationTimestamp="2025-11-28 06:52:54 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 06:54:31.115075239 +0000 UTC m=+116.035470881" watchObservedRunningTime="2025-11-28 06:54:31.116359425 +0000 UTC m=+116.036755047" Nov 28 06:54:32 crc kubenswrapper[4922]: I1128 06:54:32.398438 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 06:54:32 crc kubenswrapper[4922]: I1128 06:54:32.398588 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 06:54:32 crc kubenswrapper[4922]: I1128 06:54:32.398660 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 06:54:32 crc kubenswrapper[4922]: I1128 06:54:32.398519 4922 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/network-metrics-daemon-9kfr9" Nov 28 06:54:32 crc kubenswrapper[4922]: E1128 06:54:32.398718 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 06:54:32 crc kubenswrapper[4922]: E1128 06:54:32.398833 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 06:54:32 crc kubenswrapper[4922]: E1128 06:54:32.399013 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-9kfr9" podUID="709beb43-ed88-4a0a-b384-0c463e469964" Nov 28 06:54:32 crc kubenswrapper[4922]: E1128 06:54:32.399141 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 06:54:34 crc kubenswrapper[4922]: I1128 06:54:34.397952 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-9kfr9" Nov 28 06:54:34 crc kubenswrapper[4922]: I1128 06:54:34.398071 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 06:54:34 crc kubenswrapper[4922]: I1128 06:54:34.398047 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 06:54:34 crc kubenswrapper[4922]: E1128 06:54:34.398140 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-9kfr9" podUID="709beb43-ed88-4a0a-b384-0c463e469964" Nov 28 06:54:34 crc kubenswrapper[4922]: I1128 06:54:34.398166 4922 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 06:54:34 crc kubenswrapper[4922]: E1128 06:54:34.398326 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 06:54:34 crc kubenswrapper[4922]: E1128 06:54:34.398489 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 06:54:34 crc kubenswrapper[4922]: E1128 06:54:34.398595 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 06:54:35 crc kubenswrapper[4922]: E1128 06:54:35.359784 4922 kubelet_node_status.go:497] "Node not becoming ready in time after startup" Nov 28 06:54:35 crc kubenswrapper[4922]: E1128 06:54:35.499737 4922 kubelet.go:2916] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Nov 28 06:54:36 crc kubenswrapper[4922]: I1128 06:54:36.398408 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-9kfr9" Nov 28 06:54:36 crc kubenswrapper[4922]: I1128 06:54:36.398483 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 06:54:36 crc kubenswrapper[4922]: I1128 06:54:36.398584 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 06:54:36 crc kubenswrapper[4922]: E1128 06:54:36.398795 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-9kfr9" podUID="709beb43-ed88-4a0a-b384-0c463e469964" Nov 28 06:54:36 crc kubenswrapper[4922]: I1128 06:54:36.398826 4922 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 28 06:54:36 crc kubenswrapper[4922]: E1128 06:54:36.399039 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 28 06:54:36 crc kubenswrapper[4922]: E1128 06:54:36.399152 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 28 06:54:36 crc kubenswrapper[4922]: E1128 06:54:36.399300 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 28 06:54:38 crc kubenswrapper[4922]: I1128 06:54:38.398527 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 28 06:54:38 crc kubenswrapper[4922]: I1128 06:54:38.398804 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 28 06:54:38 crc kubenswrapper[4922]: I1128 06:54:38.398842 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-9kfr9"
Nov 28 06:54:38 crc kubenswrapper[4922]: E1128 06:54:38.398999 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 28 06:54:38 crc kubenswrapper[4922]: E1128 06:54:38.399114 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 28 06:54:38 crc kubenswrapper[4922]: E1128 06:54:38.399283 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-9kfr9" podUID="709beb43-ed88-4a0a-b384-0c463e469964"
Nov 28 06:54:38 crc kubenswrapper[4922]: I1128 06:54:38.399819 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 28 06:54:38 crc kubenswrapper[4922]: E1128 06:54:38.400160 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 28 06:54:38 crc kubenswrapper[4922]: I1128 06:54:38.400525 4922 scope.go:117] "RemoveContainer" containerID="34af1a43ea6a98f41968e9d8fedf88b2886eff55e56e71236e5e9a4c181652af"
Nov 28 06:54:38 crc kubenswrapper[4922]: E1128 06:54:38.400786 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 40s restarting failed container=ovnkube-controller pod=ovnkube-node-7gdxt_openshift-ovn-kubernetes(ac5c6b67-2037-400e-8e03-845b47d8ca67)\"" pod="openshift-ovn-kubernetes/ovnkube-node-7gdxt" podUID="ac5c6b67-2037-400e-8e03-845b47d8ca67"
Nov 28 06:54:40 crc kubenswrapper[4922]: I1128 06:54:40.398340 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 28 06:54:40 crc kubenswrapper[4922]: I1128 06:54:40.398446 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 28 06:54:40 crc kubenswrapper[4922]: E1128 06:54:40.398552 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 28 06:54:40 crc kubenswrapper[4922]: I1128 06:54:40.398700 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 28 06:54:40 crc kubenswrapper[4922]: E1128 06:54:40.398827 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 28 06:54:40 crc kubenswrapper[4922]: E1128 06:54:40.399209 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 28 06:54:40 crc kubenswrapper[4922]: I1128 06:54:40.399471 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-9kfr9"
Nov 28 06:54:40 crc kubenswrapper[4922]: E1128 06:54:40.399619 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-9kfr9" podUID="709beb43-ed88-4a0a-b384-0c463e469964"
Nov 28 06:54:40 crc kubenswrapper[4922]: E1128 06:54:40.501384 4922 kubelet.go:2916] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"
Nov 28 06:54:41 crc kubenswrapper[4922]: I1128 06:54:41.398851 4922 scope.go:117] "RemoveContainer" containerID="18ece98b3e5c71e3aa7b48051d91a0684f108688cd8a31e00422d5d0b047a76e"
Nov 28 06:54:42 crc kubenswrapper[4922]: I1128 06:54:42.136300 4922 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-jgzjd_b05f16bb-1729-4fd8-883a-4fb960bf4cff/kube-multus/1.log"
Nov 28 06:54:42 crc kubenswrapper[4922]: I1128 06:54:42.136758 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-jgzjd" event={"ID":"b05f16bb-1729-4fd8-883a-4fb960bf4cff","Type":"ContainerStarted","Data":"cd044b0293e9c0b8120f81513a27b31b65138f0396ed6a9d48e1b3c3da93f027"}
Nov 28 06:54:42 crc kubenswrapper[4922]: I1128 06:54:42.397954 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 28 06:54:42 crc kubenswrapper[4922]: I1128 06:54:42.398020 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 28 06:54:42 crc kubenswrapper[4922]: E1128 06:54:42.398127 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 28 06:54:42 crc kubenswrapper[4922]: I1128 06:54:42.397976 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-9kfr9"
Nov 28 06:54:42 crc kubenswrapper[4922]: E1128 06:54:42.398367 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 28 06:54:42 crc kubenswrapper[4922]: I1128 06:54:42.398403 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 28 06:54:42 crc kubenswrapper[4922]: E1128 06:54:42.398576 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-9kfr9" podUID="709beb43-ed88-4a0a-b384-0c463e469964"
Nov 28 06:54:42 crc kubenswrapper[4922]: E1128 06:54:42.398625 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 28 06:54:44 crc kubenswrapper[4922]: I1128 06:54:44.398397 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 28 06:54:44 crc kubenswrapper[4922]: I1128 06:54:44.398455 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 28 06:54:44 crc kubenswrapper[4922]: I1128 06:54:44.398499 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 28 06:54:44 crc kubenswrapper[4922]: E1128 06:54:44.398613 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 28 06:54:44 crc kubenswrapper[4922]: I1128 06:54:44.398718 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-9kfr9"
Nov 28 06:54:44 crc kubenswrapper[4922]: E1128 06:54:44.398904 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-9kfr9" podUID="709beb43-ed88-4a0a-b384-0c463e469964"
Nov 28 06:54:44 crc kubenswrapper[4922]: E1128 06:54:44.399104 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 28 06:54:44 crc kubenswrapper[4922]: E1128 06:54:44.399255 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 28 06:54:45 crc kubenswrapper[4922]: E1128 06:54:45.502155 4922 kubelet.go:2916] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"
Nov 28 06:54:46 crc kubenswrapper[4922]: I1128 06:54:46.397873 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 28 06:54:46 crc kubenswrapper[4922]: I1128 06:54:46.397946 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 28 06:54:46 crc kubenswrapper[4922]: I1128 06:54:46.398019 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-9kfr9"
Nov 28 06:54:46 crc kubenswrapper[4922]: E1128 06:54:46.398252 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 28 06:54:46 crc kubenswrapper[4922]: I1128 06:54:46.398365 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 28 06:54:46 crc kubenswrapper[4922]: E1128 06:54:46.398496 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 28 06:54:46 crc kubenswrapper[4922]: E1128 06:54:46.398692 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-9kfr9" podUID="709beb43-ed88-4a0a-b384-0c463e469964"
Nov 28 06:54:46 crc kubenswrapper[4922]: E1128 06:54:46.398856 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 28 06:54:48 crc kubenswrapper[4922]: I1128 06:54:48.398093 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-9kfr9"
Nov 28 06:54:48 crc kubenswrapper[4922]: I1128 06:54:48.398262 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 28 06:54:48 crc kubenswrapper[4922]: E1128 06:54:48.398329 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-9kfr9" podUID="709beb43-ed88-4a0a-b384-0c463e469964"
Nov 28 06:54:48 crc kubenswrapper[4922]: I1128 06:54:48.398395 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 28 06:54:48 crc kubenswrapper[4922]: E1128 06:54:48.398535 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 28 06:54:48 crc kubenswrapper[4922]: E1128 06:54:48.398718 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 28 06:54:48 crc kubenswrapper[4922]: I1128 06:54:48.398808 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 28 06:54:48 crc kubenswrapper[4922]: E1128 06:54:48.398946 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 28 06:54:50 crc kubenswrapper[4922]: I1128 06:54:50.398031 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 28 06:54:50 crc kubenswrapper[4922]: I1128 06:54:50.398098 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 28 06:54:50 crc kubenswrapper[4922]: I1128 06:54:50.398197 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-9kfr9"
Nov 28 06:54:50 crc kubenswrapper[4922]: E1128 06:54:50.398435 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 28 06:54:50 crc kubenswrapper[4922]: I1128 06:54:50.398611 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 28 06:54:50 crc kubenswrapper[4922]: E1128 06:54:50.398764 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-9kfr9" podUID="709beb43-ed88-4a0a-b384-0c463e469964"
Nov 28 06:54:50 crc kubenswrapper[4922]: E1128 06:54:50.398956 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 28 06:54:50 crc kubenswrapper[4922]: E1128 06:54:50.399183 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 28 06:54:50 crc kubenswrapper[4922]: E1128 06:54:50.504721 4922 kubelet.go:2916] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"
Nov 28 06:54:52 crc kubenswrapper[4922]: I1128 06:54:52.397460 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 28 06:54:52 crc kubenswrapper[4922]: I1128 06:54:52.397526 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 28 06:54:52 crc kubenswrapper[4922]: E1128 06:54:52.397687 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 28 06:54:52 crc kubenswrapper[4922]: I1128 06:54:52.397476 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-9kfr9"
Nov 28 06:54:52 crc kubenswrapper[4922]: I1128 06:54:52.397760 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 28 06:54:52 crc kubenswrapper[4922]: E1128 06:54:52.398060 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 28 06:54:52 crc kubenswrapper[4922]: E1128 06:54:52.398287 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 28 06:54:52 crc kubenswrapper[4922]: E1128 06:54:52.398449 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-9kfr9" podUID="709beb43-ed88-4a0a-b384-0c463e469964"
Nov 28 06:54:52 crc kubenswrapper[4922]: I1128 06:54:52.399764 4922 scope.go:117] "RemoveContainer" containerID="34af1a43ea6a98f41968e9d8fedf88b2886eff55e56e71236e5e9a4c181652af"
Nov 28 06:54:53 crc kubenswrapper[4922]: I1128 06:54:53.188412 4922 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-7gdxt_ac5c6b67-2037-400e-8e03-845b47d8ca67/ovnkube-controller/3.log"
Nov 28 06:54:53 crc kubenswrapper[4922]: I1128 06:54:53.192462 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-7gdxt" event={"ID":"ac5c6b67-2037-400e-8e03-845b47d8ca67","Type":"ContainerStarted","Data":"4d624c510caffbd9c395e40cf877efb8665931031243034a0c2ab69d9a8bbeee"}
Nov 28 06:54:53 crc kubenswrapper[4922]: I1128 06:54:53.814018 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-multus/network-metrics-daemon-9kfr9"]
Nov 28 06:54:53 crc kubenswrapper[4922]: I1128 06:54:53.814175 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-9kfr9"
Nov 28 06:54:53 crc kubenswrapper[4922]: E1128 06:54:53.814347 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-9kfr9" podUID="709beb43-ed88-4a0a-b384-0c463e469964"
Nov 28 06:54:54 crc kubenswrapper[4922]: I1128 06:54:54.198420 4922 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-7gdxt"
Nov 28 06:54:54 crc kubenswrapper[4922]: I1128 06:54:54.245321 4922 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ovn-kubernetes/ovnkube-node-7gdxt" podStartSLOduration=120.245291656 podStartE2EDuration="2m0.245291656s" podCreationTimestamp="2025-11-28 06:52:54 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 06:54:54.24471536 +0000 UTC m=+139.165111012" watchObservedRunningTime="2025-11-28 06:54:54.245291656 +0000 UTC m=+139.165687278"
Nov 28 06:54:54 crc kubenswrapper[4922]: I1128 06:54:54.397864 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 28 06:54:54 crc kubenswrapper[4922]: I1128 06:54:54.397938 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 28 06:54:54 crc kubenswrapper[4922]: I1128 06:54:54.397952 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 28 06:54:54 crc kubenswrapper[4922]: E1128 06:54:54.398080 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 28 06:54:54 crc kubenswrapper[4922]: E1128 06:54:54.398269 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 28 06:54:54 crc kubenswrapper[4922]: E1128 06:54:54.398413 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 28 06:54:55 crc kubenswrapper[4922]: I1128 06:54:55.398390 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-9kfr9"
Nov 28 06:54:55 crc kubenswrapper[4922]: E1128 06:54:55.401033 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-9kfr9" podUID="709beb43-ed88-4a0a-b384-0c463e469964"
Nov 28 06:54:55 crc kubenswrapper[4922]: E1128 06:54:55.506732 4922 kubelet.go:2916] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"
Nov 28 06:54:56 crc kubenswrapper[4922]: I1128 06:54:56.397409 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 28 06:54:56 crc kubenswrapper[4922]: I1128 06:54:56.397486 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 28 06:54:56 crc kubenswrapper[4922]: E1128 06:54:56.397610 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 28 06:54:56 crc kubenswrapper[4922]: I1128 06:54:56.397492 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 28 06:54:56 crc kubenswrapper[4922]: E1128 06:54:56.397746 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 28 06:54:56 crc kubenswrapper[4922]: E1128 06:54:56.397846 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 28 06:54:57 crc kubenswrapper[4922]: I1128 06:54:57.312056 4922 patch_prober.go:28] interesting pod/machine-config-daemon-h8wk6 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 28 06:54:57 crc kubenswrapper[4922]: I1128 06:54:57.312564 4922 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-h8wk6" podUID="0498340a-5e95-42bf-a0a6-8ac89a6b8858" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 28 06:54:57 crc kubenswrapper[4922]: I1128 06:54:57.398433 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-9kfr9"
Nov 28 06:54:57 crc kubenswrapper[4922]: E1128 06:54:57.398660 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-9kfr9" podUID="709beb43-ed88-4a0a-b384-0c463e469964"
Nov 28 06:54:58 crc kubenswrapper[4922]: I1128 06:54:58.397548 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 28 06:54:58 crc kubenswrapper[4922]: I1128 06:54:58.397621 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 28 06:54:58 crc kubenswrapper[4922]: E1128 06:54:58.397743 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 28 06:54:58 crc kubenswrapper[4922]: I1128 06:54:58.398335 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 28 06:54:58 crc kubenswrapper[4922]: E1128 06:54:58.398565 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 28 06:54:58 crc kubenswrapper[4922]: E1128 06:54:58.398629 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 28 06:54:59 crc kubenswrapper[4922]: I1128 06:54:59.397712 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-9kfr9"
Nov 28 06:54:59 crc kubenswrapper[4922]: E1128 06:54:59.397916 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-9kfr9" podUID="709beb43-ed88-4a0a-b384-0c463e469964"
Nov 28 06:55:00 crc kubenswrapper[4922]: I1128 06:55:00.397707 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 28 06:55:00 crc kubenswrapper[4922]: I1128 06:55:00.397776 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 28 06:55:00 crc kubenswrapper[4922]: I1128 06:55:00.397688 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 28 06:55:00 crc kubenswrapper[4922]: E1128 06:55:00.398003 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 28 06:55:00 crc kubenswrapper[4922]: E1128 06:55:00.398132 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 28 06:55:00 crc kubenswrapper[4922]: E1128 06:55:00.398319 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 28 06:55:01 crc kubenswrapper[4922]: I1128 06:55:01.398206 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-9kfr9"
Nov 28 06:55:01 crc kubenswrapper[4922]: I1128 06:55:01.400434 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"metrics-daemon-sa-dockercfg-d427c"
Nov 28 06:55:01 crc kubenswrapper[4922]: I1128 06:55:01.401602 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"metrics-daemon-secret"
Nov 28 06:55:02 crc kubenswrapper[4922]: I1128 06:55:02.321099 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 28 06:55:02 crc kubenswrapper[4922]: E1128 06:55:02.321332 4922 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 06:57:04.321289857 +0000 UTC m=+269.241685479 (durationBeforeRetry 2m2s). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 28 06:55:02 crc kubenswrapper[4922]: I1128 06:55:02.321475 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 28 06:55:02 crc kubenswrapper[4922]: I1128 06:55:02.321557 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 28 06:55:02 crc kubenswrapper[4922]: E1128 06:55:02.321625 4922 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered
Nov 28 06:55:02 crc kubenswrapper[4922]: E1128 06:55:02.321724 4922 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered
Nov 28 06:55:02 crc kubenswrapper[4922]: E1128 06:55:02.321755 4922 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-28 06:57:04.321722279 +0000 UTC m=+269.242117891 (durationBeforeRetry 2m2s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered
Nov 28 06:55:02 crc kubenswrapper[4922]: E1128 06:55:02.321800 4922 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-28 06:57:04.321775331 +0000 UTC m=+269.242170943 (durationBeforeRetry 2m2s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered
Nov 28 06:55:02 crc kubenswrapper[4922]: I1128 06:55:02.397870 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 28 06:55:02 crc kubenswrapper[4922]: I1128 06:55:02.397870 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 28 06:55:02 crc kubenswrapper[4922]: I1128 06:55:02.397901 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 28 06:55:02 crc kubenswrapper[4922]: I1128 06:55:02.401686 4922 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-diagnostics"/"openshift-service-ca.crt"
Nov 28 06:55:02 crc kubenswrapper[4922]: I1128 06:55:02.402353 4922 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-diagnostics"/"kube-root-ca.crt"
Nov 28 06:55:02 crc kubenswrapper[4922]: I1128 06:55:02.402515 4922 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-console"/"networking-console-plugin"
Nov 28 06:55:02 crc kubenswrapper[4922]: I1128 06:55:02.402619 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-console"/"networking-console-plugin-cert"
Nov 28 06:55:02 crc kubenswrapper[4922]: I1128 06:55:02.422693 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 28 06:55:02 crc kubenswrapper[4922]: I1128 06:55:02.431592 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 28 06:55:02 crc kubenswrapper[4922]: I1128 06:55:02.466925 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 28 06:55:02 crc kubenswrapper[4922]: I1128 06:55:02.525123 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 28 06:55:02 crc kubenswrapper[4922]: I1128 06:55:02.531522 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 28 06:55:02 crc kubenswrapper[4922]: I1128 06:55:02.724576 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 28 06:55:02 crc kubenswrapper[4922]: W1128 06:55:02.757916 4922 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod9d751cbb_f2e2_430d_9754_c882a5e924a5.slice/crio-015e45537efe6932343cd9bcc7717019bc38146b721e4be3d73953b05f9d91b2 WatchSource:0}: Error finding container 015e45537efe6932343cd9bcc7717019bc38146b721e4be3d73953b05f9d91b2: Status 404 returned error can't find the container with id 015e45537efe6932343cd9bcc7717019bc38146b721e4be3d73953b05f9d91b2
Nov 28 06:55:02 crc kubenswrapper[4922]: W1128 06:55:02.966261 4922 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod3b6479f0_333b_4a96_9adf_2099afdc2447.slice/crio-0a6d6e52606e61b6741e0b28d5377480b009ec88a5f6fdd489084bfd9a39fc6c WatchSource:0}: Error finding container 0a6d6e52606e61b6741e0b28d5377480b009ec88a5f6fdd489084bfd9a39fc6c: Status 404 returned error can't find the container with id 0a6d6e52606e61b6741e0b28d5377480b009ec88a5f6fdd489084bfd9a39fc6c
Nov 28 06:55:03 crc kubenswrapper[4922]: I1128 06:55:03.238040 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" event={"ID":"3b6479f0-333b-4a96-9adf-2099afdc2447","Type":"ContainerStarted","Data":"275db5b606e4c618354336b098a91fbb85c05f8d3bff2011669e0fdbc42a11ca"}
Nov 28 06:55:03 crc kubenswrapper[4922]: I1128 06:55:03.238129 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" event={"ID":"3b6479f0-333b-4a96-9adf-2099afdc2447","Type":"ContainerStarted","Data":"0a6d6e52606e61b6741e0b28d5377480b009ec88a5f6fdd489084bfd9a39fc6c"}
Nov 28 06:55:03 crc kubenswrapper[4922]: I1128 06:55:03.241876 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" event={"ID":"9d751cbb-f2e2-430d-9754-c882a5e924a5","Type":"ContainerStarted","Data":"f80b988b1da12308212056e96c4219c8b5d60860769fa2873913b9b5093578f3"}
Nov 28 06:55:03 crc kubenswrapper[4922]: I1128 06:55:03.241956 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" event={"ID":"9d751cbb-f2e2-430d-9754-c882a5e924a5","Type":"ContainerStarted","Data":"015e45537efe6932343cd9bcc7717019bc38146b721e4be3d73953b05f9d91b2"}
Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.138153 4922 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeReady"
Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.216672 4922 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-7tnsd"]
Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.217596 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-7tnsd"
Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.220059 4922 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-qckrl"]
Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.220722 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-qckrl"
Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.220943 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-serving-cert"
Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.222478 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"serving-cert"
Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.223961 4922 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-oauth-apiserver/apiserver-7bbb656c7d-9t6kr"]
Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.224859 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-9t6kr"
Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.225553 4922 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-console/downloads-7954f5f757-2xxgj"]
Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.227086 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/downloads-7954f5f757-2xxgj"
Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.227646 4922 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-config"
Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.227704 4922 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"openshift-service-ca.crt"
Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.227678 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"route-controller-manager-sa-dockercfg-h2zr2"
Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.227783 4922 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"openshift-service-ca.crt"
Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.228170 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-dockercfg-xtcjv"
Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.233023 4922 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-nm899"]
Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.234168 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-nm899"
Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.234726 4922 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"config"
Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.234866 4922 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-xlthx"]
Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.235786 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-879f6c89f-xlthx"
Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.236289 4922 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-4b5vr"]
Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.237152 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-4b5vr"
Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.238171 4922 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-config-operator/openshift-config-operator-7777fb866f-4gt9m"]
Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.247629 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-config-operator/openshift-config-operator-7777fb866f-4gt9m"
Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.247783 4922 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"kube-root-ca.crt"
Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.248463 4922 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"openshift-service-ca.crt"
Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.249415 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"etcd-client"
Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.258877 4922 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"audit-1"
Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.259924 4922 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"etcd-serving-ca"
Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.260362 4922 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"kube-root-ca.crt"
Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.260790 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"encryption-config-1"
Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.260813 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"serving-cert"
Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.260894 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"oauth-apiserver-sa-dockercfg-6r2bq"
Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.261279 4922 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-samples-operator"/"openshift-service-ca.crt"
Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.261300 4922 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-service-ca.crt"
Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.261715 4922 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"openshift-service-ca.crt"
Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.262031 4922 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"kube-root-ca.crt"
Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.262173 4922 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"config"
Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.262205 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"serving-cert"
Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.262430 4922 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"kube-root-ca.crt"
Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.262601 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-samples-operator"/"cluster-samples-operator-dockercfg-xpp9w"
Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.262676 4922 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-bqwm2"]
Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.262609 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-samples-operator"/"samples-operator-tls"
Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.268562 4922 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-config-operator"/"openshift-service-ca.crt"
Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.269027 4922 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-config"
Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.269208 4922 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"openshift-service-ca.crt"
Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.269402 4922 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"kube-root-ca.crt"
Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.269557 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-558db77b4-bqwm2"
Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.269931 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"default-dockercfg-chnjx"
Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.270100 4922 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"client-ca"
Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.270242 4922 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"trusted-ca-bundle"
Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.274292 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-7tnsd"]
Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.274354 4922 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/catalog-operator-68c6474976-jf9pl"]
Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.274628 4922 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-config-operator"/"kube-root-ca.crt"
Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.274884 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-jf9pl"
Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.276978 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-config-operator"/"openshift-config-operator-dockercfg-7pc5z"
Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.277216 4922 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-samples-operator"/"kube-root-ca.crt"
Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.277877 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-serving-cert"
Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.278793 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-config-operator"/"config-operator-serving-cert"
Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.280132 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"openshift-controller-manager-sa-dockercfg-msq4c"
Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.280463 4922 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"kube-root-ca.crt"
Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.282367 4922 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"client-ca"
Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.282481 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-dockercfg-vw8fw"
Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.282968 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-qckrl"]
Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.283788 4922 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-w94qh"]
Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.284693 4922 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-global-ca"
Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.285669 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-w94qh"
Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.290090 4922 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-2t2xx"]
Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.290517 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-session"
Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.290661 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-2t2xx"
Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.290758 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-serving-cert"
Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.290901 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"oauth-openshift-dockercfg-znhcc"
Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.291056 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"catalog-operator-serving-cert"
Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.291247 4922 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"kube-root-ca.crt"
Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.290576 4922 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/machine-config-operator-74547568cd-ln9mh"]
Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.291938 4922 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-api/machine-api-operator-5694c8668f-rllbn"]
Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.292437 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/machine-api-operator-5694c8668f-rllbn"
Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.293093 4922 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-apiserver/apiserver-76f77b778f-ht2sn"]
Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.293779 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver/apiserver-76f77b778f-ht2sn"
Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.294185 4922 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/machine-config-controller-84d6567774-qgppj"]
Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.294643 4922 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-cliconfig"
Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.294779 4922 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-service-ca"
Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.294905 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-qgppj"
Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.295020 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-idp-0-file-data"
Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.295255 4922 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"openshift-service-ca.crt"
Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.295373 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-ln9mh"
Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.296027 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"olm-operator-serviceaccount-dockercfg-rq7zk"
Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.296357 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-error"
Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.296467 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-provider-selection"
Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.296496 4922 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"audit"
Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.296573 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-router-certs"
Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.299682 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"control-plane-machine-set-operator-tls"
Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.299785 4922 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"openshift-service-ca.crt"
Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.300067 4922 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-authentication-operator/authentication-operator-69f744f599-fzxnn"]
Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.300333 4922 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"openshift-service-ca.crt"
Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.300464 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"pprof-cert"
Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.300711 4922 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"kube-root-ca.crt"
Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.300904 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"control-plane-machine-set-operator-dockercfg-k9rxt"
Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.300997 4922 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"etcd-serving-ca"
Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.301361 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-69f744f599-fzxnn"
Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.303943 4922 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-h7pf2"]
Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.304055 4922 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"kube-root-ca.crt"
Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.304269 4922 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"image-import-ca"
Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.304336 4922 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"config"
Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.304333 4922 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"openshift-service-ca.crt"
Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.304619 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-h7pf2"
Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.304742 4922 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-multus/multus-admission-controller-857f4d67dd-98r22"]
Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.305126 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-admission-controller-857f4d67dd-98r22"
Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.305864 4922 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-gzqmt"]
Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.306830 4922 util.go:30] "No sandbox for pod can be found.
Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-gzqmt" Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.306841 4922 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ingress/router-default-5444994796-xbhv9"] Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.309475 4922 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-service-ca/service-ca-9c57cc56f-k2x5w"] Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.319614 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/b326d97a-72e4-4cbd-bea5-08613505b7e0-serving-cert\") pod \"openshift-apiserver-operator-796bbdcf4f-7tnsd\" (UID: \"b326d97a-72e4-4cbd-bea5-08613505b7e0\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-7tnsd" Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.319665 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/fede5fe4-d38c-46de-b334-32e9f56cf110-client-ca\") pod \"controller-manager-879f6c89f-xlthx\" (UID: \"fede5fe4-d38c-46de-b334-32e9f56cf110\") " pod="openshift-controller-manager/controller-manager-879f6c89f-xlthx" Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.319700 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1aefeaa5-e3f1-4aed-b152-35d380c3f87b-config\") pod \"route-controller-manager-6576b87f9c-qckrl\" (UID: \"1aefeaa5-e3f1-4aed-b152-35d380c3f87b\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-qckrl" Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.319725 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/ddd22a7d-d9fd-41c5-a83a-8b68574e637e-serving-cert\") pod \"openshift-controller-manager-operator-756b6f6bc6-4b5vr\" (UID: \"ddd22a7d-d9fd-41c5-a83a-8b68574e637e\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-4b5vr" Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.319745 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gxrpb\" (UniqueName: \"kubernetes.io/projected/49208aaf-d742-4bab-9b7e-b883e88096f6-kube-api-access-gxrpb\") pod \"downloads-7954f5f757-2xxgj\" (UID: \"49208aaf-d742-4bab-9b7e-b883e88096f6\") " pod="openshift-console/downloads-7954f5f757-2xxgj" Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.319763 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/1aefeaa5-e3f1-4aed-b152-35d380c3f87b-client-ca\") pod \"route-controller-manager-6576b87f9c-qckrl\" (UID: \"1aefeaa5-e3f1-4aed-b152-35d380c3f87b\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-qckrl" Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.319783 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-t6msm\" (UniqueName: \"kubernetes.io/projected/65e269c0-8d17-410e-ae5c-4ff9ba053bf9-kube-api-access-t6msm\") pod \"cluster-samples-operator-665b6dd947-nm899\" (UID: \"65e269c0-8d17-410e-ae5c-4ff9ba053bf9\") " 
pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-nm899" Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.319803 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b326d97a-72e4-4cbd-bea5-08613505b7e0-config\") pod \"openshift-apiserver-operator-796bbdcf4f-7tnsd\" (UID: \"b326d97a-72e4-4cbd-bea5-08613505b7e0\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-7tnsd" Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.319819 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/fede5fe4-d38c-46de-b334-32e9f56cf110-serving-cert\") pod \"controller-manager-879f6c89f-xlthx\" (UID: \"fede5fe4-d38c-46de-b334-32e9f56cf110\") " pod="openshift-controller-manager/controller-manager-879f6c89f-xlthx" Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.319839 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-87lkg\" (UniqueName: \"kubernetes.io/projected/1aefeaa5-e3f1-4aed-b152-35d380c3f87b-kube-api-access-87lkg\") pod \"route-controller-manager-6576b87f9c-qckrl\" (UID: \"1aefeaa5-e3f1-4aed-b152-35d380c3f87b\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-qckrl" Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.319859 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/fede5fe4-d38c-46de-b334-32e9f56cf110-proxy-ca-bundles\") pod \"controller-manager-879f6c89f-xlthx\" (UID: \"fede5fe4-d38c-46de-b334-32e9f56cf110\") " pod="openshift-controller-manager/controller-manager-879f6c89f-xlthx" Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.319893 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/d58dc2f1-e069-4b63-8371-7c6b2735adab-encryption-config\") pod \"apiserver-7bbb656c7d-9t6kr\" (UID: \"d58dc2f1-e069-4b63-8371-7c6b2735adab\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-9t6kr" Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.319916 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/d58dc2f1-e069-4b63-8371-7c6b2735adab-trusted-ca-bundle\") pod \"apiserver-7bbb656c7d-9t6kr\" (UID: \"d58dc2f1-e069-4b63-8371-7c6b2735adab\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-9t6kr" Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.319945 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4wdk2\" (UniqueName: \"kubernetes.io/projected/d58dc2f1-e069-4b63-8371-7c6b2735adab-kube-api-access-4wdk2\") pod \"apiserver-7bbb656c7d-9t6kr\" (UID: \"d58dc2f1-e069-4b63-8371-7c6b2735adab\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-9t6kr" Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.319965 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/fede5fe4-d38c-46de-b334-32e9f56cf110-config\") pod \"controller-manager-879f6c89f-xlthx\" (UID: \"fede5fe4-d38c-46de-b334-32e9f56cf110\") " 
pod="openshift-controller-manager/controller-manager-879f6c89f-xlthx" Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.319984 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-w46t2\" (UniqueName: \"kubernetes.io/projected/ddd22a7d-d9fd-41c5-a83a-8b68574e637e-kube-api-access-w46t2\") pod \"openshift-controller-manager-operator-756b6f6bc6-4b5vr\" (UID: \"ddd22a7d-d9fd-41c5-a83a-8b68574e637e\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-4b5vr" Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.320006 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/d58dc2f1-e069-4b63-8371-7c6b2735adab-etcd-serving-ca\") pod \"apiserver-7bbb656c7d-9t6kr\" (UID: \"d58dc2f1-e069-4b63-8371-7c6b2735adab\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-9t6kr" Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.320025 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/d58dc2f1-e069-4b63-8371-7c6b2735adab-audit-dir\") pod \"apiserver-7bbb656c7d-9t6kr\" (UID: \"d58dc2f1-e069-4b63-8371-7c6b2735adab\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-9t6kr" Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.320043 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-88lpg\" (UniqueName: \"kubernetes.io/projected/b326d97a-72e4-4cbd-bea5-08613505b7e0-kube-api-access-88lpg\") pod \"openshift-apiserver-operator-796bbdcf4f-7tnsd\" (UID: \"b326d97a-72e4-4cbd-bea5-08613505b7e0\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-7tnsd" Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.320062 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/d58dc2f1-e069-4b63-8371-7c6b2735adab-serving-cert\") pod \"apiserver-7bbb656c7d-9t6kr\" (UID: \"d58dc2f1-e069-4b63-8371-7c6b2735adab\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-9t6kr" Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.320092 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/d58dc2f1-e069-4b63-8371-7c6b2735adab-etcd-client\") pod \"apiserver-7bbb656c7d-9t6kr\" (UID: \"d58dc2f1-e069-4b63-8371-7c6b2735adab\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-9t6kr" Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.320122 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1aefeaa5-e3f1-4aed-b152-35d380c3f87b-serving-cert\") pod \"route-controller-manager-6576b87f9c-qckrl\" (UID: \"1aefeaa5-e3f1-4aed-b152-35d380c3f87b\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-qckrl" Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.320146 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/d58dc2f1-e069-4b63-8371-7c6b2735adab-audit-policies\") pod \"apiserver-7bbb656c7d-9t6kr\" (UID: \"d58dc2f1-e069-4b63-8371-7c6b2735adab\") " 
pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-9t6kr" Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.320166 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bz7dq\" (UniqueName: \"kubernetes.io/projected/fede5fe4-d38c-46de-b334-32e9f56cf110-kube-api-access-bz7dq\") pod \"controller-manager-879f6c89f-xlthx\" (UID: \"fede5fe4-d38c-46de-b334-32e9f56cf110\") " pod="openshift-controller-manager/controller-manager-879f6c89f-xlthx" Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.320188 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ddd22a7d-d9fd-41c5-a83a-8b68574e637e-config\") pod \"openshift-controller-manager-operator-756b6f6bc6-4b5vr\" (UID: \"ddd22a7d-d9fd-41c5-a83a-8b68574e637e\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-4b5vr" Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.320205 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/65e269c0-8d17-410e-ae5c-4ff9ba053bf9-samples-operator-tls\") pod \"cluster-samples-operator-665b6dd947-nm899\" (UID: \"65e269c0-8d17-410e-ae5c-4ff9ba053bf9\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-nm899" Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.320456 4922 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"audit-1" Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.322242 4922 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"kube-root-ca.crt" Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.325258 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-controller-dockercfg-c2lfx" Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.325444 4922 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"authentication-operator-config" Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.325738 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-admission-controller-secret" Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.326023 4922 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"service-ca-bundle" Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.326141 4922 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"openshift-service-ca.crt" Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.328485 4922 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"machine-api-operator-images" Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.329053 4922 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"machine-config-operator-images" Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.329680 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"machine-api-operator-tls" Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.329796 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"mco-proxy-tls" Nov 28 
06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.329959 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"machine-api-operator-dockercfg-mfbb7" Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.330089 4922 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"openshift-service-ca.crt" Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.330283 4922 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"kube-rbac-proxy" Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.330575 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-operator-dockercfg-98p87" Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.331110 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"openshift-apiserver-sa-dockercfg-djjff" Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.331659 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"encryption-config-1" Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.332105 4922 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"kube-root-ca.crt" Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.332503 4922 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"kube-root-ca.crt" Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.333561 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication-operator"/"serving-cert" Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.333805 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"mcc-proxy-tls" Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.340629 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"marketplace-operator-dockercfg-5nsgg" Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.342583 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"marketplace-operator-metrics" Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.343209 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"etcd-client" Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.348362 4922 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-sckww"] Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.348768 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress/router-default-5444994796-xbhv9" Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.348901 4922 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-service-ca/service-ca-9c57cc56f-k2x5w" Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.348945 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"serving-cert" Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.369847 4922 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-storage-version-migrator/migrator-59844c95c7-54gpd"] Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.370163 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication-operator"/"authentication-operator-dockercfg-mz9bj" Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.370258 4922 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-console/console-f9d7485db-h297g"] Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.371402 4922 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-dns-operator/dns-operator-744455d44c-s8ws4"] Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.372242 4922 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-r4frz"] Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.372804 4922 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-7wtcp"] Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.373130 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-7wtcp" Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.371004 4922 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"marketplace-trusted-ca" Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.372809 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-r4frz" Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.370371 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-697d97f7c8-sckww" Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.371642 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-f9d7485db-h297g" Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.370335 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-54gpd" Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.374714 4922 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-sfzn5"] Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.375040 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-sfzn5" Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.375369 4922 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-dns-operator/dns-operator-744455d44c-s8ws4" Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.378202 4922 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-8nrlh"] Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.378579 4922 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-etcd-operator/etcd-operator-b45778765-m6bkw"] Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.378852 4922 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-98sbt"] Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.379163 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-98sbt" Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.379640 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-8nrlh" Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.379783 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd-operator/etcd-operator-b45778765-m6bkw" Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.384588 4922 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"trusted-ca-bundle" Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.384949 4922 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-cluster-machine-approver/machine-approver-56656f9798-pp742"] Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.389317 4922 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-trusted-ca-bundle" Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.389335 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-login" Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.389705 4922 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"trusted-ca-bundle" Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.392459 4922 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"openshift-service-ca.crt" Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.394283 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-ac-dockercfg-9lkdf" Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.394432 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator-operator"/"serving-cert" Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.394570 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"package-server-manager-serving-cert" Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.399187 4922 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29405205-gln9g"] Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.399522 4922 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-5pc2z"] Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.399848 4922 kubelet.go:2421] "SyncLoop ADD" source="api" 
pods=["openshift-console-operator/console-operator-58897d9998-z4zvd"] Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.400146 4922 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ingress-operator/ingress-operator-5b745b69d9-d6r4k"] Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.400530 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29405205-gln9g" Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.400594 4922 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-service-ca-operator/service-ca-operator-777779d784-hgtbm"] Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.400801 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-pp742" Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.400985 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-777779d784-hgtbm" Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.401151 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-5pc2z" Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.401358 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-operator-58897d9998-z4zvd" Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.401406 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-d6r4k" Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.404582 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/downloads-7954f5f757-2xxgj"] Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.404632 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-oauth-apiserver/apiserver-7bbb656c7d-9t6kr"] Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.404783 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-4b5vr"] Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.409844 4922 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"config" Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.417639 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-nm899"] Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.417771 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-ocp-branding-template" Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.420527 4922 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"kube-root-ca.crt" Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.420779 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-xlthx"] Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.420913 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4wdk2\" (UniqueName: \"kubernetes.io/projected/d58dc2f1-e069-4b63-8371-7c6b2735adab-kube-api-access-4wdk2\") pod 
\"apiserver-7bbb656c7d-9t6kr\" (UID: \"d58dc2f1-e069-4b63-8371-7c6b2735adab\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-9t6kr" Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.420952 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/fede5fe4-d38c-46de-b334-32e9f56cf110-config\") pod \"controller-manager-879f6c89f-xlthx\" (UID: \"fede5fe4-d38c-46de-b334-32e9f56cf110\") " pod="openshift-controller-manager/controller-manager-879f6c89f-xlthx" Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.420979 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-w46t2\" (UniqueName: \"kubernetes.io/projected/ddd22a7d-d9fd-41c5-a83a-8b68574e637e-kube-api-access-w46t2\") pod \"openshift-controller-manager-operator-756b6f6bc6-4b5vr\" (UID: \"ddd22a7d-d9fd-41c5-a83a-8b68574e637e\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-4b5vr" Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.421020 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2bzkj\" (UniqueName: \"kubernetes.io/projected/fb28612c-20e3-4319-9db8-dae18a593a1e-kube-api-access-2bzkj\") pod \"package-server-manager-789f6589d5-h7pf2\" (UID: \"fb28612c-20e3-4319-9db8-dae18a593a1e\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-h7pf2" Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.421043 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/468ca9dc-6af3-47ae-8c7c-b9338ceae695-etcd-serving-ca\") pod \"apiserver-76f77b778f-ht2sn\" (UID: \"468ca9dc-6af3-47ae-8c7c-b9338ceae695\") " pod="openshift-apiserver/apiserver-76f77b778f-ht2sn" Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.421066 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/87cf54e1-7498-430e-a517-6658bd9ce547-default-certificate\") pod \"router-default-5444994796-xbhv9\" (UID: \"87cf54e1-7498-430e-a517-6658bd9ce547\") " pod="openshift-ingress/router-default-5444994796-xbhv9" Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.421087 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/d58dc2f1-e069-4b63-8371-7c6b2735adab-etcd-serving-ca\") pod \"apiserver-7bbb656c7d-9t6kr\" (UID: \"d58dc2f1-e069-4b63-8371-7c6b2735adab\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-9t6kr" Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.421110 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/468ca9dc-6af3-47ae-8c7c-b9338ceae695-encryption-config\") pod \"apiserver-76f77b778f-ht2sn\" (UID: \"468ca9dc-6af3-47ae-8c7c-b9338ceae695\") " pod="openshift-apiserver/apiserver-76f77b778f-ht2sn" Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.421133 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/d58dc2f1-e069-4b63-8371-7c6b2735adab-audit-dir\") pod \"apiserver-7bbb656c7d-9t6kr\" (UID: \"d58dc2f1-e069-4b63-8371-7c6b2735adab\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-9t6kr" 
Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.421156 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-88lpg\" (UniqueName: \"kubernetes.io/projected/b326d97a-72e4-4cbd-bea5-08613505b7e0-kube-api-access-88lpg\") pod \"openshift-apiserver-operator-796bbdcf4f-7tnsd\" (UID: \"b326d97a-72e4-4cbd-bea5-08613505b7e0\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-7tnsd" Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.421180 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/b5c2983d-2d59-4691-b2cd-130bfcd3e18c-machine-api-operator-tls\") pod \"machine-api-operator-5694c8668f-rllbn\" (UID: \"b5c2983d-2d59-4691-b2cd-130bfcd3e18c\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-rllbn" Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.421204 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/d952449e-c825-43d3-a591-0be473db6a53-v4-0-config-user-template-login\") pod \"oauth-openshift-558db77b4-bqwm2\" (UID: \"d952449e-c825-43d3-a591-0be473db6a53\") " pod="openshift-authentication/oauth-openshift-558db77b4-bqwm2" Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.421247 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/7a50e5be-5b15-472d-a504-3dc449b474e6-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-2t2xx\" (UID: \"7a50e5be-5b15-472d-a504-3dc449b474e6\") " pod="openshift-marketplace/marketplace-operator-79b997595-2t2xx" Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.421269 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/87cf54e1-7498-430e-a517-6658bd9ce547-service-ca-bundle\") pod \"router-default-5444994796-xbhv9\" (UID: \"87cf54e1-7498-430e-a517-6658bd9ce547\") " pod="openshift-ingress/router-default-5444994796-xbhv9" Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.421291 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/d58dc2f1-e069-4b63-8371-7c6b2735adab-serving-cert\") pod \"apiserver-7bbb656c7d-9t6kr\" (UID: \"d58dc2f1-e069-4b63-8371-7c6b2735adab\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-9t6kr" Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.421309 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/d58dc2f1-e069-4b63-8371-7c6b2735adab-etcd-client\") pod \"apiserver-7bbb656c7d-9t6kr\" (UID: \"d58dc2f1-e069-4b63-8371-7c6b2735adab\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-9t6kr" Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.421335 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/87cf54e1-7498-430e-a517-6658bd9ce547-stats-auth\") pod \"router-default-5444994796-xbhv9\" (UID: \"87cf54e1-7498-430e-a517-6658bd9ce547\") " pod="openshift-ingress/router-default-5444994796-xbhv9" Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.421366 4922 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/d952449e-c825-43d3-a591-0be473db6a53-v4-0-config-user-template-error\") pod \"oauth-openshift-558db77b4-bqwm2\" (UID: \"d952449e-c825-43d3-a591-0be473db6a53\") " pod="openshift-authentication/oauth-openshift-558db77b4-bqwm2" Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.421392 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/da14c12f-6323-4600-9d9e-b2e5d53e1ecb-serving-cert\") pod \"openshift-config-operator-7777fb866f-4gt9m\" (UID: \"da14c12f-6323-4600-9d9e-b2e5d53e1ecb\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-4gt9m" Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.421415 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e77ea863-52c9-4f43-968f-73ba31a6b0de-serving-cert\") pod \"kube-storage-version-migrator-operator-b67b599dd-gzqmt\" (UID: \"e77ea863-52c9-4f43-968f-73ba31a6b0de\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-gzqmt" Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.421438 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-t5xj2\" (UniqueName: \"kubernetes.io/projected/9ebbc267-ceb6-444d-b767-493e54d573b6-kube-api-access-t5xj2\") pod \"catalog-operator-68c6474976-jf9pl\" (UID: \"9ebbc267-ceb6-444d-b767-493e54d573b6\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-jf9pl" Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.421459 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/468ca9dc-6af3-47ae-8c7c-b9338ceae695-audit\") pod \"apiserver-76f77b778f-ht2sn\" (UID: \"468ca9dc-6af3-47ae-8c7c-b9338ceae695\") " pod="openshift-apiserver/apiserver-76f77b778f-ht2sn" Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.421478 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1aefeaa5-e3f1-4aed-b152-35d380c3f87b-serving-cert\") pod \"route-controller-manager-6576b87f9c-qckrl\" (UID: \"1aefeaa5-e3f1-4aed-b152-35d380c3f87b\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-qckrl" Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.421500 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/059ac1e5-20d6-4a32-846e-d427680a560f-proxy-tls\") pod \"machine-config-operator-74547568cd-ln9mh\" (UID: \"059ac1e5-20d6-4a32-846e-d427680a560f\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-ln9mh" Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.421523 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/d952449e-c825-43d3-a591-0be473db6a53-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-558db77b4-bqwm2\" (UID: \"d952449e-c825-43d3-a591-0be473db6a53\") " pod="openshift-authentication/oauth-openshift-558db77b4-bqwm2" Nov 28 06:55:10 crc 
kubenswrapper[4922]: I1128 06:55:10.421544 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/7842430c-0bd1-459a-9840-f4c4d31baa52-service-ca-bundle\") pod \"authentication-operator-69f744f599-fzxnn\" (UID: \"7842430c-0bd1-459a-9840-f4c4d31baa52\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-fzxnn" Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.421562 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dlmf5\" (UniqueName: \"kubernetes.io/projected/d0751583-7dc1-4547-9240-39d874a6ca87-kube-api-access-dlmf5\") pod \"multus-admission-controller-857f4d67dd-98r22\" (UID: \"d0751583-7dc1-4547-9240-39d874a6ca87\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-98r22" Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.421583 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/d952449e-c825-43d3-a591-0be473db6a53-v4-0-config-system-serving-cert\") pod \"oauth-openshift-558db77b4-bqwm2\" (UID: \"d952449e-c825-43d3-a591-0be473db6a53\") " pod="openshift-authentication/oauth-openshift-558db77b4-bqwm2" Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.421604 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/059ac1e5-20d6-4a32-846e-d427680a560f-auth-proxy-config\") pod \"machine-config-operator-74547568cd-ln9mh\" (UID: \"059ac1e5-20d6-4a32-846e-d427680a560f\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-ln9mh" Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.421625 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/7a50e5be-5b15-472d-a504-3dc449b474e6-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-2t2xx\" (UID: \"7a50e5be-5b15-472d-a504-3dc449b474e6\") " pod="openshift-marketplace/marketplace-operator-79b997595-2t2xx" Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.421643 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8grsb\" (UniqueName: \"kubernetes.io/projected/059ac1e5-20d6-4a32-846e-d427680a560f-kube-api-access-8grsb\") pod \"machine-config-operator-74547568cd-ln9mh\" (UID: \"059ac1e5-20d6-4a32-846e-d427680a560f\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-ln9mh" Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.421662 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-pullsecrets\" (UniqueName: \"kubernetes.io/host-path/468ca9dc-6af3-47ae-8c7c-b9338ceae695-node-pullsecrets\") pod \"apiserver-76f77b778f-ht2sn\" (UID: \"468ca9dc-6af3-47ae-8c7c-b9338ceae695\") " pod="openshift-apiserver/apiserver-76f77b778f-ht2sn" Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.421684 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-424x9\" (UniqueName: \"kubernetes.io/projected/4fa6813c-f797-44b6-8779-7cfc802379fc-kube-api-access-424x9\") pod \"machine-config-controller-84d6567774-qgppj\" (UID: 
\"4fa6813c-f797-44b6-8779-7cfc802379fc\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-qgppj" Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.421705 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/4fa6813c-f797-44b6-8779-7cfc802379fc-proxy-tls\") pod \"machine-config-controller-84d6567774-qgppj\" (UID: \"4fa6813c-f797-44b6-8779-7cfc802379fc\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-qgppj" Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.421726 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/d58dc2f1-e069-4b63-8371-7c6b2735adab-audit-policies\") pod \"apiserver-7bbb656c7d-9t6kr\" (UID: \"d58dc2f1-e069-4b63-8371-7c6b2735adab\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-9t6kr" Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.421742 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bz7dq\" (UniqueName: \"kubernetes.io/projected/fede5fe4-d38c-46de-b334-32e9f56cf110-kube-api-access-bz7dq\") pod \"controller-manager-879f6c89f-xlthx\" (UID: \"fede5fe4-d38c-46de-b334-32e9f56cf110\") " pod="openshift-controller-manager/controller-manager-879f6c89f-xlthx" Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.421763 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ddd22a7d-d9fd-41c5-a83a-8b68574e637e-config\") pod \"openshift-controller-manager-operator-756b6f6bc6-4b5vr\" (UID: \"ddd22a7d-d9fd-41c5-a83a-8b68574e637e\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-4b5vr" Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.421784 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/9ebbc267-ceb6-444d-b767-493e54d573b6-srv-cert\") pod \"catalog-operator-68c6474976-jf9pl\" (UID: \"9ebbc267-ceb6-444d-b767-493e54d573b6\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-jf9pl" Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.423360 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/d0751583-7dc1-4547-9240-39d874a6ca87-webhook-certs\") pod \"multus-admission-controller-857f4d67dd-98r22\" (UID: \"d0751583-7dc1-4547-9240-39d874a6ca87\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-98r22" Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.423399 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/059ac1e5-20d6-4a32-846e-d427680a560f-images\") pod \"machine-config-operator-74547568cd-ln9mh\" (UID: \"059ac1e5-20d6-4a32-846e-d427680a560f\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-ln9mh" Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.423424 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/468ca9dc-6af3-47ae-8c7c-b9338ceae695-trusted-ca-bundle\") pod \"apiserver-76f77b778f-ht2sn\" (UID: \"468ca9dc-6af3-47ae-8c7c-b9338ceae695\") " 
pod="openshift-apiserver/apiserver-76f77b778f-ht2sn" Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.423448 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5tlxw\" (UniqueName: \"kubernetes.io/projected/468ca9dc-6af3-47ae-8c7c-b9338ceae695-kube-api-access-5tlxw\") pod \"apiserver-76f77b778f-ht2sn\" (UID: \"468ca9dc-6af3-47ae-8c7c-b9338ceae695\") " pod="openshift-apiserver/apiserver-76f77b778f-ht2sn" Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.423472 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/4fa6813c-f797-44b6-8779-7cfc802379fc-mcc-auth-proxy-config\") pod \"machine-config-controller-84d6567774-qgppj\" (UID: \"4fa6813c-f797-44b6-8779-7cfc802379fc\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-qgppj" Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.423494 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/468ca9dc-6af3-47ae-8c7c-b9338ceae695-image-import-ca\") pod \"apiserver-76f77b778f-ht2sn\" (UID: \"468ca9dc-6af3-47ae-8c7c-b9338ceae695\") " pod="openshift-apiserver/apiserver-76f77b778f-ht2sn" Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.423513 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-t4f99\" (UniqueName: \"kubernetes.io/projected/87cf54e1-7498-430e-a517-6658bd9ce547-kube-api-access-t4f99\") pod \"router-default-5444994796-xbhv9\" (UID: \"87cf54e1-7498-430e-a517-6658bd9ce547\") " pod="openshift-ingress/router-default-5444994796-xbhv9" Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.423537 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/65e269c0-8d17-410e-ae5c-4ff9ba053bf9-samples-operator-tls\") pod \"cluster-samples-operator-665b6dd947-nm899\" (UID: \"65e269c0-8d17-410e-ae5c-4ff9ba053bf9\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-nm899" Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.423559 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e77ea863-52c9-4f43-968f-73ba31a6b0de-config\") pod \"kube-storage-version-migrator-operator-b67b599dd-gzqmt\" (UID: \"e77ea863-52c9-4f43-968f-73ba31a6b0de\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-gzqmt" Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.423583 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/b326d97a-72e4-4cbd-bea5-08613505b7e0-serving-cert\") pod \"openshift-apiserver-operator-796bbdcf4f-7tnsd\" (UID: \"b326d97a-72e4-4cbd-bea5-08613505b7e0\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-7tnsd" Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.423606 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/fede5fe4-d38c-46de-b334-32e9f56cf110-client-ca\") pod \"controller-manager-879f6c89f-xlthx\" (UID: \"fede5fe4-d38c-46de-b334-32e9f56cf110\") " 
pod="openshift-controller-manager/controller-manager-879f6c89f-xlthx" Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.423631 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/d952449e-c825-43d3-a591-0be473db6a53-v4-0-config-system-cliconfig\") pod \"oauth-openshift-558db77b4-bqwm2\" (UID: \"d952449e-c825-43d3-a591-0be473db6a53\") " pod="openshift-authentication/oauth-openshift-558db77b4-bqwm2" Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.423652 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/d952449e-c825-43d3-a591-0be473db6a53-v4-0-config-system-session\") pod \"oauth-openshift-558db77b4-bqwm2\" (UID: \"d952449e-c825-43d3-a591-0be473db6a53\") " pod="openshift-authentication/oauth-openshift-558db77b4-bqwm2" Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.423673 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2r9qq\" (UniqueName: \"kubernetes.io/projected/7a50e5be-5b15-472d-a504-3dc449b474e6-kube-api-access-2r9qq\") pod \"marketplace-operator-79b997595-2t2xx\" (UID: \"7a50e5be-5b15-472d-a504-3dc449b474e6\") " pod="openshift-marketplace/marketplace-operator-79b997595-2t2xx" Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.423694 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/468ca9dc-6af3-47ae-8c7c-b9338ceae695-config\") pod \"apiserver-76f77b778f-ht2sn\" (UID: \"468ca9dc-6af3-47ae-8c7c-b9338ceae695\") " pod="openshift-apiserver/apiserver-76f77b778f-ht2sn" Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.423714 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b5c2983d-2d59-4691-b2cd-130bfcd3e18c-config\") pod \"machine-api-operator-5694c8668f-rllbn\" (UID: \"b5c2983d-2d59-4691-b2cd-130bfcd3e18c\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-rllbn" Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.423737 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/d952449e-c825-43d3-a591-0be473db6a53-audit-dir\") pod \"oauth-openshift-558db77b4-bqwm2\" (UID: \"d952449e-c825-43d3-a591-0be473db6a53\") " pod="openshift-authentication/oauth-openshift-558db77b4-bqwm2" Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.423756 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/468ca9dc-6af3-47ae-8c7c-b9338ceae695-audit-dir\") pod \"apiserver-76f77b778f-ht2sn\" (UID: \"468ca9dc-6af3-47ae-8c7c-b9338ceae695\") " pod="openshift-apiserver/apiserver-76f77b778f-ht2sn" Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.423781 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-27zh4\" (UniqueName: \"kubernetes.io/projected/7842430c-0bd1-459a-9840-f4c4d31baa52-kube-api-access-27zh4\") pod \"authentication-operator-69f744f599-fzxnn\" (UID: \"7842430c-0bd1-459a-9840-f4c4d31baa52\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-fzxnn" Nov 28 
06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.423799 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/87cf54e1-7498-430e-a517-6658bd9ce547-metrics-certs\") pod \"router-default-5444994796-xbhv9\" (UID: \"87cf54e1-7498-430e-a517-6658bd9ce547\") " pod="openshift-ingress/router-default-5444994796-xbhv9" Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.423824 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/d952449e-c825-43d3-a591-0be473db6a53-audit-policies\") pod \"oauth-openshift-558db77b4-bqwm2\" (UID: \"d952449e-c825-43d3-a591-0be473db6a53\") " pod="openshift-authentication/oauth-openshift-558db77b4-bqwm2" Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.423850 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1aefeaa5-e3f1-4aed-b152-35d380c3f87b-config\") pod \"route-controller-manager-6576b87f9c-qckrl\" (UID: \"1aefeaa5-e3f1-4aed-b152-35d380c3f87b\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-qckrl" Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.423875 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/ddd22a7d-d9fd-41c5-a83a-8b68574e637e-serving-cert\") pod \"openshift-controller-manager-operator-756b6f6bc6-4b5vr\" (UID: \"ddd22a7d-d9fd-41c5-a83a-8b68574e637e\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-4b5vr" Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.423897 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/648b4624-2324-4ec1-aa88-5822c9f89034-control-plane-machine-set-operator-tls\") pod \"control-plane-machine-set-operator-78cbb6b69f-w94qh\" (UID: \"648b4624-2324-4ec1-aa88-5822c9f89034\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-w94qh" Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.423925 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gxrpb\" (UniqueName: \"kubernetes.io/projected/49208aaf-d742-4bab-9b7e-b883e88096f6-kube-api-access-gxrpb\") pod \"downloads-7954f5f757-2xxgj\" (UID: \"49208aaf-d742-4bab-9b7e-b883e88096f6\") " pod="openshift-console/downloads-7954f5f757-2xxgj" Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.423953 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/1aefeaa5-e3f1-4aed-b152-35d380c3f87b-client-ca\") pod \"route-controller-manager-6576b87f9c-qckrl\" (UID: \"1aefeaa5-e3f1-4aed-b152-35d380c3f87b\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-qckrl" Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.423982 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hl8rf\" (UniqueName: \"kubernetes.io/projected/e77ea863-52c9-4f43-968f-73ba31a6b0de-kube-api-access-hl8rf\") pod \"kube-storage-version-migrator-operator-b67b599dd-gzqmt\" (UID: \"e77ea863-52c9-4f43-968f-73ba31a6b0de\") " 
pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-gzqmt" Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.424003 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7842430c-0bd1-459a-9840-f4c4d31baa52-serving-cert\") pod \"authentication-operator-69f744f599-fzxnn\" (UID: \"7842430c-0bd1-459a-9840-f4c4d31baa52\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-fzxnn" Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.424024 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-t6msm\" (UniqueName: \"kubernetes.io/projected/65e269c0-8d17-410e-ae5c-4ff9ba053bf9-kube-api-access-t6msm\") pod \"cluster-samples-operator-665b6dd947-nm899\" (UID: \"65e269c0-8d17-410e-ae5c-4ff9ba053bf9\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-nm899" Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.424045 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hn4gk\" (UniqueName: \"kubernetes.io/projected/b5c2983d-2d59-4691-b2cd-130bfcd3e18c-kube-api-access-hn4gk\") pod \"machine-api-operator-5694c8668f-rllbn\" (UID: \"b5c2983d-2d59-4691-b2cd-130bfcd3e18c\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-rllbn" Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.424071 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/d952449e-c825-43d3-a591-0be473db6a53-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-558db77b4-bqwm2\" (UID: \"d952449e-c825-43d3-a591-0be473db6a53\") " pod="openshift-authentication/oauth-openshift-558db77b4-bqwm2" Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.424092 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/468ca9dc-6af3-47ae-8c7c-b9338ceae695-serving-cert\") pod \"apiserver-76f77b778f-ht2sn\" (UID: \"468ca9dc-6af3-47ae-8c7c-b9338ceae695\") " pod="openshift-apiserver/apiserver-76f77b778f-ht2sn" Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.424111 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b326d97a-72e4-4cbd-bea5-08613505b7e0-config\") pod \"openshift-apiserver-operator-796bbdcf4f-7tnsd\" (UID: \"b326d97a-72e4-4cbd-bea5-08613505b7e0\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-7tnsd" Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.424135 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/fede5fe4-d38c-46de-b334-32e9f56cf110-serving-cert\") pod \"controller-manager-879f6c89f-xlthx\" (UID: \"fede5fe4-d38c-46de-b334-32e9f56cf110\") " pod="openshift-controller-manager/controller-manager-879f6c89f-xlthx" Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.424160 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7842430c-0bd1-459a-9840-f4c4d31baa52-config\") pod \"authentication-operator-69f744f599-fzxnn\" (UID: \"7842430c-0bd1-459a-9840-f4c4d31baa52\") " 
pod="openshift-authentication-operator/authentication-operator-69f744f599-fzxnn" Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.424190 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/fb28612c-20e3-4319-9db8-dae18a593a1e-package-server-manager-serving-cert\") pod \"package-server-manager-789f6589d5-h7pf2\" (UID: \"fb28612c-20e3-4319-9db8-dae18a593a1e\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-h7pf2" Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.424229 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/d952449e-c825-43d3-a591-0be473db6a53-v4-0-config-system-service-ca\") pod \"oauth-openshift-558db77b4-bqwm2\" (UID: \"d952449e-c825-43d3-a591-0be473db6a53\") " pod="openshift-authentication/oauth-openshift-558db77b4-bqwm2" Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.424247 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nklb9\" (UniqueName: \"kubernetes.io/projected/d952449e-c825-43d3-a591-0be473db6a53-kube-api-access-nklb9\") pod \"oauth-openshift-558db77b4-bqwm2\" (UID: \"d952449e-c825-43d3-a591-0be473db6a53\") " pod="openshift-authentication/oauth-openshift-558db77b4-bqwm2" Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.424276 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-87lkg\" (UniqueName: \"kubernetes.io/projected/1aefeaa5-e3f1-4aed-b152-35d380c3f87b-kube-api-access-87lkg\") pod \"route-controller-manager-6576b87f9c-qckrl\" (UID: \"1aefeaa5-e3f1-4aed-b152-35d380c3f87b\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-qckrl" Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.424297 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/fede5fe4-d38c-46de-b334-32e9f56cf110-proxy-ca-bundles\") pod \"controller-manager-879f6c89f-xlthx\" (UID: \"fede5fe4-d38c-46de-b334-32e9f56cf110\") " pod="openshift-controller-manager/controller-manager-879f6c89f-xlthx" Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.424334 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/9ebbc267-ceb6-444d-b767-493e54d573b6-profile-collector-cert\") pod \"catalog-operator-68c6474976-jf9pl\" (UID: \"9ebbc267-ceb6-444d-b767-493e54d573b6\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-jf9pl" Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.424355 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/b5c2983d-2d59-4691-b2cd-130bfcd3e18c-images\") pod \"machine-api-operator-5694c8668f-rllbn\" (UID: \"b5c2983d-2d59-4691-b2cd-130bfcd3e18c\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-rllbn" Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.424390 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/7842430c-0bd1-459a-9840-f4c4d31baa52-trusted-ca-bundle\") pod 
\"authentication-operator-69f744f599-fzxnn\" (UID: \"7842430c-0bd1-459a-9840-f4c4d31baa52\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-fzxnn" Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.424413 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/d58dc2f1-e069-4b63-8371-7c6b2735adab-encryption-config\") pod \"apiserver-7bbb656c7d-9t6kr\" (UID: \"d58dc2f1-e069-4b63-8371-7c6b2735adab\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-9t6kr" Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.424434 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/d952449e-c825-43d3-a591-0be473db6a53-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-558db77b4-bqwm2\" (UID: \"d952449e-c825-43d3-a591-0be473db6a53\") " pod="openshift-authentication/oauth-openshift-558db77b4-bqwm2" Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.424458 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/d952449e-c825-43d3-a591-0be473db6a53-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-558db77b4-bqwm2\" (UID: \"d952449e-c825-43d3-a591-0be473db6a53\") " pod="openshift-authentication/oauth-openshift-558db77b4-bqwm2" Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.424479 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-r9lh4\" (UniqueName: \"kubernetes.io/projected/da14c12f-6323-4600-9d9e-b2e5d53e1ecb-kube-api-access-r9lh4\") pod \"openshift-config-operator-7777fb866f-4gt9m\" (UID: \"da14c12f-6323-4600-9d9e-b2e5d53e1ecb\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-4gt9m" Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.424505 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/d58dc2f1-e069-4b63-8371-7c6b2735adab-trusted-ca-bundle\") pod \"apiserver-7bbb656c7d-9t6kr\" (UID: \"d58dc2f1-e069-4b63-8371-7c6b2735adab\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-9t6kr" Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.424522 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kplgk\" (UniqueName: \"kubernetes.io/projected/648b4624-2324-4ec1-aa88-5822c9f89034-kube-api-access-kplgk\") pod \"control-plane-machine-set-operator-78cbb6b69f-w94qh\" (UID: \"648b4624-2324-4ec1-aa88-5822c9f89034\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-w94qh" Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.424543 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/d952449e-c825-43d3-a591-0be473db6a53-v4-0-config-system-router-certs\") pod \"oauth-openshift-558db77b4-bqwm2\" (UID: \"d952449e-c825-43d3-a591-0be473db6a53\") " pod="openshift-authentication/oauth-openshift-558db77b4-bqwm2" Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.424562 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-client\" (UniqueName: 
\"kubernetes.io/secret/468ca9dc-6af3-47ae-8c7c-b9338ceae695-etcd-client\") pod \"apiserver-76f77b778f-ht2sn\" (UID: \"468ca9dc-6af3-47ae-8c7c-b9338ceae695\") " pod="openshift-apiserver/apiserver-76f77b778f-ht2sn" Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.424584 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/da14c12f-6323-4600-9d9e-b2e5d53e1ecb-available-featuregates\") pod \"openshift-config-operator-7777fb866f-4gt9m\" (UID: \"da14c12f-6323-4600-9d9e-b2e5d53e1ecb\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-4gt9m" Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.427478 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-config-operator/openshift-config-operator-7777fb866f-4gt9m"] Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.431265 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ddd22a7d-d9fd-41c5-a83a-8b68574e637e-config\") pod \"openshift-controller-manager-operator-756b6f6bc6-4b5vr\" (UID: \"ddd22a7d-d9fd-41c5-a83a-8b68574e637e\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-4b5vr" Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.431309 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b326d97a-72e4-4cbd-bea5-08613505b7e0-config\") pod \"openshift-apiserver-operator-796bbdcf4f-7tnsd\" (UID: \"b326d97a-72e4-4cbd-bea5-08613505b7e0\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-7tnsd" Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.431448 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/d58dc2f1-e069-4b63-8371-7c6b2735adab-audit-policies\") pod \"apiserver-7bbb656c7d-9t6kr\" (UID: \"d58dc2f1-e069-4b63-8371-7c6b2735adab\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-9t6kr" Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.432061 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/fede5fe4-d38c-46de-b334-32e9f56cf110-client-ca\") pod \"controller-manager-879f6c89f-xlthx\" (UID: \"fede5fe4-d38c-46de-b334-32e9f56cf110\") " pod="openshift-controller-manager/controller-manager-879f6c89f-xlthx" Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.433963 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1aefeaa5-e3f1-4aed-b152-35d380c3f87b-config\") pod \"route-controller-manager-6576b87f9c-qckrl\" (UID: \"1aefeaa5-e3f1-4aed-b152-35d380c3f87b\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-qckrl" Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.435563 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-config-operator/machine-config-controller-84d6567774-qgppj"] Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.436508 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/d58dc2f1-e069-4b63-8371-7c6b2735adab-serving-cert\") pod \"apiserver-7bbb656c7d-9t6kr\" (UID: \"d58dc2f1-e069-4b63-8371-7c6b2735adab\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-9t6kr" Nov 28 06:55:10 
crc kubenswrapper[4922]: I1128 06:55:10.437189 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-service-ca/service-ca-9c57cc56f-k2x5w"] Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.437885 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/d58dc2f1-e069-4b63-8371-7c6b2735adab-audit-dir\") pod \"apiserver-7bbb656c7d-9t6kr\" (UID: \"d58dc2f1-e069-4b63-8371-7c6b2735adab\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-9t6kr" Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.437960 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/fede5fe4-d38c-46de-b334-32e9f56cf110-serving-cert\") pod \"controller-manager-879f6c89f-xlthx\" (UID: \"fede5fe4-d38c-46de-b334-32e9f56cf110\") " pod="openshift-controller-manager/controller-manager-879f6c89f-xlthx" Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.439180 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/b326d97a-72e4-4cbd-bea5-08613505b7e0-serving-cert\") pod \"openshift-apiserver-operator-796bbdcf4f-7tnsd\" (UID: \"b326d97a-72e4-4cbd-bea5-08613505b7e0\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-7tnsd" Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.439476 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/d58dc2f1-e069-4b63-8371-7c6b2735adab-trusted-ca-bundle\") pod \"apiserver-7bbb656c7d-9t6kr\" (UID: \"d58dc2f1-e069-4b63-8371-7c6b2735adab\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-9t6kr" Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.439620 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/d58dc2f1-e069-4b63-8371-7c6b2735adab-etcd-serving-ca\") pod \"apiserver-7bbb656c7d-9t6kr\" (UID: \"d58dc2f1-e069-4b63-8371-7c6b2735adab\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-9t6kr" Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.441821 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/ddd22a7d-d9fd-41c5-a83a-8b68574e637e-serving-cert\") pod \"openshift-controller-manager-operator-756b6f6bc6-4b5vr\" (UID: \"ddd22a7d-d9fd-41c5-a83a-8b68574e637e\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-4b5vr" Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.439824 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/fede5fe4-d38c-46de-b334-32e9f56cf110-config\") pod \"controller-manager-879f6c89f-xlthx\" (UID: \"fede5fe4-d38c-46de-b334-32e9f56cf110\") " pod="openshift-controller-manager/controller-manager-879f6c89f-xlthx" Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.440957 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1aefeaa5-e3f1-4aed-b152-35d380c3f87b-serving-cert\") pod \"route-controller-manager-6576b87f9c-qckrl\" (UID: \"1aefeaa5-e3f1-4aed-b152-35d380c3f87b\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-qckrl" Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.441471 4922 kubelet.go:2428] "SyncLoop UPDATE" 
source="api" pods=["openshift-authentication-operator/authentication-operator-69f744f599-fzxnn"] Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.441459 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/1aefeaa5-e3f1-4aed-b152-35d380c3f87b-client-ca\") pod \"route-controller-manager-6576b87f9c-qckrl\" (UID: \"1aefeaa5-e3f1-4aed-b152-35d380c3f87b\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-qckrl" Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.442111 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/65e269c0-8d17-410e-ae5c-4ff9ba053bf9-samples-operator-tls\") pod \"cluster-samples-operator-665b6dd947-nm899\" (UID: \"65e269c0-8d17-410e-ae5c-4ff9ba053bf9\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-nm899" Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.442301 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/fede5fe4-d38c-46de-b334-32e9f56cf110-proxy-ca-bundles\") pod \"controller-manager-879f6c89f-xlthx\" (UID: \"fede5fe4-d38c-46de-b334-32e9f56cf110\") " pod="openshift-controller-manager/controller-manager-879f6c89f-xlthx" Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.442799 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator-operator"/"kube-storage-version-migrator-operator-dockercfg-2bh8d" Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.442911 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/d58dc2f1-e069-4b63-8371-7c6b2735adab-etcd-client\") pod \"apiserver-7bbb656c7d-9t6kr\" (UID: \"d58dc2f1-e069-4b63-8371-7c6b2735adab\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-9t6kr" Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.443900 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/d58dc2f1-e069-4b63-8371-7c6b2735adab-encryption-config\") pod \"apiserver-7bbb656c7d-9t6kr\" (UID: \"d58dc2f1-e069-4b63-8371-7c6b2735adab\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-9t6kr" Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.444785 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-bqwm2"] Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.447092 4922 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-dns/dns-default-62v62"] Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.449588 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/dns-default-62v62" Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.451774 4922 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ingress-canary/ingress-canary-p5q7c"] Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.453768 4922 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-ingress-canary/ingress-canary-p5q7c" Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.456246 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-config-operator/machine-config-operator-74547568cd-ln9mh"] Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.457876 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/catalog-operator-68c6474976-jf9pl"] Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.458932 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-gzqmt"] Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.459188 4922 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"openshift-service-ca.crt" Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.461447 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-storage-version-migrator/migrator-59844c95c7-54gpd"] Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.461471 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-w94qh"] Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.463528 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-multus/multus-admission-controller-857f4d67dd-98r22"] Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.464777 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-2t2xx"] Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.465958 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-f9d7485db-h297g"] Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.467150 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-h7pf2"] Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.468382 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-sfzn5"] Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.469976 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-7wtcp"] Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.471139 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-5pc2z"] Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.474377 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-dns/dns-default-62v62"] Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.475968 4922 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/machine-config-server-xq92w"] Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.476705 4922 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-machine-config-operator/machine-config-server-xq92w" Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.477902 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-sckww"] Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.479087 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-api/machine-api-operator-5694c8668f-rllbn"] Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.479323 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca"/"signing-key" Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.480473 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-ingress-operator/ingress-operator-5b745b69d9-d6r4k"] Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.481591 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-ingress-canary/ingress-canary-p5q7c"] Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.482739 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-apiserver/apiserver-76f77b778f-ht2sn"] Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.483825 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-service-ca-operator/service-ca-operator-777779d784-hgtbm"] Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.485003 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console-operator/console-operator-58897d9998-z4zvd"] Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.486165 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-8nrlh"] Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.487448 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-dns-operator/dns-operator-744455d44c-s8ws4"] Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.488609 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-etcd-operator/etcd-operator-b45778765-m6bkw"] Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.489796 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-98sbt"] Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.490917 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-r4frz"] Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.492044 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29405205-gln9g"] Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.493178 4922 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["hostpath-provisioner/csi-hostpathplugin-djvcb"] Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.494456 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["hostpath-provisioner/csi-hostpathplugin-djvcb"] Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.494500 4922 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-djvcb" Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.498957 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-stats-default" Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.520195 4922 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"kube-root-ca.crt" Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.526077 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dwh96\" (UniqueName: \"kubernetes.io/projected/765b0f79-4316-4aab-90fa-a2aaa84380f0-kube-api-access-dwh96\") pod \"collect-profiles-29405205-gln9g\" (UID: \"765b0f79-4316-4aab-90fa-a2aaa84380f0\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405205-gln9g" Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.526135 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/9ebbc267-ceb6-444d-b767-493e54d573b6-profile-collector-cert\") pod \"catalog-operator-68c6474976-jf9pl\" (UID: \"9ebbc267-ceb6-444d-b767-493e54d573b6\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-jf9pl" Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.526156 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/b5c2983d-2d59-4691-b2cd-130bfcd3e18c-images\") pod \"machine-api-operator-5694c8668f-rllbn\" (UID: \"b5c2983d-2d59-4691-b2cd-130bfcd3e18c\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-rllbn" Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.526560 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/d952449e-c825-43d3-a591-0be473db6a53-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-558db77b4-bqwm2\" (UID: \"d952449e-c825-43d3-a591-0be473db6a53\") " pod="openshift-authentication/oauth-openshift-558db77b4-bqwm2" Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.526597 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-r9lh4\" (UniqueName: \"kubernetes.io/projected/da14c12f-6323-4600-9d9e-b2e5d53e1ecb-kube-api-access-r9lh4\") pod \"openshift-config-operator-7777fb866f-4gt9m\" (UID: \"da14c12f-6323-4600-9d9e-b2e5d53e1ecb\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-4gt9m" Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.526619 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cflk4\" (UniqueName: \"kubernetes.io/projected/1f79995c-e40b-4f4a-9001-0054f046a944-kube-api-access-cflk4\") pod \"dns-operator-744455d44c-s8ws4\" (UID: \"1f79995c-e40b-4f4a-9001-0054f046a944\") " pod="openshift-dns-operator/dns-operator-744455d44c-s8ws4" Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.526645 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/fd93cb7d-1de5-4e04-957a-a91d0bb26134-trusted-ca\") pod \"console-operator-58897d9998-z4zvd\" (UID: \"fd93cb7d-1de5-4e04-957a-a91d0bb26134\") " pod="openshift-console-operator/console-operator-58897d9998-z4zvd" Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.526669 4922 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/da14c12f-6323-4600-9d9e-b2e5d53e1ecb-available-featuregates\") pod \"openshift-config-operator-7777fb866f-4gt9m\" (UID: \"da14c12f-6323-4600-9d9e-b2e5d53e1ecb\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-4gt9m" Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.526721 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2bzkj\" (UniqueName: \"kubernetes.io/projected/fb28612c-20e3-4319-9db8-dae18a593a1e-kube-api-access-2bzkj\") pod \"package-server-manager-789f6589d5-h7pf2\" (UID: \"fb28612c-20e3-4319-9db8-dae18a593a1e\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-h7pf2" Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.526850 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/468ca9dc-6af3-47ae-8c7c-b9338ceae695-etcd-serving-ca\") pod \"apiserver-76f77b778f-ht2sn\" (UID: \"468ca9dc-6af3-47ae-8c7c-b9338ceae695\") " pod="openshift-apiserver/apiserver-76f77b778f-ht2sn" Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.526878 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/fd93cb7d-1de5-4e04-957a-a91d0bb26134-config\") pod \"console-operator-58897d9998-z4zvd\" (UID: \"fd93cb7d-1de5-4e04-957a-a91d0bb26134\") " pod="openshift-console-operator/console-operator-58897d9998-z4zvd" Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.526963 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/468ca9dc-6af3-47ae-8c7c-b9338ceae695-encryption-config\") pod \"apiserver-76f77b778f-ht2sn\" (UID: \"468ca9dc-6af3-47ae-8c7c-b9338ceae695\") " pod="openshift-apiserver/apiserver-76f77b778f-ht2sn" Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.526988 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/b5c2983d-2d59-4691-b2cd-130bfcd3e18c-machine-api-operator-tls\") pod \"machine-api-operator-5694c8668f-rllbn\" (UID: \"b5c2983d-2d59-4691-b2cd-130bfcd3e18c\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-rllbn" Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.527010 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/87cf54e1-7498-430e-a517-6658bd9ce547-service-ca-bundle\") pod \"router-default-5444994796-xbhv9\" (UID: \"87cf54e1-7498-430e-a517-6658bd9ce547\") " pod="openshift-ingress/router-default-5444994796-xbhv9" Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.527030 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/fd93cb7d-1de5-4e04-957a-a91d0bb26134-serving-cert\") pod \"console-operator-58897d9998-z4zvd\" (UID: \"fd93cb7d-1de5-4e04-957a-a91d0bb26134\") " pod="openshift-console-operator/console-operator-58897d9998-z4zvd" Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.527056 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"stats-auth\" (UniqueName: 
\"kubernetes.io/secret/87cf54e1-7498-430e-a517-6658bd9ce547-stats-auth\") pod \"router-default-5444994796-xbhv9\" (UID: \"87cf54e1-7498-430e-a517-6658bd9ce547\") " pod="openshift-ingress/router-default-5444994796-xbhv9" Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.527081 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/d952449e-c825-43d3-a591-0be473db6a53-v4-0-config-user-template-error\") pod \"oauth-openshift-558db77b4-bqwm2\" (UID: \"d952449e-c825-43d3-a591-0be473db6a53\") " pod="openshift-authentication/oauth-openshift-558db77b4-bqwm2" Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.527150 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/da14c12f-6323-4600-9d9e-b2e5d53e1ecb-serving-cert\") pod \"openshift-config-operator-7777fb866f-4gt9m\" (UID: \"da14c12f-6323-4600-9d9e-b2e5d53e1ecb\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-4gt9m" Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.527176 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/72c05220-a1c6-4f3a-a8f1-c588a4e062b0-config\") pod \"kube-controller-manager-operator-78b949d7b-sfzn5\" (UID: \"72c05220-a1c6-4f3a-a8f1-c588a4e062b0\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-sfzn5" Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.527197 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-t5xj2\" (UniqueName: \"kubernetes.io/projected/9ebbc267-ceb6-444d-b767-493e54d573b6-kube-api-access-t5xj2\") pod \"catalog-operator-68c6474976-jf9pl\" (UID: \"9ebbc267-ceb6-444d-b767-493e54d573b6\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-jf9pl" Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.527238 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/059ac1e5-20d6-4a32-846e-d427680a560f-proxy-tls\") pod \"machine-config-operator-74547568cd-ln9mh\" (UID: \"059ac1e5-20d6-4a32-846e-d427680a560f\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-ln9mh" Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.527280 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-s5zfh\" (UniqueName: \"kubernetes.io/projected/71cf79c0-841b-4024-9b3d-9c49ca7d89d9-kube-api-access-s5zfh\") pod \"service-ca-operator-777779d784-hgtbm\" (UID: \"71cf79c0-841b-4024-9b3d-9c49ca7d89d9\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-hgtbm" Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.527346 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dlmf5\" (UniqueName: \"kubernetes.io/projected/d0751583-7dc1-4547-9240-39d874a6ca87-kube-api-access-dlmf5\") pod \"multus-admission-controller-857f4d67dd-98r22\" (UID: \"d0751583-7dc1-4547-9240-39d874a6ca87\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-98r22" Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.527398 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca-bundle\" (UniqueName: 
\"kubernetes.io/configmap/7842430c-0bd1-459a-9840-f4c4d31baa52-service-ca-bundle\") pod \"authentication-operator-69f744f599-fzxnn\" (UID: \"7842430c-0bd1-459a-9840-f4c4d31baa52\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-fzxnn" Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.527427 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/059ac1e5-20d6-4a32-846e-d427680a560f-auth-proxy-config\") pod \"machine-config-operator-74547568cd-ln9mh\" (UID: \"059ac1e5-20d6-4a32-846e-d427680a560f\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-ln9mh" Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.527453 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-pullsecrets\" (UniqueName: \"kubernetes.io/host-path/468ca9dc-6af3-47ae-8c7c-b9338ceae695-node-pullsecrets\") pod \"apiserver-76f77b778f-ht2sn\" (UID: \"468ca9dc-6af3-47ae-8c7c-b9338ceae695\") " pod="openshift-apiserver/apiserver-76f77b778f-ht2sn" Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.527473 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8grsb\" (UniqueName: \"kubernetes.io/projected/059ac1e5-20d6-4a32-846e-d427680a560f-kube-api-access-8grsb\") pod \"machine-config-operator-74547568cd-ln9mh\" (UID: \"059ac1e5-20d6-4a32-846e-d427680a560f\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-ln9mh" Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.527495 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/71cf79c0-841b-4024-9b3d-9c49ca7d89d9-config\") pod \"service-ca-operator-777779d784-hgtbm\" (UID: \"71cf79c0-841b-4024-9b3d-9c49ca7d89d9\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-hgtbm" Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.527521 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/9ebbc267-ceb6-444d-b767-493e54d573b6-srv-cert\") pod \"catalog-operator-68c6474976-jf9pl\" (UID: \"9ebbc267-ceb6-444d-b767-493e54d573b6\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-jf9pl" Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.527544 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/d0751583-7dc1-4547-9240-39d874a6ca87-webhook-certs\") pod \"multus-admission-controller-857f4d67dd-98r22\" (UID: \"d0751583-7dc1-4547-9240-39d874a6ca87\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-98r22" Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.527563 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/72c05220-a1c6-4f3a-a8f1-c588a4e062b0-serving-cert\") pod \"kube-controller-manager-operator-78b949d7b-sfzn5\" (UID: \"72c05220-a1c6-4f3a-a8f1-c588a4e062b0\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-sfzn5" Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.527583 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/059ac1e5-20d6-4a32-846e-d427680a560f-images\") pod 
\"machine-config-operator-74547568cd-ln9mh\" (UID: \"059ac1e5-20d6-4a32-846e-d427680a560f\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-ln9mh" Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.527605 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/4fa6813c-f797-44b6-8779-7cfc802379fc-mcc-auth-proxy-config\") pod \"machine-config-controller-84d6567774-qgppj\" (UID: \"4fa6813c-f797-44b6-8779-7cfc802379fc\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-qgppj" Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.527626 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-z2nc2\" (UniqueName: \"kubernetes.io/projected/fd93cb7d-1de5-4e04-957a-a91d0bb26134-kube-api-access-z2nc2\") pod \"console-operator-58897d9998-z4zvd\" (UID: \"fd93cb7d-1de5-4e04-957a-a91d0bb26134\") " pod="openshift-console-operator/console-operator-58897d9998-z4zvd" Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.527645 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-t4f99\" (UniqueName: \"kubernetes.io/projected/87cf54e1-7498-430e-a517-6658bd9ce547-kube-api-access-t4f99\") pod \"router-default-5444994796-xbhv9\" (UID: \"87cf54e1-7498-430e-a517-6658bd9ce547\") " pod="openshift-ingress/router-default-5444994796-xbhv9" Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.527661 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/71cf79c0-841b-4024-9b3d-9c49ca7d89d9-serving-cert\") pod \"service-ca-operator-777779d784-hgtbm\" (UID: \"71cf79c0-841b-4024-9b3d-9c49ca7d89d9\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-hgtbm" Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.527683 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/d952449e-c825-43d3-a591-0be473db6a53-v4-0-config-system-cliconfig\") pod \"oauth-openshift-558db77b4-bqwm2\" (UID: \"d952449e-c825-43d3-a591-0be473db6a53\") " pod="openshift-authentication/oauth-openshift-558db77b4-bqwm2" Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.527753 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/468ca9dc-6af3-47ae-8c7c-b9338ceae695-config\") pod \"apiserver-76f77b778f-ht2sn\" (UID: \"468ca9dc-6af3-47ae-8c7c-b9338ceae695\") " pod="openshift-apiserver/apiserver-76f77b778f-ht2sn" Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.527775 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/d952449e-c825-43d3-a591-0be473db6a53-v4-0-config-system-session\") pod \"oauth-openshift-558db77b4-bqwm2\" (UID: \"d952449e-c825-43d3-a591-0be473db6a53\") " pod="openshift-authentication/oauth-openshift-558db77b4-bqwm2" Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.527790 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/d952449e-c825-43d3-a591-0be473db6a53-audit-dir\") pod \"oauth-openshift-558db77b4-bqwm2\" (UID: \"d952449e-c825-43d3-a591-0be473db6a53\") " 
pod="openshift-authentication/oauth-openshift-558db77b4-bqwm2" Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.527810 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/468ca9dc-6af3-47ae-8c7c-b9338ceae695-audit-dir\") pod \"apiserver-76f77b778f-ht2sn\" (UID: \"468ca9dc-6af3-47ae-8c7c-b9338ceae695\") " pod="openshift-apiserver/apiserver-76f77b778f-ht2sn" Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.527805 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/da14c12f-6323-4600-9d9e-b2e5d53e1ecb-available-featuregates\") pod \"openshift-config-operator-7777fb866f-4gt9m\" (UID: \"da14c12f-6323-4600-9d9e-b2e5d53e1ecb\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-4gt9m" Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.527832 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-27zh4\" (UniqueName: \"kubernetes.io/projected/7842430c-0bd1-459a-9840-f4c4d31baa52-kube-api-access-27zh4\") pod \"authentication-operator-69f744f599-fzxnn\" (UID: \"7842430c-0bd1-459a-9840-f4c4d31baa52\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-fzxnn" Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.527853 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/87cf54e1-7498-430e-a517-6658bd9ce547-metrics-certs\") pod \"router-default-5444994796-xbhv9\" (UID: \"87cf54e1-7498-430e-a517-6658bd9ce547\") " pod="openshift-ingress/router-default-5444994796-xbhv9" Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.527907 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b5c2983d-2d59-4691-b2cd-130bfcd3e18c-config\") pod \"machine-api-operator-5694c8668f-rllbn\" (UID: \"b5c2983d-2d59-4691-b2cd-130bfcd3e18c\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-rllbn" Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.527972 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/d952449e-c825-43d3-a591-0be473db6a53-audit-policies\") pod \"oauth-openshift-558db77b4-bqwm2\" (UID: \"d952449e-c825-43d3-a591-0be473db6a53\") " pod="openshift-authentication/oauth-openshift-558db77b4-bqwm2" Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.528019 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/c16ef766-7ac2-43bc-a1fa-ea9fc1e61edf-kube-api-access\") pod \"kube-apiserver-operator-766d6c64bb-5pc2z\" (UID: \"c16ef766-7ac2-43bc-a1fa-ea9fc1e61edf\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-5pc2z" Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.528049 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6b8f1ab0-54be-4dad-97b3-a69b288dea67-config\") pod \"machine-approver-56656f9798-pp742\" (UID: \"6b8f1ab0-54be-4dad-97b3-a69b288dea67\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-pp742" Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.528076 4922 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"kube-api-access-hl8rf\" (UniqueName: \"kubernetes.io/projected/e77ea863-52c9-4f43-968f-73ba31a6b0de-kube-api-access-hl8rf\") pod \"kube-storage-version-migrator-operator-b67b599dd-gzqmt\" (UID: \"e77ea863-52c9-4f43-968f-73ba31a6b0de\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-gzqmt" Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.528103 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7842430c-0bd1-459a-9840-f4c4d31baa52-serving-cert\") pod \"authentication-operator-69f744f599-fzxnn\" (UID: \"7842430c-0bd1-459a-9840-f4c4d31baa52\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-fzxnn" Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.528145 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hn4gk\" (UniqueName: \"kubernetes.io/projected/b5c2983d-2d59-4691-b2cd-130bfcd3e18c-kube-api-access-hn4gk\") pod \"machine-api-operator-5694c8668f-rllbn\" (UID: \"b5c2983d-2d59-4691-b2cd-130bfcd3e18c\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-rllbn" Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.528170 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/468ca9dc-6af3-47ae-8c7c-b9338ceae695-serving-cert\") pod \"apiserver-76f77b778f-ht2sn\" (UID: \"468ca9dc-6af3-47ae-8c7c-b9338ceae695\") " pod="openshift-apiserver/apiserver-76f77b778f-ht2sn" Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.528194 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8h8cs\" (UniqueName: \"kubernetes.io/projected/6b8f1ab0-54be-4dad-97b3-a69b288dea67-kube-api-access-8h8cs\") pod \"machine-approver-56656f9798-pp742\" (UID: \"6b8f1ab0-54be-4dad-97b3-a69b288dea67\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-pp742" Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.528267 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/3f39970d-138c-4369-838c-af7074913d3b-serving-cert\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-7wtcp\" (UID: \"3f39970d-138c-4369-838c-af7074913d3b\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-7wtcp" Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.528326 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7842430c-0bd1-459a-9840-f4c4d31baa52-config\") pod \"authentication-operator-69f744f599-fzxnn\" (UID: \"7842430c-0bd1-459a-9840-f4c4d31baa52\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-fzxnn" Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.528351 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nklb9\" (UniqueName: \"kubernetes.io/projected/d952449e-c825-43d3-a591-0be473db6a53-kube-api-access-nklb9\") pod \"oauth-openshift-558db77b4-bqwm2\" (UID: \"d952449e-c825-43d3-a591-0be473db6a53\") " pod="openshift-authentication/oauth-openshift-558db77b4-bqwm2" Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.528386 4922 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/1f79995c-e40b-4f4a-9001-0054f046a944-metrics-tls\") pod \"dns-operator-744455d44c-s8ws4\" (UID: \"1f79995c-e40b-4f4a-9001-0054f046a944\") " pod="openshift-dns-operator/dns-operator-744455d44c-s8ws4" Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.528419 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/fb28612c-20e3-4319-9db8-dae18a593a1e-package-server-manager-serving-cert\") pod \"package-server-manager-789f6589d5-h7pf2\" (UID: \"fb28612c-20e3-4319-9db8-dae18a593a1e\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-h7pf2" Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.528443 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/d952449e-c825-43d3-a591-0be473db6a53-v4-0-config-system-service-ca\") pod \"oauth-openshift-558db77b4-bqwm2\" (UID: \"d952449e-c825-43d3-a591-0be473db6a53\") " pod="openshift-authentication/oauth-openshift-558db77b4-bqwm2" Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.528544 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"images\" (UniqueName: \"kubernetes.io/configmap/b5c2983d-2d59-4691-b2cd-130bfcd3e18c-images\") pod \"machine-api-operator-5694c8668f-rllbn\" (UID: \"b5c2983d-2d59-4691-b2cd-130bfcd3e18c\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-rllbn" Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.529443 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/468ca9dc-6af3-47ae-8c7c-b9338ceae695-etcd-serving-ca\") pod \"apiserver-76f77b778f-ht2sn\" (UID: \"468ca9dc-6af3-47ae-8c7c-b9338ceae695\") " pod="openshift-apiserver/apiserver-76f77b778f-ht2sn" Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.529461 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/d952449e-c825-43d3-a591-0be473db6a53-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-558db77b4-bqwm2\" (UID: \"d952449e-c825-43d3-a591-0be473db6a53\") " pod="openshift-authentication/oauth-openshift-558db77b4-bqwm2" Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.530051 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b5c2983d-2d59-4691-b2cd-130bfcd3e18c-config\") pod \"machine-api-operator-5694c8668f-rllbn\" (UID: \"b5c2983d-2d59-4691-b2cd-130bfcd3e18c\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-rllbn" Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.530258 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/d952449e-c825-43d3-a591-0be473db6a53-audit-policies\") pod \"oauth-openshift-558db77b4-bqwm2\" (UID: \"d952449e-c825-43d3-a591-0be473db6a53\") " pod="openshift-authentication/oauth-openshift-558db77b4-bqwm2" Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.530652 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7842430c-0bd1-459a-9840-f4c4d31baa52-config\") pod \"authentication-operator-69f744f599-fzxnn\" (UID: 
\"7842430c-0bd1-459a-9840-f4c4d31baa52\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-fzxnn" Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.530767 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/d952449e-c825-43d3-a591-0be473db6a53-v4-0-config-system-cliconfig\") pod \"oauth-openshift-558db77b4-bqwm2\" (UID: \"d952449e-c825-43d3-a591-0be473db6a53\") " pod="openshift-authentication/oauth-openshift-558db77b4-bqwm2" Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.530766 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/7842430c-0bd1-459a-9840-f4c4d31baa52-trusted-ca-bundle\") pod \"authentication-operator-69f744f599-fzxnn\" (UID: \"7842430c-0bd1-459a-9840-f4c4d31baa52\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-fzxnn" Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.530826 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/3f39970d-138c-4369-838c-af7074913d3b-kube-api-access\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-7wtcp\" (UID: \"3f39970d-138c-4369-838c-af7074913d3b\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-7wtcp" Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.530853 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/d952449e-c825-43d3-a591-0be473db6a53-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-558db77b4-bqwm2\" (UID: \"d952449e-c825-43d3-a591-0be473db6a53\") " pod="openshift-authentication/oauth-openshift-558db77b4-bqwm2" Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.530877 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/9cfee6d8-0106-4b57-98e3-23eaed2b9ff7-trusted-ca\") pod \"cluster-image-registry-operator-dc59b4c8b-98sbt\" (UID: \"9cfee6d8-0106-4b57-98e3-23eaed2b9ff7\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-98sbt" Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.530897 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c699c950-dfd4-4513-8c2a-3f38c8094d22-config\") pod \"etcd-operator-b45778765-m6bkw\" (UID: \"c699c950-dfd4-4513-8c2a-3f38c8094d22\") " pod="openshift-etcd-operator/etcd-operator-b45778765-m6bkw" Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.530917 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kplgk\" (UniqueName: \"kubernetes.io/projected/648b4624-2324-4ec1-aa88-5822c9f89034-kube-api-access-kplgk\") pod \"control-plane-machine-set-operator-78cbb6b69f-w94qh\" (UID: \"648b4624-2324-4ec1-aa88-5822c9f89034\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-w94qh" Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.530936 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/d952449e-c825-43d3-a591-0be473db6a53-v4-0-config-system-router-certs\") pod 
\"oauth-openshift-558db77b4-bqwm2\" (UID: \"d952449e-c825-43d3-a591-0be473db6a53\") " pod="openshift-authentication/oauth-openshift-558db77b4-bqwm2" Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.530954 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/468ca9dc-6af3-47ae-8c7c-b9338ceae695-etcd-client\") pod \"apiserver-76f77b778f-ht2sn\" (UID: \"468ca9dc-6af3-47ae-8c7c-b9338ceae695\") " pod="openshift-apiserver/apiserver-76f77b778f-ht2sn" Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.531528 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/d952449e-c825-43d3-a591-0be473db6a53-v4-0-config-system-service-ca\") pod \"oauth-openshift-558db77b4-bqwm2\" (UID: \"d952449e-c825-43d3-a591-0be473db6a53\") " pod="openshift-authentication/oauth-openshift-558db77b4-bqwm2" Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.531902 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/468ca9dc-6af3-47ae-8c7c-b9338ceae695-encryption-config\") pod \"apiserver-76f77b778f-ht2sn\" (UID: \"468ca9dc-6af3-47ae-8c7c-b9338ceae695\") " pod="openshift-apiserver/apiserver-76f77b778f-ht2sn" Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.531946 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/7842430c-0bd1-459a-9840-f4c4d31baa52-service-ca-bundle\") pod \"authentication-operator-69f744f599-fzxnn\" (UID: \"7842430c-0bd1-459a-9840-f4c4d31baa52\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-fzxnn" Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.532048 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/468ca9dc-6af3-47ae-8c7c-b9338ceae695-config\") pod \"apiserver-76f77b778f-ht2sn\" (UID: \"468ca9dc-6af3-47ae-8c7c-b9338ceae695\") " pod="openshift-apiserver/apiserver-76f77b778f-ht2sn" Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.532086 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/7842430c-0bd1-459a-9840-f4c4d31baa52-trusted-ca-bundle\") pod \"authentication-operator-69f744f599-fzxnn\" (UID: \"7842430c-0bd1-459a-9840-f4c4d31baa52\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-fzxnn" Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.532154 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/d952449e-c825-43d3-a591-0be473db6a53-audit-dir\") pod \"oauth-openshift-558db77b4-bqwm2\" (UID: \"d952449e-c825-43d3-a591-0be473db6a53\") " pod="openshift-authentication/oauth-openshift-558db77b4-bqwm2" Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.532380 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-pullsecrets\" (UniqueName: \"kubernetes.io/host-path/468ca9dc-6af3-47ae-8c7c-b9338ceae695-node-pullsecrets\") pod \"apiserver-76f77b778f-ht2sn\" (UID: \"468ca9dc-6af3-47ae-8c7c-b9338ceae695\") " pod="openshift-apiserver/apiserver-76f77b778f-ht2sn" Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.532565 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"mcc-auth-proxy-config\" 
(UniqueName: \"kubernetes.io/configmap/4fa6813c-f797-44b6-8779-7cfc802379fc-mcc-auth-proxy-config\") pod \"machine-config-controller-84d6567774-qgppj\" (UID: \"4fa6813c-f797-44b6-8779-7cfc802379fc\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-qgppj" Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.532610 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/87cf54e1-7498-430e-a517-6658bd9ce547-default-certificate\") pod \"router-default-5444994796-xbhv9\" (UID: \"87cf54e1-7498-430e-a517-6658bd9ce547\") " pod="openshift-ingress/router-default-5444994796-xbhv9" Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.532816 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dwl79\" (UniqueName: \"kubernetes.io/projected/9cfee6d8-0106-4b57-98e3-23eaed2b9ff7-kube-api-access-dwl79\") pod \"cluster-image-registry-operator-dc59b4c8b-98sbt\" (UID: \"9cfee6d8-0106-4b57-98e3-23eaed2b9ff7\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-98sbt" Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.532983 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/468ca9dc-6af3-47ae-8c7c-b9338ceae695-audit-dir\") pod \"apiserver-76f77b778f-ht2sn\" (UID: \"468ca9dc-6af3-47ae-8c7c-b9338ceae695\") " pod="openshift-apiserver/apiserver-76f77b778f-ht2sn" Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.533047 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/d952449e-c825-43d3-a591-0be473db6a53-v4-0-config-user-template-login\") pod \"oauth-openshift-558db77b4-bqwm2\" (UID: \"d952449e-c825-43d3-a591-0be473db6a53\") " pod="openshift-authentication/oauth-openshift-558db77b4-bqwm2" Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.533112 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/7a50e5be-5b15-472d-a504-3dc449b474e6-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-2t2xx\" (UID: \"7a50e5be-5b15-472d-a504-3dc449b474e6\") " pod="openshift-marketplace/marketplace-operator-79b997595-2t2xx" Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.533331 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c16ef766-7ac2-43bc-a1fa-ea9fc1e61edf-config\") pod \"kube-apiserver-operator-766d6c64bb-5pc2z\" (UID: \"c16ef766-7ac2-43bc-a1fa-ea9fc1e61edf\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-5pc2z" Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.533453 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/72c05220-a1c6-4f3a-a8f1-c588a4e062b0-kube-api-access\") pod \"kube-controller-manager-operator-78b949d7b-sfzn5\" (UID: \"72c05220-a1c6-4f3a-a8f1-c588a4e062b0\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-sfzn5" Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.533955 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-ca\" (UniqueName: 
\"kubernetes.io/configmap/c699c950-dfd4-4513-8c2a-3f38c8094d22-etcd-ca\") pod \"etcd-operator-b45778765-m6bkw\" (UID: \"c699c950-dfd4-4513-8c2a-3f38c8094d22\") " pod="openshift-etcd-operator/etcd-operator-b45778765-m6bkw" Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.534000 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-g6mfl\" (UniqueName: \"kubernetes.io/projected/c699c950-dfd4-4513-8c2a-3f38c8094d22-kube-api-access-g6mfl\") pod \"etcd-operator-b45778765-m6bkw\" (UID: \"c699c950-dfd4-4513-8c2a-3f38c8094d22\") " pod="openshift-etcd-operator/etcd-operator-b45778765-m6bkw" Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.534037 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e77ea863-52c9-4f43-968f-73ba31a6b0de-serving-cert\") pod \"kube-storage-version-migrator-operator-b67b599dd-gzqmt\" (UID: \"e77ea863-52c9-4f43-968f-73ba31a6b0de\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-gzqmt" Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.534072 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/c16ef766-7ac2-43bc-a1fa-ea9fc1e61edf-serving-cert\") pod \"kube-apiserver-operator-766d6c64bb-5pc2z\" (UID: \"c16ef766-7ac2-43bc-a1fa-ea9fc1e61edf\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-5pc2z" Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.534099 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/468ca9dc-6af3-47ae-8c7c-b9338ceae695-audit\") pod \"apiserver-76f77b778f-ht2sn\" (UID: \"468ca9dc-6af3-47ae-8c7c-b9338ceae695\") " pod="openshift-apiserver/apiserver-76f77b778f-ht2sn" Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.534123 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/765b0f79-4316-4aab-90fa-a2aaa84380f0-secret-volume\") pod \"collect-profiles-29405205-gln9g\" (UID: \"765b0f79-4316-4aab-90fa-a2aaa84380f0\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405205-gln9g" Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.534142 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/b5c2983d-2d59-4691-b2cd-130bfcd3e18c-machine-api-operator-tls\") pod \"machine-api-operator-5694c8668f-rllbn\" (UID: \"b5c2983d-2d59-4691-b2cd-130bfcd3e18c\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-rllbn" Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.534157 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/d952449e-c825-43d3-a591-0be473db6a53-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-558db77b4-bqwm2\" (UID: \"d952449e-c825-43d3-a591-0be473db6a53\") " pod="openshift-authentication/oauth-openshift-558db77b4-bqwm2" Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.534213 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: 
\"kubernetes.io/secret/d952449e-c825-43d3-a591-0be473db6a53-v4-0-config-system-serving-cert\") pod \"oauth-openshift-558db77b4-bqwm2\" (UID: \"d952449e-c825-43d3-a591-0be473db6a53\") " pod="openshift-authentication/oauth-openshift-558db77b4-bqwm2" Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.534259 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/7a50e5be-5b15-472d-a504-3dc449b474e6-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-2t2xx\" (UID: \"7a50e5be-5b15-472d-a504-3dc449b474e6\") " pod="openshift-marketplace/marketplace-operator-79b997595-2t2xx" Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.534287 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/c699c950-dfd4-4513-8c2a-3f38c8094d22-serving-cert\") pod \"etcd-operator-b45778765-m6bkw\" (UID: \"c699c950-dfd4-4513-8c2a-3f38c8094d22\") " pod="openshift-etcd-operator/etcd-operator-b45778765-m6bkw" Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.534323 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/c699c950-dfd4-4513-8c2a-3f38c8094d22-etcd-client\") pod \"etcd-operator-b45778765-m6bkw\" (UID: \"c699c950-dfd4-4513-8c2a-3f38c8094d22\") " pod="openshift-etcd-operator/etcd-operator-b45778765-m6bkw" Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.534352 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-424x9\" (UniqueName: \"kubernetes.io/projected/4fa6813c-f797-44b6-8779-7cfc802379fc-kube-api-access-424x9\") pod \"machine-config-controller-84d6567774-qgppj\" (UID: \"4fa6813c-f797-44b6-8779-7cfc802379fc\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-qgppj" Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.534374 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/9cfee6d8-0106-4b57-98e3-23eaed2b9ff7-image-registry-operator-tls\") pod \"cluster-image-registry-operator-dc59b4c8b-98sbt\" (UID: \"9cfee6d8-0106-4b57-98e3-23eaed2b9ff7\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-98sbt" Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.534402 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/4fa6813c-f797-44b6-8779-7cfc802379fc-proxy-tls\") pod \"machine-config-controller-84d6567774-qgppj\" (UID: \"4fa6813c-f797-44b6-8779-7cfc802379fc\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-qgppj" Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.534423 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/9cfee6d8-0106-4b57-98e3-23eaed2b9ff7-bound-sa-token\") pod \"cluster-image-registry-operator-dc59b4c8b-98sbt\" (UID: \"9cfee6d8-0106-4b57-98e3-23eaed2b9ff7\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-98sbt" Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.534465 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: 
\"kubernetes.io/configmap/468ca9dc-6af3-47ae-8c7c-b9338ceae695-trusted-ca-bundle\") pod \"apiserver-76f77b778f-ht2sn\" (UID: \"468ca9dc-6af3-47ae-8c7c-b9338ceae695\") " pod="openshift-apiserver/apiserver-76f77b778f-ht2sn" Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.534486 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5tlxw\" (UniqueName: \"kubernetes.io/projected/468ca9dc-6af3-47ae-8c7c-b9338ceae695-kube-api-access-5tlxw\") pod \"apiserver-76f77b778f-ht2sn\" (UID: \"468ca9dc-6af3-47ae-8c7c-b9338ceae695\") " pod="openshift-apiserver/apiserver-76f77b778f-ht2sn" Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.534509 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3f39970d-138c-4369-838c-af7074913d3b-config\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-7wtcp\" (UID: \"3f39970d-138c-4369-838c-af7074913d3b\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-7wtcp" Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.534530 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/765b0f79-4316-4aab-90fa-a2aaa84380f0-config-volume\") pod \"collect-profiles-29405205-gln9g\" (UID: \"765b0f79-4316-4aab-90fa-a2aaa84380f0\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405205-gln9g" Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.534537 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/d952449e-c825-43d3-a591-0be473db6a53-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-558db77b4-bqwm2\" (UID: \"d952449e-c825-43d3-a591-0be473db6a53\") " pod="openshift-authentication/oauth-openshift-558db77b4-bqwm2" Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.534555 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e77ea863-52c9-4f43-968f-73ba31a6b0de-config\") pod \"kube-storage-version-migrator-operator-b67b599dd-gzqmt\" (UID: \"e77ea863-52c9-4f43-968f-73ba31a6b0de\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-gzqmt" Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.534578 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/468ca9dc-6af3-47ae-8c7c-b9338ceae695-image-import-ca\") pod \"apiserver-76f77b778f-ht2sn\" (UID: \"468ca9dc-6af3-47ae-8c7c-b9338ceae695\") " pod="openshift-apiserver/apiserver-76f77b778f-ht2sn" Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.534620 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2r9qq\" (UniqueName: \"kubernetes.io/projected/7a50e5be-5b15-472d-a504-3dc449b474e6-kube-api-access-2r9qq\") pod \"marketplace-operator-79b997595-2t2xx\" (UID: \"7a50e5be-5b15-472d-a504-3dc449b474e6\") " pod="openshift-marketplace/marketplace-operator-79b997595-2t2xx" Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.534639 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/6b8f1ab0-54be-4dad-97b3-a69b288dea67-auth-proxy-config\") pod 
\"machine-approver-56656f9798-pp742\" (UID: \"6b8f1ab0-54be-4dad-97b3-a69b288dea67\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-pp742" Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.534658 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/c699c950-dfd4-4513-8c2a-3f38c8094d22-etcd-service-ca\") pod \"etcd-operator-b45778765-m6bkw\" (UID: \"c699c950-dfd4-4513-8c2a-3f38c8094d22\") " pod="openshift-etcd-operator/etcd-operator-b45778765-m6bkw" Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.534692 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/6b8f1ab0-54be-4dad-97b3-a69b288dea67-machine-approver-tls\") pod \"machine-approver-56656f9798-pp742\" (UID: \"6b8f1ab0-54be-4dad-97b3-a69b288dea67\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-pp742" Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.534696 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/468ca9dc-6af3-47ae-8c7c-b9338ceae695-audit\") pod \"apiserver-76f77b778f-ht2sn\" (UID: \"468ca9dc-6af3-47ae-8c7c-b9338ceae695\") " pod="openshift-apiserver/apiserver-76f77b778f-ht2sn" Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.534724 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/648b4624-2324-4ec1-aa88-5822c9f89034-control-plane-machine-set-operator-tls\") pod \"control-plane-machine-set-operator-78cbb6b69f-w94qh\" (UID: \"648b4624-2324-4ec1-aa88-5822c9f89034\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-w94qh" Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.534754 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/d952449e-c825-43d3-a591-0be473db6a53-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-558db77b4-bqwm2\" (UID: \"d952449e-c825-43d3-a591-0be473db6a53\") " pod="openshift-authentication/oauth-openshift-558db77b4-bqwm2" Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.535767 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/d0751583-7dc1-4547-9240-39d874a6ca87-webhook-certs\") pod \"multus-admission-controller-857f4d67dd-98r22\" (UID: \"d0751583-7dc1-4547-9240-39d874a6ca87\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-98r22" Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.535794 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/468ca9dc-6af3-47ae-8c7c-b9338ceae695-image-import-ca\") pod \"apiserver-76f77b778f-ht2sn\" (UID: \"468ca9dc-6af3-47ae-8c7c-b9338ceae695\") " pod="openshift-apiserver/apiserver-76f77b778f-ht2sn" Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.536027 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/468ca9dc-6af3-47ae-8c7c-b9338ceae695-serving-cert\") pod \"apiserver-76f77b778f-ht2sn\" (UID: \"468ca9dc-6af3-47ae-8c7c-b9338ceae695\") " pod="openshift-apiserver/apiserver-76f77b778f-ht2sn" Nov 28 
Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.536580 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/fb28612c-20e3-4319-9db8-dae18a593a1e-package-server-manager-serving-cert\") pod \"package-server-manager-789f6589d5-h7pf2\" (UID: \"fb28612c-20e3-4319-9db8-dae18a593a1e\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-h7pf2"
Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.536631 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/d952449e-c825-43d3-a591-0be473db6a53-v4-0-config-system-session\") pod \"oauth-openshift-558db77b4-bqwm2\" (UID: \"d952449e-c825-43d3-a591-0be473db6a53\") " pod="openshift-authentication/oauth-openshift-558db77b4-bqwm2"
Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.536759 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/da14c12f-6323-4600-9d9e-b2e5d53e1ecb-serving-cert\") pod \"openshift-config-operator-7777fb866f-4gt9m\" (UID: \"da14c12f-6323-4600-9d9e-b2e5d53e1ecb\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-4gt9m"
Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.537025 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/d952449e-c825-43d3-a591-0be473db6a53-v4-0-config-user-template-error\") pod \"oauth-openshift-558db77b4-bqwm2\" (UID: \"d952449e-c825-43d3-a591-0be473db6a53\") " pod="openshift-authentication/oauth-openshift-558db77b4-bqwm2"
Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.537459 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/87cf54e1-7498-430e-a517-6658bd9ce547-stats-auth\") pod \"router-default-5444994796-xbhv9\" (UID: \"87cf54e1-7498-430e-a517-6658bd9ce547\") " pod="openshift-ingress/router-default-5444994796-xbhv9"
Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.537453 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/d952449e-c825-43d3-a591-0be473db6a53-v4-0-config-system-serving-cert\") pod \"oauth-openshift-558db77b4-bqwm2\" (UID: \"d952449e-c825-43d3-a591-0be473db6a53\") " pod="openshift-authentication/oauth-openshift-558db77b4-bqwm2"
Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.537735 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/9ebbc267-ceb6-444d-b767-493e54d573b6-profile-collector-cert\") pod \"catalog-operator-68c6474976-jf9pl\" (UID: \"9ebbc267-ceb6-444d-b767-493e54d573b6\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-jf9pl"
Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.537867 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/d952449e-c825-43d3-a591-0be473db6a53-v4-0-config-system-router-certs\") pod \"oauth-openshift-558db77b4-bqwm2\" (UID: \"d952449e-c825-43d3-a591-0be473db6a53\") " pod="openshift-authentication/oauth-openshift-558db77b4-bqwm2"
Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.538525 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume
\"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/d952449e-c825-43d3-a591-0be473db6a53-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-558db77b4-bqwm2\" (UID: \"d952449e-c825-43d3-a591-0be473db6a53\") " pod="openshift-authentication/oauth-openshift-558db77b4-bqwm2" Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.538807 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"images\" (UniqueName: \"kubernetes.io/configmap/059ac1e5-20d6-4a32-846e-d427680a560f-images\") pod \"machine-config-operator-74547568cd-ln9mh\" (UID: \"059ac1e5-20d6-4a32-846e-d427680a560f\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-ln9mh" Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.538987 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/059ac1e5-20d6-4a32-846e-d427680a560f-auth-proxy-config\") pod \"machine-config-operator-74547568cd-ln9mh\" (UID: \"059ac1e5-20d6-4a32-846e-d427680a560f\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-ln9mh" Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.539116 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/d952449e-c825-43d3-a591-0be473db6a53-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-558db77b4-bqwm2\" (UID: \"d952449e-c825-43d3-a591-0be473db6a53\") " pod="openshift-authentication/oauth-openshift-558db77b4-bqwm2" Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.539490 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/9ebbc267-ceb6-444d-b767-493e54d573b6-srv-cert\") pod \"catalog-operator-68c6474976-jf9pl\" (UID: \"9ebbc267-ceb6-444d-b767-493e54d573b6\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-jf9pl" Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.539496 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/468ca9dc-6af3-47ae-8c7c-b9338ceae695-trusted-ca-bundle\") pod \"apiserver-76f77b778f-ht2sn\" (UID: \"468ca9dc-6af3-47ae-8c7c-b9338ceae695\") " pod="openshift-apiserver/apiserver-76f77b778f-ht2sn" Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.540178 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7842430c-0bd1-459a-9840-f4c4d31baa52-serving-cert\") pod \"authentication-operator-69f744f599-fzxnn\" (UID: \"7842430c-0bd1-459a-9840-f4c4d31baa52\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-fzxnn" Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.540778 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/4fa6813c-f797-44b6-8779-7cfc802379fc-proxy-tls\") pod \"machine-config-controller-84d6567774-qgppj\" (UID: \"4fa6813c-f797-44b6-8779-7cfc802379fc\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-qgppj" Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.540889 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e77ea863-52c9-4f43-968f-73ba31a6b0de-config\") pod 
\"kube-storage-version-migrator-operator-b67b599dd-gzqmt\" (UID: \"e77ea863-52c9-4f43-968f-73ba31a6b0de\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-gzqmt" Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.541149 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca"/"service-ca-dockercfg-pn86c" Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.541286 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/468ca9dc-6af3-47ae-8c7c-b9338ceae695-etcd-client\") pod \"apiserver-76f77b778f-ht2sn\" (UID: \"468ca9dc-6af3-47ae-8c7c-b9338ceae695\") " pod="openshift-apiserver/apiserver-76f77b778f-ht2sn" Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.541342 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/7a50e5be-5b15-472d-a504-3dc449b474e6-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-2t2xx\" (UID: \"7a50e5be-5b15-472d-a504-3dc449b474e6\") " pod="openshift-marketplace/marketplace-operator-79b997595-2t2xx" Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.542037 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/059ac1e5-20d6-4a32-846e-d427680a560f-proxy-tls\") pod \"machine-config-operator-74547568cd-ln9mh\" (UID: \"059ac1e5-20d6-4a32-846e-d427680a560f\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-ln9mh" Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.542941 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e77ea863-52c9-4f43-968f-73ba31a6b0de-serving-cert\") pod \"kube-storage-version-migrator-operator-b67b599dd-gzqmt\" (UID: \"e77ea863-52c9-4f43-968f-73ba31a6b0de\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-gzqmt" Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.543635 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/7a50e5be-5b15-472d-a504-3dc449b474e6-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-2t2xx\" (UID: \"7a50e5be-5b15-472d-a504-3dc449b474e6\") " pod="openshift-marketplace/marketplace-operator-79b997595-2t2xx" Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.543809 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/d952449e-c825-43d3-a591-0be473db6a53-v4-0-config-user-template-login\") pod \"oauth-openshift-558db77b4-bqwm2\" (UID: \"d952449e-c825-43d3-a591-0be473db6a53\") " pod="openshift-authentication/oauth-openshift-558db77b4-bqwm2" Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.544164 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/648b4624-2324-4ec1-aa88-5822c9f89034-control-plane-machine-set-operator-tls\") pod \"control-plane-machine-set-operator-78cbb6b69f-w94qh\" (UID: \"648b4624-2324-4ec1-aa88-5822c9f89034\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-w94qh" Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.560808 4922 reflector.go:368] Caches populated for *v1.Secret 
from object-"openshift-ingress"/"router-dockercfg-zdk86" Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.579704 4922 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"service-ca-bundle" Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.591361 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/87cf54e1-7498-430e-a517-6658bd9ce547-service-ca-bundle\") pod \"router-default-5444994796-xbhv9\" (UID: \"87cf54e1-7498-430e-a517-6658bd9ce547\") " pod="openshift-ingress/router-default-5444994796-xbhv9" Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.601837 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-metrics-certs-default" Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.605886 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/87cf54e1-7498-430e-a517-6658bd9ce547-metrics-certs\") pod \"router-default-5444994796-xbhv9\" (UID: \"87cf54e1-7498-430e-a517-6658bd9ce547\") " pod="openshift-ingress/router-default-5444994796-xbhv9" Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.619132 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-certs-default" Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.628692 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/87cf54e1-7498-430e-a517-6658bd9ce547-default-certificate\") pod \"router-default-5444994796-xbhv9\" (UID: \"87cf54e1-7498-430e-a517-6658bd9ce547\") " pod="openshift-ingress/router-default-5444994796-xbhv9" Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.635469 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/fd93cb7d-1de5-4e04-957a-a91d0bb26134-config\") pod \"console-operator-58897d9998-z4zvd\" (UID: \"fd93cb7d-1de5-4e04-957a-a91d0bb26134\") " pod="openshift-console-operator/console-operator-58897d9998-z4zvd" Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.635522 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/fd93cb7d-1de5-4e04-957a-a91d0bb26134-serving-cert\") pod \"console-operator-58897d9998-z4zvd\" (UID: \"fd93cb7d-1de5-4e04-957a-a91d0bb26134\") " pod="openshift-console-operator/console-operator-58897d9998-z4zvd" Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.635562 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/72c05220-a1c6-4f3a-a8f1-c588a4e062b0-config\") pod \"kube-controller-manager-operator-78b949d7b-sfzn5\" (UID: \"72c05220-a1c6-4f3a-a8f1-c588a4e062b0\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-sfzn5" Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.635616 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s5zfh\" (UniqueName: \"kubernetes.io/projected/71cf79c0-841b-4024-9b3d-9c49ca7d89d9-kube-api-access-s5zfh\") pod \"service-ca-operator-777779d784-hgtbm\" (UID: \"71cf79c0-841b-4024-9b3d-9c49ca7d89d9\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-hgtbm" Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.635687 4922 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/71cf79c0-841b-4024-9b3d-9c49ca7d89d9-config\") pod \"service-ca-operator-777779d784-hgtbm\" (UID: \"71cf79c0-841b-4024-9b3d-9c49ca7d89d9\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-hgtbm" Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.635746 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/72c05220-a1c6-4f3a-a8f1-c588a4e062b0-serving-cert\") pod \"kube-controller-manager-operator-78b949d7b-sfzn5\" (UID: \"72c05220-a1c6-4f3a-a8f1-c588a4e062b0\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-sfzn5" Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.635795 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-z2nc2\" (UniqueName: \"kubernetes.io/projected/fd93cb7d-1de5-4e04-957a-a91d0bb26134-kube-api-access-z2nc2\") pod \"console-operator-58897d9998-z4zvd\" (UID: \"fd93cb7d-1de5-4e04-957a-a91d0bb26134\") " pod="openshift-console-operator/console-operator-58897d9998-z4zvd" Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.635840 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/71cf79c0-841b-4024-9b3d-9c49ca7d89d9-serving-cert\") pod \"service-ca-operator-777779d784-hgtbm\" (UID: \"71cf79c0-841b-4024-9b3d-9c49ca7d89d9\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-hgtbm" Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.635911 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/c16ef766-7ac2-43bc-a1fa-ea9fc1e61edf-kube-api-access\") pod \"kube-apiserver-operator-766d6c64bb-5pc2z\" (UID: \"c16ef766-7ac2-43bc-a1fa-ea9fc1e61edf\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-5pc2z" Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.635954 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6b8f1ab0-54be-4dad-97b3-a69b288dea67-config\") pod \"machine-approver-56656f9798-pp742\" (UID: \"6b8f1ab0-54be-4dad-97b3-a69b288dea67\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-pp742" Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.636031 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8h8cs\" (UniqueName: \"kubernetes.io/projected/6b8f1ab0-54be-4dad-97b3-a69b288dea67-kube-api-access-8h8cs\") pod \"machine-approver-56656f9798-pp742\" (UID: \"6b8f1ab0-54be-4dad-97b3-a69b288dea67\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-pp742" Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.636105 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/3f39970d-138c-4369-838c-af7074913d3b-serving-cert\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-7wtcp\" (UID: \"3f39970d-138c-4369-838c-af7074913d3b\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-7wtcp" Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.636159 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: 
\"kubernetes.io/secret/1f79995c-e40b-4f4a-9001-0054f046a944-metrics-tls\") pod \"dns-operator-744455d44c-s8ws4\" (UID: \"1f79995c-e40b-4f4a-9001-0054f046a944\") " pod="openshift-dns-operator/dns-operator-744455d44c-s8ws4" Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.636211 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/3f39970d-138c-4369-838c-af7074913d3b-kube-api-access\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-7wtcp\" (UID: \"3f39970d-138c-4369-838c-af7074913d3b\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-7wtcp" Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.636280 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/9cfee6d8-0106-4b57-98e3-23eaed2b9ff7-trusted-ca\") pod \"cluster-image-registry-operator-dc59b4c8b-98sbt\" (UID: \"9cfee6d8-0106-4b57-98e3-23eaed2b9ff7\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-98sbt" Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.636321 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c699c950-dfd4-4513-8c2a-3f38c8094d22-config\") pod \"etcd-operator-b45778765-m6bkw\" (UID: \"c699c950-dfd4-4513-8c2a-3f38c8094d22\") " pod="openshift-etcd-operator/etcd-operator-b45778765-m6bkw" Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.636393 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dwl79\" (UniqueName: \"kubernetes.io/projected/9cfee6d8-0106-4b57-98e3-23eaed2b9ff7-kube-api-access-dwl79\") pod \"cluster-image-registry-operator-dc59b4c8b-98sbt\" (UID: \"9cfee6d8-0106-4b57-98e3-23eaed2b9ff7\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-98sbt" Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.636469 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c16ef766-7ac2-43bc-a1fa-ea9fc1e61edf-config\") pod \"kube-apiserver-operator-766d6c64bb-5pc2z\" (UID: \"c16ef766-7ac2-43bc-a1fa-ea9fc1e61edf\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-5pc2z" Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.636513 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/72c05220-a1c6-4f3a-a8f1-c588a4e062b0-kube-api-access\") pod \"kube-controller-manager-operator-78b949d7b-sfzn5\" (UID: \"72c05220-a1c6-4f3a-a8f1-c588a4e062b0\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-sfzn5" Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.636545 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/c699c950-dfd4-4513-8c2a-3f38c8094d22-etcd-ca\") pod \"etcd-operator-b45778765-m6bkw\" (UID: \"c699c950-dfd4-4513-8c2a-3f38c8094d22\") " pod="openshift-etcd-operator/etcd-operator-b45778765-m6bkw" Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.636579 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-g6mfl\" (UniqueName: \"kubernetes.io/projected/c699c950-dfd4-4513-8c2a-3f38c8094d22-kube-api-access-g6mfl\") pod \"etcd-operator-b45778765-m6bkw\" (UID: 
\"c699c950-dfd4-4513-8c2a-3f38c8094d22\") " pod="openshift-etcd-operator/etcd-operator-b45778765-m6bkw" Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.636614 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/c16ef766-7ac2-43bc-a1fa-ea9fc1e61edf-serving-cert\") pod \"kube-apiserver-operator-766d6c64bb-5pc2z\" (UID: \"c16ef766-7ac2-43bc-a1fa-ea9fc1e61edf\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-5pc2z" Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.636649 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/765b0f79-4316-4aab-90fa-a2aaa84380f0-secret-volume\") pod \"collect-profiles-29405205-gln9g\" (UID: \"765b0f79-4316-4aab-90fa-a2aaa84380f0\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405205-gln9g" Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.636687 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/c699c950-dfd4-4513-8c2a-3f38c8094d22-serving-cert\") pod \"etcd-operator-b45778765-m6bkw\" (UID: \"c699c950-dfd4-4513-8c2a-3f38c8094d22\") " pod="openshift-etcd-operator/etcd-operator-b45778765-m6bkw" Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.636720 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/c699c950-dfd4-4513-8c2a-3f38c8094d22-etcd-client\") pod \"etcd-operator-b45778765-m6bkw\" (UID: \"c699c950-dfd4-4513-8c2a-3f38c8094d22\") " pod="openshift-etcd-operator/etcd-operator-b45778765-m6bkw" Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.636771 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/9cfee6d8-0106-4b57-98e3-23eaed2b9ff7-image-registry-operator-tls\") pod \"cluster-image-registry-operator-dc59b4c8b-98sbt\" (UID: \"9cfee6d8-0106-4b57-98e3-23eaed2b9ff7\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-98sbt" Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.636810 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/9cfee6d8-0106-4b57-98e3-23eaed2b9ff7-bound-sa-token\") pod \"cluster-image-registry-operator-dc59b4c8b-98sbt\" (UID: \"9cfee6d8-0106-4b57-98e3-23eaed2b9ff7\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-98sbt" Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.636854 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3f39970d-138c-4369-838c-af7074913d3b-config\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-7wtcp\" (UID: \"3f39970d-138c-4369-838c-af7074913d3b\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-7wtcp" Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.636902 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/765b0f79-4316-4aab-90fa-a2aaa84380f0-config-volume\") pod \"collect-profiles-29405205-gln9g\" (UID: \"765b0f79-4316-4aab-90fa-a2aaa84380f0\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405205-gln9g" Nov 28 06:55:10 crc kubenswrapper[4922]: 
I1128 06:55:10.636948 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/6b8f1ab0-54be-4dad-97b3-a69b288dea67-auth-proxy-config\") pod \"machine-approver-56656f9798-pp742\" (UID: \"6b8f1ab0-54be-4dad-97b3-a69b288dea67\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-pp742" Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.636986 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/c699c950-dfd4-4513-8c2a-3f38c8094d22-etcd-service-ca\") pod \"etcd-operator-b45778765-m6bkw\" (UID: \"c699c950-dfd4-4513-8c2a-3f38c8094d22\") " pod="openshift-etcd-operator/etcd-operator-b45778765-m6bkw" Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.637017 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/6b8f1ab0-54be-4dad-97b3-a69b288dea67-machine-approver-tls\") pod \"machine-approver-56656f9798-pp742\" (UID: \"6b8f1ab0-54be-4dad-97b3-a69b288dea67\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-pp742" Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.637055 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dwh96\" (UniqueName: \"kubernetes.io/projected/765b0f79-4316-4aab-90fa-a2aaa84380f0-kube-api-access-dwh96\") pod \"collect-profiles-29405205-gln9g\" (UID: \"765b0f79-4316-4aab-90fa-a2aaa84380f0\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405205-gln9g" Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.637112 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cflk4\" (UniqueName: \"kubernetes.io/projected/1f79995c-e40b-4f4a-9001-0054f046a944-kube-api-access-cflk4\") pod \"dns-operator-744455d44c-s8ws4\" (UID: \"1f79995c-e40b-4f4a-9001-0054f046a944\") " pod="openshift-dns-operator/dns-operator-744455d44c-s8ws4" Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.637155 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/fd93cb7d-1de5-4e04-957a-a91d0bb26134-trusted-ca\") pod \"console-operator-58897d9998-z4zvd\" (UID: \"fd93cb7d-1de5-4e04-957a-a91d0bb26134\") " pod="openshift-console-operator/console-operator-58897d9998-z4zvd" Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.639850 4922 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"openshift-service-ca.crt" Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.640567 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/765b0f79-4316-4aab-90fa-a2aaa84380f0-secret-volume\") pod \"collect-profiles-29405205-gln9g\" (UID: \"765b0f79-4316-4aab-90fa-a2aaa84380f0\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405205-gln9g" Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.662478 4922 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"signing-cabundle" Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.680704 4922 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"kube-root-ca.crt" Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.700265 4922 reflector.go:368] Caches populated for *v1.Secret 
from object-"openshift-operator-lifecycle-manager"/"packageserver-service-cert" Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.731619 4922 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"trusted-ca" Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.738010 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/9cfee6d8-0106-4b57-98e3-23eaed2b9ff7-trusted-ca\") pod \"cluster-image-registry-operator-dc59b4c8b-98sbt\" (UID: \"9cfee6d8-0106-4b57-98e3-23eaed2b9ff7\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-98sbt" Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.740423 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"registry-dockercfg-kzzsd" Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.761125 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"image-registry-tls" Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.780750 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"installation-pull-secrets" Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.800017 4922 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"oauth-serving-cert" Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.820042 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-oauth-config" Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.839521 4922 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator"/"openshift-service-ca.crt" Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.860750 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-dockercfg-f62pw" Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.881951 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-serving-cert" Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.900887 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator"/"kube-storage-version-migrator-sa-dockercfg-5xfcg" Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.920265 4922 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator"/"kube-root-ca.crt" Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.940533 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-scheduler-operator"/"kube-scheduler-operator-serving-cert" Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.950729 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/3f39970d-138c-4369-838c-af7074913d3b-serving-cert\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-7wtcp\" (UID: \"3f39970d-138c-4369-838c-af7074913d3b\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-7wtcp" Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.960739 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-scheduler-operator"/"openshift-kube-scheduler-operator-dockercfg-qt55r" Nov 28 06:55:10 crc kubenswrapper[4922]: I1128 06:55:10.979407 4922 reflector.go:368] Caches populated for *v1.ConfigMap from 
object-"openshift-console"/"service-ca" Nov 28 06:55:11 crc kubenswrapper[4922]: I1128 06:55:11.000578 4922 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"console-config" Nov 28 06:55:11 crc kubenswrapper[4922]: I1128 06:55:11.030771 4922 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"trusted-ca-bundle" Nov 28 06:55:11 crc kubenswrapper[4922]: I1128 06:55:11.040036 4922 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager-operator"/"kube-root-ca.crt" Nov 28 06:55:11 crc kubenswrapper[4922]: I1128 06:55:11.060532 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-dockercfg-gkqpw" Nov 28 06:55:11 crc kubenswrapper[4922]: I1128 06:55:11.080755 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-serving-cert" Nov 28 06:55:11 crc kubenswrapper[4922]: I1128 06:55:11.090269 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/72c05220-a1c6-4f3a-a8f1-c588a4e062b0-serving-cert\") pod \"kube-controller-manager-operator-78b949d7b-sfzn5\" (UID: \"72c05220-a1c6-4f3a-a8f1-c588a4e062b0\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-sfzn5" Nov 28 06:55:11 crc kubenswrapper[4922]: I1128 06:55:11.100938 4922 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-config" Nov 28 06:55:11 crc kubenswrapper[4922]: I1128 06:55:11.107708 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/72c05220-a1c6-4f3a-a8f1-c588a4e062b0-config\") pod \"kube-controller-manager-operator-78b949d7b-sfzn5\" (UID: \"72c05220-a1c6-4f3a-a8f1-c588a4e062b0\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-sfzn5" Nov 28 06:55:11 crc kubenswrapper[4922]: I1128 06:55:11.120681 4922 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-scheduler-operator"/"openshift-kube-scheduler-operator-config" Nov 28 06:55:11 crc kubenswrapper[4922]: I1128 06:55:11.128636 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3f39970d-138c-4369-838c-af7074913d3b-config\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-7wtcp\" (UID: \"3f39970d-138c-4369-838c-af7074913d3b\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-7wtcp" Nov 28 06:55:11 crc kubenswrapper[4922]: I1128 06:55:11.141794 4922 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-scheduler-operator"/"kube-root-ca.crt" Nov 28 06:55:11 crc kubenswrapper[4922]: I1128 06:55:11.161885 4922 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns-operator"/"openshift-service-ca.crt" Nov 28 06:55:11 crc kubenswrapper[4922]: I1128 06:55:11.180666 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns-operator"/"dns-operator-dockercfg-9mqw5" Nov 28 06:55:11 crc kubenswrapper[4922]: I1128 06:55:11.201103 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns-operator"/"metrics-tls" Nov 28 06:55:11 crc kubenswrapper[4922]: I1128 
06:55:11.210316 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/1f79995c-e40b-4f4a-9001-0054f046a944-metrics-tls\") pod \"dns-operator-744455d44c-s8ws4\" (UID: \"1f79995c-e40b-4f4a-9001-0054f046a944\") " pod="openshift-dns-operator/dns-operator-744455d44c-s8ws4"
Nov 28 06:55:11 crc kubenswrapper[4922]: I1128 06:55:11.221613 4922 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns-operator"/"kube-root-ca.crt"
Nov 28 06:55:11 crc kubenswrapper[4922]: I1128 06:55:11.240910 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"image-registry-operator-tls"
Nov 28 06:55:11 crc kubenswrapper[4922]: I1128 06:55:11.251459 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/9cfee6d8-0106-4b57-98e3-23eaed2b9ff7-image-registry-operator-tls\") pod \"cluster-image-registry-operator-dc59b4c8b-98sbt\" (UID: \"9cfee6d8-0106-4b57-98e3-23eaed2b9ff7\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-98sbt"
Nov 28 06:55:11 crc kubenswrapper[4922]: I1128 06:55:11.260286 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"olm-operator-serving-cert"
Nov 28 06:55:11 crc kubenswrapper[4922]: I1128 06:55:11.280920 4922 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-service-ca-bundle"
Nov 28 06:55:11 crc kubenswrapper[4922]: I1128 06:55:11.288561 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/c699c950-dfd4-4513-8c2a-3f38c8094d22-etcd-service-ca\") pod \"etcd-operator-b45778765-m6bkw\" (UID: \"c699c950-dfd4-4513-8c2a-3f38c8094d22\") " pod="openshift-etcd-operator/etcd-operator-b45778765-m6bkw"
Nov 28 06:55:11 crc kubenswrapper[4922]: I1128 06:55:11.300637 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"cluster-image-registry-operator-dockercfg-m4qtx"
Nov 28 06:55:11 crc kubenswrapper[4922]: I1128 06:55:11.320853 4922 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"kube-root-ca.crt"
Nov 28 06:55:11 crc kubenswrapper[4922]: I1128 06:55:11.340853 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-operator-dockercfg-r9srn"
Nov 28 06:55:11 crc kubenswrapper[4922]: I1128 06:55:11.360461 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-operator-serving-cert"
Nov 28 06:55:11 crc kubenswrapper[4922]: I1128 06:55:11.372668 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/c699c950-dfd4-4513-8c2a-3f38c8094d22-serving-cert\") pod \"etcd-operator-b45778765-m6bkw\" (UID: \"c699c950-dfd4-4513-8c2a-3f38c8094d22\") " pod="openshift-etcd-operator/etcd-operator-b45778765-m6bkw"
Nov 28 06:55:11 crc kubenswrapper[4922]: I1128 06:55:11.379955 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-client"
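Each UniqueName in these entries follows the same layout: the plugin name, then the owning pod's UID and the volume name joined by a dash, e.g. kubernetes.io/secret/c699c950-dfd4-4513-8c2a-3f38c8094d22-etcd-client. A small stdlib-only Go helper for pulling such strings apart while reading logs like this one; the fixed 36-character UID width is inferred from the entries here, a reading aid rather than kubelet code:

package main

import (
	"fmt"
	"strings"
)

// splitUniqueName splits a volume UniqueName as printed in this log into its
// plugin, pod UID, and volume name, assuming the <plugin>/<podUID>-<volume>
// layout observed above.
func splitUniqueName(u string) (plugin, podUID, volume string, err error) {
	i := strings.LastIndex(u, "/")
	if i < 0 {
		return "", "", "", fmt.Errorf("unexpected UniqueName: %q", u)
	}
	plugin, rest := u[:i], u[i+1:]
	if len(rest) < 38 { // 36-char UID + "-" + at least one volume-name char
		return "", "", "", fmt.Errorf("unexpected UniqueName: %q", u)
	}
	podUID = rest[:36] // UUIDs print as 36 chars: 8-4-4-4-12
	volume = rest[37:] // skip the "-" joining UID and volume name
	return plugin, podUID, volume, nil
}

func main() {
	p, uid, v, err := splitUniqueName(
		"kubernetes.io/secret/c699c950-dfd4-4513-8c2a-3f38c8094d22-etcd-client")
	if err != nil {
		panic(err)
	}
	fmt.Printf("plugin=%s podUID=%s volume=%s\n", p, uid, v)
	// plugin=kubernetes.io/secret podUID=c699c950-dfd4-4513-8c2a-3f38c8094d22 volume=etcd-client
}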
\"c699c950-dfd4-4513-8c2a-3f38c8094d22\") " pod="openshift-etcd-operator/etcd-operator-b45778765-m6bkw" Nov 28 06:55:11 crc kubenswrapper[4922]: I1128 06:55:11.398324 4922 request.go:700] Waited for 1.01446641s due to client-side throttling, not priority and fairness, request: GET:https://api-int.crc.testing:6443/api/v1/namespaces/openshift-etcd-operator/configmaps?fieldSelector=metadata.name%3Detcd-operator-config&limit=500&resourceVersion=0 Nov 28 06:55:11 crc kubenswrapper[4922]: I1128 06:55:11.400178 4922 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-operator-config" Nov 28 06:55:11 crc kubenswrapper[4922]: I1128 06:55:11.408327 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c699c950-dfd4-4513-8c2a-3f38c8094d22-config\") pod \"etcd-operator-b45778765-m6bkw\" (UID: \"c699c950-dfd4-4513-8c2a-3f38c8094d22\") " pod="openshift-etcd-operator/etcd-operator-b45778765-m6bkw" Nov 28 06:55:11 crc kubenswrapper[4922]: I1128 06:55:11.420784 4922 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"openshift-service-ca.crt" Nov 28 06:55:11 crc kubenswrapper[4922]: I1128 06:55:11.441284 4922 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-ca-bundle" Nov 28 06:55:11 crc kubenswrapper[4922]: I1128 06:55:11.447962 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/c699c950-dfd4-4513-8c2a-3f38c8094d22-etcd-ca\") pod \"etcd-operator-b45778765-m6bkw\" (UID: \"c699c950-dfd4-4513-8c2a-3f38c8094d22\") " pod="openshift-etcd-operator/etcd-operator-b45778765-m6bkw" Nov 28 06:55:11 crc kubenswrapper[4922]: I1128 06:55:11.480809 4922 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Nov 28 06:55:11 crc kubenswrapper[4922]: I1128 06:55:11.488311 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/765b0f79-4316-4aab-90fa-a2aaa84380f0-config-volume\") pod \"collect-profiles-29405205-gln9g\" (UID: \"765b0f79-4316-4aab-90fa-a2aaa84380f0\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405205-gln9g" Nov 28 06:55:11 crc kubenswrapper[4922]: I1128 06:55:11.500025 4922 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"openshift-service-ca.crt" Nov 28 06:55:11 crc kubenswrapper[4922]: I1128 06:55:11.520902 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Nov 28 06:55:11 crc kubenswrapper[4922]: I1128 06:55:11.540563 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-machine-approver"/"machine-approver-sa-dockercfg-nl2j4" Nov 28 06:55:11 crc kubenswrapper[4922]: I1128 06:55:11.562151 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-machine-approver"/"machine-approver-tls" Nov 28 06:55:11 crc kubenswrapper[4922]: I1128 06:55:11.572436 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/6b8f1ab0-54be-4dad-97b3-a69b288dea67-machine-approver-tls\") pod \"machine-approver-56656f9798-pp742\" (UID: \"6b8f1ab0-54be-4dad-97b3-a69b288dea67\") " 
pod="openshift-cluster-machine-approver/machine-approver-56656f9798-pp742" Nov 28 06:55:11 crc kubenswrapper[4922]: I1128 06:55:11.581357 4922 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"kube-rbac-proxy" Nov 28 06:55:11 crc kubenswrapper[4922]: I1128 06:55:11.588366 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/6b8f1ab0-54be-4dad-97b3-a69b288dea67-auth-proxy-config\") pod \"machine-approver-56656f9798-pp742\" (UID: \"6b8f1ab0-54be-4dad-97b3-a69b288dea67\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-pp742" Nov 28 06:55:11 crc kubenswrapper[4922]: I1128 06:55:11.600153 4922 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"machine-approver-config" Nov 28 06:55:11 crc kubenswrapper[4922]: I1128 06:55:11.606824 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6b8f1ab0-54be-4dad-97b3-a69b288dea67-config\") pod \"machine-approver-56656f9798-pp742\" (UID: \"6b8f1ab0-54be-4dad-97b3-a69b288dea67\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-pp742" Nov 28 06:55:11 crc kubenswrapper[4922]: I1128 06:55:11.620203 4922 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"kube-root-ca.crt" Nov 28 06:55:11 crc kubenswrapper[4922]: E1128 06:55:11.635973 4922 configmap.go:193] Couldn't get configMap openshift-service-ca-operator/service-ca-operator-config: failed to sync configmap cache: timed out waiting for the condition Nov 28 06:55:11 crc kubenswrapper[4922]: E1128 06:55:11.636037 4922 secret.go:188] Couldn't get secret openshift-console-operator/serving-cert: failed to sync secret cache: timed out waiting for the condition Nov 28 06:55:11 crc kubenswrapper[4922]: E1128 06:55:11.636040 4922 secret.go:188] Couldn't get secret openshift-service-ca-operator/serving-cert: failed to sync secret cache: timed out waiting for the condition Nov 28 06:55:11 crc kubenswrapper[4922]: E1128 06:55:11.636106 4922 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/71cf79c0-841b-4024-9b3d-9c49ca7d89d9-config podName:71cf79c0-841b-4024-9b3d-9c49ca7d89d9 nodeName:}" failed. No retries permitted until 2025-11-28 06:55:12.136074689 +0000 UTC m=+157.056470391 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "config" (UniqueName: "kubernetes.io/configmap/71cf79c0-841b-4024-9b3d-9c49ca7d89d9-config") pod "service-ca-operator-777779d784-hgtbm" (UID: "71cf79c0-841b-4024-9b3d-9c49ca7d89d9") : failed to sync configmap cache: timed out waiting for the condition Nov 28 06:55:11 crc kubenswrapper[4922]: E1128 06:55:11.635993 4922 configmap.go:193] Couldn't get configMap openshift-console-operator/console-operator-config: failed to sync configmap cache: timed out waiting for the condition Nov 28 06:55:11 crc kubenswrapper[4922]: E1128 06:55:11.636129 4922 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/fd93cb7d-1de5-4e04-957a-a91d0bb26134-serving-cert podName:fd93cb7d-1de5-4e04-957a-a91d0bb26134 nodeName:}" failed. No retries permitted until 2025-11-28 06:55:12.13611946 +0000 UTC m=+157.056515172 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "serving-cert" (UniqueName: "kubernetes.io/secret/fd93cb7d-1de5-4e04-957a-a91d0bb26134-serving-cert") pod "console-operator-58897d9998-z4zvd" (UID: "fd93cb7d-1de5-4e04-957a-a91d0bb26134") : failed to sync secret cache: timed out waiting for the condition Nov 28 06:55:11 crc kubenswrapper[4922]: E1128 06:55:11.636176 4922 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/71cf79c0-841b-4024-9b3d-9c49ca7d89d9-serving-cert podName:71cf79c0-841b-4024-9b3d-9c49ca7d89d9 nodeName:}" failed. No retries permitted until 2025-11-28 06:55:12.13614068 +0000 UTC m=+157.056536322 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "serving-cert" (UniqueName: "kubernetes.io/secret/71cf79c0-841b-4024-9b3d-9c49ca7d89d9-serving-cert") pod "service-ca-operator-777779d784-hgtbm" (UID: "71cf79c0-841b-4024-9b3d-9c49ca7d89d9") : failed to sync secret cache: timed out waiting for the condition Nov 28 06:55:11 crc kubenswrapper[4922]: E1128 06:55:11.636254 4922 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/fd93cb7d-1de5-4e04-957a-a91d0bb26134-config podName:fd93cb7d-1de5-4e04-957a-a91d0bb26134 nodeName:}" failed. No retries permitted until 2025-11-28 06:55:12.136212002 +0000 UTC m=+157.056607624 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "config" (UniqueName: "kubernetes.io/configmap/fd93cb7d-1de5-4e04-957a-a91d0bb26134-config") pod "console-operator-58897d9998-z4zvd" (UID: "fd93cb7d-1de5-4e04-957a-a91d0bb26134") : failed to sync configmap cache: timed out waiting for the condition Nov 28 06:55:11 crc kubenswrapper[4922]: E1128 06:55:11.636639 4922 configmap.go:193] Couldn't get configMap openshift-kube-apiserver-operator/kube-apiserver-operator-config: failed to sync configmap cache: timed out waiting for the condition Nov 28 06:55:11 crc kubenswrapper[4922]: E1128 06:55:11.636713 4922 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/c16ef766-7ac2-43bc-a1fa-ea9fc1e61edf-config podName:c16ef766-7ac2-43bc-a1fa-ea9fc1e61edf nodeName:}" failed. No retries permitted until 2025-11-28 06:55:12.136700826 +0000 UTC m=+157.057096598 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "config" (UniqueName: "kubernetes.io/configmap/c16ef766-7ac2-43bc-a1fa-ea9fc1e61edf-config") pod "kube-apiserver-operator-766d6c64bb-5pc2z" (UID: "c16ef766-7ac2-43bc-a1fa-ea9fc1e61edf") : failed to sync configmap cache: timed out waiting for the condition Nov 28 06:55:11 crc kubenswrapper[4922]: E1128 06:55:11.637353 4922 secret.go:188] Couldn't get secret openshift-kube-apiserver-operator/kube-apiserver-operator-serving-cert: failed to sync secret cache: timed out waiting for the condition Nov 28 06:55:11 crc kubenswrapper[4922]: E1128 06:55:11.637389 4922 configmap.go:193] Couldn't get configMap openshift-console-operator/trusted-ca: failed to sync configmap cache: timed out waiting for the condition Nov 28 06:55:11 crc kubenswrapper[4922]: E1128 06:55:11.637443 4922 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/c16ef766-7ac2-43bc-a1fa-ea9fc1e61edf-serving-cert podName:c16ef766-7ac2-43bc-a1fa-ea9fc1e61edf nodeName:}" failed. No retries permitted until 2025-11-28 06:55:12.137420457 +0000 UTC m=+157.057816229 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "serving-cert" (UniqueName: "kubernetes.io/secret/c16ef766-7ac2-43bc-a1fa-ea9fc1e61edf-serving-cert") pod "kube-apiserver-operator-766d6c64bb-5pc2z" (UID: "c16ef766-7ac2-43bc-a1fa-ea9fc1e61edf") : failed to sync secret cache: timed out waiting for the condition Nov 28 06:55:11 crc kubenswrapper[4922]: E1128 06:55:11.637480 4922 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/fd93cb7d-1de5-4e04-957a-a91d0bb26134-trusted-ca podName:fd93cb7d-1de5-4e04-957a-a91d0bb26134 nodeName:}" failed. No retries permitted until 2025-11-28 06:55:12.137457718 +0000 UTC m=+157.057853460 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "trusted-ca" (UniqueName: "kubernetes.io/configmap/fd93cb7d-1de5-4e04-957a-a91d0bb26134-trusted-ca") pod "console-operator-58897d9998-z4zvd" (UID: "fd93cb7d-1de5-4e04-957a-a91d0bb26134") : failed to sync configmap cache: timed out waiting for the condition Nov 28 06:55:11 crc kubenswrapper[4922]: I1128 06:55:11.640986 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca-operator"/"service-ca-operator-dockercfg-rg9jl" Nov 28 06:55:11 crc kubenswrapper[4922]: I1128 06:55:11.661018 4922 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"kube-root-ca.crt" Nov 28 06:55:11 crc kubenswrapper[4922]: I1128 06:55:11.680311 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca-operator"/"serving-cert" Nov 28 06:55:11 crc kubenswrapper[4922]: I1128 06:55:11.700175 4922 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"openshift-service-ca.crt" Nov 28 06:55:11 crc kubenswrapper[4922]: I1128 06:55:11.719999 4922 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver-operator"/"kube-root-ca.crt" Nov 28 06:55:11 crc kubenswrapper[4922]: I1128 06:55:11.740890 4922 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"service-ca-operator-config" Nov 28 06:55:11 crc kubenswrapper[4922]: I1128 06:55:11.761689 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-dockercfg-x57mr" Nov 28 06:55:11 crc kubenswrapper[4922]: I1128 06:55:11.780329 4922 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"openshift-service-ca.crt" Nov 28 06:55:11 crc kubenswrapper[4922]: I1128 06:55:11.801294 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-serving-cert" Nov 28 06:55:11 crc kubenswrapper[4922]: I1128 06:55:11.820902 4922 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-config" Nov 28 06:55:11 crc kubenswrapper[4922]: I1128 06:55:11.840919 4922 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"openshift-service-ca.crt" Nov 28 06:55:11 crc kubenswrapper[4922]: I1128 06:55:11.860511 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console-operator"/"console-operator-dockercfg-4xjcr" Nov 28 06:55:11 crc kubenswrapper[4922]: I1128 06:55:11.881105 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console-operator"/"serving-cert" Nov 28 06:55:11 crc kubenswrapper[4922]: I1128 06:55:11.900888 4922 
reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-operator"/"ingress-operator-dockercfg-7lnqk" Nov 28 06:55:11 crc kubenswrapper[4922]: I1128 06:55:11.920556 4922 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"console-operator-config" Nov 28 06:55:11 crc kubenswrapper[4922]: I1128 06:55:11.940174 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-operator"/"metrics-tls" Nov 28 06:55:11 crc kubenswrapper[4922]: I1128 06:55:11.972288 4922 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"trusted-ca" Nov 28 06:55:11 crc kubenswrapper[4922]: I1128 06:55:11.992069 4922 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"trusted-ca" Nov 28 06:55:12 crc kubenswrapper[4922]: I1128 06:55:12.000381 4922 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"kube-root-ca.crt" Nov 28 06:55:12 crc kubenswrapper[4922]: I1128 06:55:12.020798 4922 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"kube-root-ca.crt" Nov 28 06:55:12 crc kubenswrapper[4922]: I1128 06:55:12.067308 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4wdk2\" (UniqueName: \"kubernetes.io/projected/d58dc2f1-e069-4b63-8371-7c6b2735adab-kube-api-access-4wdk2\") pod \"apiserver-7bbb656c7d-9t6kr\" (UID: \"d58dc2f1-e069-4b63-8371-7c6b2735adab\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-9t6kr" Nov 28 06:55:12 crc kubenswrapper[4922]: I1128 06:55:12.088406 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-87lkg\" (UniqueName: \"kubernetes.io/projected/1aefeaa5-e3f1-4aed-b152-35d380c3f87b-kube-api-access-87lkg\") pod \"route-controller-manager-6576b87f9c-qckrl\" (UID: \"1aefeaa5-e3f1-4aed-b152-35d380c3f87b\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-qckrl" Nov 28 06:55:12 crc kubenswrapper[4922]: I1128 06:55:12.113300 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-88lpg\" (UniqueName: \"kubernetes.io/projected/b326d97a-72e4-4cbd-bea5-08613505b7e0-kube-api-access-88lpg\") pod \"openshift-apiserver-operator-796bbdcf4f-7tnsd\" (UID: \"b326d97a-72e4-4cbd-bea5-08613505b7e0\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-7tnsd" Nov 28 06:55:12 crc kubenswrapper[4922]: I1128 06:55:12.119739 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-w46t2\" (UniqueName: \"kubernetes.io/projected/ddd22a7d-d9fd-41c5-a83a-8b68574e637e-kube-api-access-w46t2\") pod \"openshift-controller-manager-operator-756b6f6bc6-4b5vr\" (UID: \"ddd22a7d-d9fd-41c5-a83a-8b68574e637e\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-4b5vr" Nov 28 06:55:12 crc kubenswrapper[4922]: I1128 06:55:12.140395 4922 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-9t6kr" Nov 28 06:55:12 crc kubenswrapper[4922]: I1128 06:55:12.145444 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bz7dq\" (UniqueName: \"kubernetes.io/projected/fede5fe4-d38c-46de-b334-32e9f56cf110-kube-api-access-bz7dq\") pod \"controller-manager-879f6c89f-xlthx\" (UID: \"fede5fe4-d38c-46de-b334-32e9f56cf110\") " pod="openshift-controller-manager/controller-manager-879f6c89f-xlthx" Nov 28 06:55:12 crc kubenswrapper[4922]: I1128 06:55:12.166102 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gxrpb\" (UniqueName: \"kubernetes.io/projected/49208aaf-d742-4bab-9b7e-b883e88096f6-kube-api-access-gxrpb\") pod \"downloads-7954f5f757-2xxgj\" (UID: \"49208aaf-d742-4bab-9b7e-b883e88096f6\") " pod="openshift-console/downloads-7954f5f757-2xxgj" Nov 28 06:55:12 crc kubenswrapper[4922]: I1128 06:55:12.168256 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/fd93cb7d-1de5-4e04-957a-a91d0bb26134-trusted-ca\") pod \"console-operator-58897d9998-z4zvd\" (UID: \"fd93cb7d-1de5-4e04-957a-a91d0bb26134\") " pod="openshift-console-operator/console-operator-58897d9998-z4zvd" Nov 28 06:55:12 crc kubenswrapper[4922]: I1128 06:55:12.168310 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/fd93cb7d-1de5-4e04-957a-a91d0bb26134-config\") pod \"console-operator-58897d9998-z4zvd\" (UID: \"fd93cb7d-1de5-4e04-957a-a91d0bb26134\") " pod="openshift-console-operator/console-operator-58897d9998-z4zvd" Nov 28 06:55:12 crc kubenswrapper[4922]: I1128 06:55:12.168333 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/fd93cb7d-1de5-4e04-957a-a91d0bb26134-serving-cert\") pod \"console-operator-58897d9998-z4zvd\" (UID: \"fd93cb7d-1de5-4e04-957a-a91d0bb26134\") " pod="openshift-console-operator/console-operator-58897d9998-z4zvd" Nov 28 06:55:12 crc kubenswrapper[4922]: I1128 06:55:12.168378 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/71cf79c0-841b-4024-9b3d-9c49ca7d89d9-config\") pod \"service-ca-operator-777779d784-hgtbm\" (UID: \"71cf79c0-841b-4024-9b3d-9c49ca7d89d9\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-hgtbm" Nov 28 06:55:12 crc kubenswrapper[4922]: I1128 06:55:12.168410 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/71cf79c0-841b-4024-9b3d-9c49ca7d89d9-serving-cert\") pod \"service-ca-operator-777779d784-hgtbm\" (UID: \"71cf79c0-841b-4024-9b3d-9c49ca7d89d9\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-hgtbm" Nov 28 06:55:12 crc kubenswrapper[4922]: I1128 06:55:12.168493 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c16ef766-7ac2-43bc-a1fa-ea9fc1e61edf-config\") pod \"kube-apiserver-operator-766d6c64bb-5pc2z\" (UID: \"c16ef766-7ac2-43bc-a1fa-ea9fc1e61edf\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-5pc2z" Nov 28 06:55:12 crc kubenswrapper[4922]: I1128 06:55:12.168525 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: 
\"kubernetes.io/secret/c16ef766-7ac2-43bc-a1fa-ea9fc1e61edf-serving-cert\") pod \"kube-apiserver-operator-766d6c64bb-5pc2z\" (UID: \"c16ef766-7ac2-43bc-a1fa-ea9fc1e61edf\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-5pc2z" Nov 28 06:55:12 crc kubenswrapper[4922]: I1128 06:55:12.170080 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/71cf79c0-841b-4024-9b3d-9c49ca7d89d9-config\") pod \"service-ca-operator-777779d784-hgtbm\" (UID: \"71cf79c0-841b-4024-9b3d-9c49ca7d89d9\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-hgtbm" Nov 28 06:55:12 crc kubenswrapper[4922]: I1128 06:55:12.171128 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c16ef766-7ac2-43bc-a1fa-ea9fc1e61edf-config\") pod \"kube-apiserver-operator-766d6c64bb-5pc2z\" (UID: \"c16ef766-7ac2-43bc-a1fa-ea9fc1e61edf\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-5pc2z" Nov 28 06:55:12 crc kubenswrapper[4922]: I1128 06:55:12.171566 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/fd93cb7d-1de5-4e04-957a-a91d0bb26134-config\") pod \"console-operator-58897d9998-z4zvd\" (UID: \"fd93cb7d-1de5-4e04-957a-a91d0bb26134\") " pod="openshift-console-operator/console-operator-58897d9998-z4zvd" Nov 28 06:55:12 crc kubenswrapper[4922]: I1128 06:55:12.171802 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/fd93cb7d-1de5-4e04-957a-a91d0bb26134-trusted-ca\") pod \"console-operator-58897d9998-z4zvd\" (UID: \"fd93cb7d-1de5-4e04-957a-a91d0bb26134\") " pod="openshift-console-operator/console-operator-58897d9998-z4zvd" Nov 28 06:55:12 crc kubenswrapper[4922]: I1128 06:55:12.173032 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/fd93cb7d-1de5-4e04-957a-a91d0bb26134-serving-cert\") pod \"console-operator-58897d9998-z4zvd\" (UID: \"fd93cb7d-1de5-4e04-957a-a91d0bb26134\") " pod="openshift-console-operator/console-operator-58897d9998-z4zvd" Nov 28 06:55:12 crc kubenswrapper[4922]: I1128 06:55:12.173431 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/c16ef766-7ac2-43bc-a1fa-ea9fc1e61edf-serving-cert\") pod \"kube-apiserver-operator-766d6c64bb-5pc2z\" (UID: \"c16ef766-7ac2-43bc-a1fa-ea9fc1e61edf\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-5pc2z" Nov 28 06:55:12 crc kubenswrapper[4922]: I1128 06:55:12.174803 4922 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-console/downloads-7954f5f757-2xxgj" Nov 28 06:55:12 crc kubenswrapper[4922]: I1128 06:55:12.175495 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/71cf79c0-841b-4024-9b3d-9c49ca7d89d9-serving-cert\") pod \"service-ca-operator-777779d784-hgtbm\" (UID: \"71cf79c0-841b-4024-9b3d-9c49ca7d89d9\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-hgtbm" Nov 28 06:55:12 crc kubenswrapper[4922]: I1128 06:55:12.190196 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-t6msm\" (UniqueName: \"kubernetes.io/projected/65e269c0-8d17-410e-ae5c-4ff9ba053bf9-kube-api-access-t6msm\") pod \"cluster-samples-operator-665b6dd947-nm899\" (UID: \"65e269c0-8d17-410e-ae5c-4ff9ba053bf9\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-nm899" Nov 28 06:55:12 crc kubenswrapper[4922]: I1128 06:55:12.201287 4922 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"dns-default" Nov 28 06:55:12 crc kubenswrapper[4922]: I1128 06:55:12.220324 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"dns-dockercfg-jwfmh" Nov 28 06:55:12 crc kubenswrapper[4922]: I1128 06:55:12.221185 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-879f6c89f-xlthx" Nov 28 06:55:12 crc kubenswrapper[4922]: I1128 06:55:12.228621 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-4b5vr" Nov 28 06:55:12 crc kubenswrapper[4922]: I1128 06:55:12.245610 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"dns-default-metrics-tls" Nov 28 06:55:12 crc kubenswrapper[4922]: I1128 06:55:12.261321 4922 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-canary"/"kube-root-ca.crt" Nov 28 06:55:12 crc kubenswrapper[4922]: I1128 06:55:12.281540 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-canary"/"default-dockercfg-2llfx" Nov 28 06:55:12 crc kubenswrapper[4922]: I1128 06:55:12.302930 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-canary"/"canary-serving-cert" Nov 28 06:55:12 crc kubenswrapper[4922]: I1128 06:55:12.320559 4922 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-canary"/"openshift-service-ca.crt" Nov 28 06:55:12 crc kubenswrapper[4922]: I1128 06:55:12.342165 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-server-dockercfg-qx5rd" Nov 28 06:55:12 crc kubenswrapper[4922]: I1128 06:55:12.359771 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-server-tls" Nov 28 06:55:12 crc kubenswrapper[4922]: I1128 06:55:12.363235 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-7tnsd" Nov 28 06:55:12 crc kubenswrapper[4922]: I1128 06:55:12.376067 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-oauth-apiserver/apiserver-7bbb656c7d-9t6kr"] Nov 28 06:55:12 crc kubenswrapper[4922]: I1128 06:55:12.377363 4922 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-qckrl" Nov 28 06:55:12 crc kubenswrapper[4922]: I1128 06:55:12.383481 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"node-bootstrapper-token" Nov 28 06:55:12 crc kubenswrapper[4922]: I1128 06:55:12.398535 4922 request.go:700] Waited for 1.903700994s due to client-side throttling, not priority and fairness, request: GET:https://api-int.crc.testing:6443/api/v1/namespaces/hostpath-provisioner/secrets?fieldSelector=metadata.name%3Dcsi-hostpath-provisioner-sa-dockercfg-qd74k&limit=500&resourceVersion=0 Nov 28 06:55:12 crc kubenswrapper[4922]: I1128 06:55:12.400423 4922 reflector.go:368] Caches populated for *v1.Secret from object-"hostpath-provisioner"/"csi-hostpath-provisioner-sa-dockercfg-qd74k" Nov 28 06:55:12 crc kubenswrapper[4922]: I1128 06:55:12.416933 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/downloads-7954f5f757-2xxgj"] Nov 28 06:55:12 crc kubenswrapper[4922]: I1128 06:55:12.420395 4922 reflector.go:368] Caches populated for *v1.ConfigMap from object-"hostpath-provisioner"/"kube-root-ca.crt" Nov 28 06:55:12 crc kubenswrapper[4922]: W1128 06:55:12.426077 4922 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod49208aaf_d742_4bab_9b7e_b883e88096f6.slice/crio-f443ce9fdf4640f21211ea73566ffbc73b441a9d20f1609fc80cea88c382dada WatchSource:0}: Error finding container f443ce9fdf4640f21211ea73566ffbc73b441a9d20f1609fc80cea88c382dada: Status 404 returned error can't find the container with id f443ce9fdf4640f21211ea73566ffbc73b441a9d20f1609fc80cea88c382dada Nov 28 06:55:12 crc kubenswrapper[4922]: I1128 06:55:12.447638 4922 reflector.go:368] Caches populated for *v1.ConfigMap from object-"hostpath-provisioner"/"openshift-service-ca.crt" Nov 28 06:55:12 crc kubenswrapper[4922]: I1128 06:55:12.448716 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-4b5vr"] Nov 28 06:55:12 crc kubenswrapper[4922]: I1128 06:55:12.488069 4922 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-nm899" Nov 28 06:55:12 crc kubenswrapper[4922]: I1128 06:55:12.498784 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-xlthx"] Nov 28 06:55:12 crc kubenswrapper[4922]: I1128 06:55:12.502488 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2bzkj\" (UniqueName: \"kubernetes.io/projected/fb28612c-20e3-4319-9db8-dae18a593a1e-kube-api-access-2bzkj\") pod \"package-server-manager-789f6589d5-h7pf2\" (UID: \"fb28612c-20e3-4319-9db8-dae18a593a1e\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-h7pf2" Nov 28 06:55:12 crc kubenswrapper[4922]: I1128 06:55:12.517307 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-r9lh4\" (UniqueName: \"kubernetes.io/projected/da14c12f-6323-4600-9d9e-b2e5d53e1ecb-kube-api-access-r9lh4\") pod \"openshift-config-operator-7777fb866f-4gt9m\" (UID: \"da14c12f-6323-4600-9d9e-b2e5d53e1ecb\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-4gt9m" Nov 28 06:55:12 crc kubenswrapper[4922]: I1128 06:55:12.517805 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nklb9\" (UniqueName: \"kubernetes.io/projected/d952449e-c825-43d3-a591-0be473db6a53-kube-api-access-nklb9\") pod \"oauth-openshift-558db77b4-bqwm2\" (UID: \"d952449e-c825-43d3-a591-0be473db6a53\") " pod="openshift-authentication/oauth-openshift-558db77b4-bqwm2" Nov 28 06:55:12 crc kubenswrapper[4922]: I1128 06:55:12.538088 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-config-operator/openshift-config-operator-7777fb866f-4gt9m" Nov 28 06:55:12 crc kubenswrapper[4922]: I1128 06:55:12.555373 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-t5xj2\" (UniqueName: \"kubernetes.io/projected/9ebbc267-ceb6-444d-b767-493e54d573b6-kube-api-access-t5xj2\") pod \"catalog-operator-68c6474976-jf9pl\" (UID: \"9ebbc267-ceb6-444d-b767-493e54d573b6\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-jf9pl" Nov 28 06:55:12 crc kubenswrapper[4922]: I1128 06:55:12.560392 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dlmf5\" (UniqueName: \"kubernetes.io/projected/d0751583-7dc1-4547-9240-39d874a6ca87-kube-api-access-dlmf5\") pod \"multus-admission-controller-857f4d67dd-98r22\" (UID: \"d0751583-7dc1-4547-9240-39d874a6ca87\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-98r22" Nov 28 06:55:12 crc kubenswrapper[4922]: I1128 06:55:12.575726 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-558db77b4-bqwm2" Nov 28 06:55:12 crc kubenswrapper[4922]: I1128 06:55:12.578674 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kplgk\" (UniqueName: \"kubernetes.io/projected/648b4624-2324-4ec1-aa88-5822c9f89034-kube-api-access-kplgk\") pod \"control-plane-machine-set-operator-78cbb6b69f-w94qh\" (UID: \"648b4624-2324-4ec1-aa88-5822c9f89034\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-w94qh" Nov 28 06:55:12 crc kubenswrapper[4922]: I1128 06:55:12.583602 4922 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-jf9pl" Nov 28 06:55:12 crc kubenswrapper[4922]: I1128 06:55:12.591272 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-w94qh" Nov 28 06:55:12 crc kubenswrapper[4922]: I1128 06:55:12.598296 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-t4f99\" (UniqueName: \"kubernetes.io/projected/87cf54e1-7498-430e-a517-6658bd9ce547-kube-api-access-t4f99\") pod \"router-default-5444994796-xbhv9\" (UID: \"87cf54e1-7498-430e-a517-6658bd9ce547\") " pod="openshift-ingress/router-default-5444994796-xbhv9" Nov 28 06:55:12 crc kubenswrapper[4922]: I1128 06:55:12.603890 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-admission-controller-857f4d67dd-98r22" Nov 28 06:55:12 crc kubenswrapper[4922]: I1128 06:55:12.606175 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-7tnsd"] Nov 28 06:55:12 crc kubenswrapper[4922]: I1128 06:55:12.620751 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-h7pf2" Nov 28 06:55:12 crc kubenswrapper[4922]: I1128 06:55:12.624383 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hn4gk\" (UniqueName: \"kubernetes.io/projected/b5c2983d-2d59-4691-b2cd-130bfcd3e18c-kube-api-access-hn4gk\") pod \"machine-api-operator-5694c8668f-rllbn\" (UID: \"b5c2983d-2d59-4691-b2cd-130bfcd3e18c\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-rllbn" Nov 28 06:55:12 crc kubenswrapper[4922]: I1128 06:55:12.631958 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/machine-api-operator-5694c8668f-rllbn" Nov 28 06:55:12 crc kubenswrapper[4922]: I1128 06:55:12.637809 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hl8rf\" (UniqueName: \"kubernetes.io/projected/e77ea863-52c9-4f43-968f-73ba31a6b0de-kube-api-access-hl8rf\") pod \"kube-storage-version-migrator-operator-b67b599dd-gzqmt\" (UID: \"e77ea863-52c9-4f43-968f-73ba31a6b0de\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-gzqmt" Nov 28 06:55:12 crc kubenswrapper[4922]: I1128 06:55:12.662944 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8grsb\" (UniqueName: \"kubernetes.io/projected/059ac1e5-20d6-4a32-846e-d427680a560f-kube-api-access-8grsb\") pod \"machine-config-operator-74547568cd-ln9mh\" (UID: \"059ac1e5-20d6-4a32-846e-d427680a560f\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-ln9mh" Nov 28 06:55:12 crc kubenswrapper[4922]: I1128 06:55:12.669350 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-qckrl"] Nov 28 06:55:12 crc kubenswrapper[4922]: I1128 06:55:12.679145 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-gzqmt" Nov 28 06:55:12 crc kubenswrapper[4922]: I1128 06:55:12.690855 4922 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-ln9mh" Nov 28 06:55:12 crc kubenswrapper[4922]: I1128 06:55:12.685668 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-27zh4\" (UniqueName: \"kubernetes.io/projected/7842430c-0bd1-459a-9840-f4c4d31baa52-kube-api-access-27zh4\") pod \"authentication-operator-69f744f599-fzxnn\" (UID: \"7842430c-0bd1-459a-9840-f4c4d31baa52\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-fzxnn" Nov 28 06:55:12 crc kubenswrapper[4922]: W1128 06:55:12.701898 4922 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod1aefeaa5_e3f1_4aed_b152_35d380c3f87b.slice/crio-4dd1dc28c70662a0d690ebffa4d4a09d15ce47c18d9375bad4dd0d5016896980 WatchSource:0}: Error finding container 4dd1dc28c70662a0d690ebffa4d4a09d15ce47c18d9375bad4dd0d5016896980: Status 404 returned error can't find the container with id 4dd1dc28c70662a0d690ebffa4d4a09d15ce47c18d9375bad4dd0d5016896980 Nov 28 06:55:12 crc kubenswrapper[4922]: I1128 06:55:12.704966 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress/router-default-5444994796-xbhv9" Nov 28 06:55:12 crc kubenswrapper[4922]: I1128 06:55:12.718575 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2r9qq\" (UniqueName: \"kubernetes.io/projected/7a50e5be-5b15-472d-a504-3dc449b474e6-kube-api-access-2r9qq\") pod \"marketplace-operator-79b997595-2t2xx\" (UID: \"7a50e5be-5b15-472d-a504-3dc449b474e6\") " pod="openshift-marketplace/marketplace-operator-79b997595-2t2xx" Nov 28 06:55:12 crc kubenswrapper[4922]: I1128 06:55:12.724866 4922 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 06:55:12 crc kubenswrapper[4922]: I1128 06:55:12.736634 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-424x9\" (UniqueName: \"kubernetes.io/projected/4fa6813c-f797-44b6-8779-7cfc802379fc-kube-api-access-424x9\") pod \"machine-config-controller-84d6567774-qgppj\" (UID: \"4fa6813c-f797-44b6-8779-7cfc802379fc\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-qgppj" Nov 28 06:55:12 crc kubenswrapper[4922]: I1128 06:55:12.739792 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5tlxw\" (UniqueName: \"kubernetes.io/projected/468ca9dc-6af3-47ae-8c7c-b9338ceae695-kube-api-access-5tlxw\") pod \"apiserver-76f77b778f-ht2sn\" (UID: \"468ca9dc-6af3-47ae-8c7c-b9338ceae695\") " pod="openshift-apiserver/apiserver-76f77b778f-ht2sn" Nov 28 06:55:12 crc kubenswrapper[4922]: I1128 06:55:12.768205 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-z2nc2\" (UniqueName: \"kubernetes.io/projected/fd93cb7d-1de5-4e04-957a-a91d0bb26134-kube-api-access-z2nc2\") pod \"console-operator-58897d9998-z4zvd\" (UID: \"fd93cb7d-1de5-4e04-957a-a91d0bb26134\") " pod="openshift-console-operator/console-operator-58897d9998-z4zvd" Nov 28 06:55:12 crc kubenswrapper[4922]: I1128 06:55:12.778886 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8h8cs\" (UniqueName: \"kubernetes.io/projected/6b8f1ab0-54be-4dad-97b3-a69b288dea67-kube-api-access-8h8cs\") pod \"machine-approver-56656f9798-pp742\" (UID: \"6b8f1ab0-54be-4dad-97b3-a69b288dea67\") " 
pod="openshift-cluster-machine-approver/machine-approver-56656f9798-pp742" Nov 28 06:55:12 crc kubenswrapper[4922]: I1128 06:55:12.800976 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-pp742" Nov 28 06:55:12 crc kubenswrapper[4922]: I1128 06:55:12.809716 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/c16ef766-7ac2-43bc-a1fa-ea9fc1e61edf-kube-api-access\") pod \"kube-apiserver-operator-766d6c64bb-5pc2z\" (UID: \"c16ef766-7ac2-43bc-a1fa-ea9fc1e61edf\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-5pc2z" Nov 28 06:55:12 crc kubenswrapper[4922]: I1128 06:55:12.817714 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-5pc2z" Nov 28 06:55:12 crc kubenswrapper[4922]: I1128 06:55:12.823330 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-operator-58897d9998-z4zvd" Nov 28 06:55:12 crc kubenswrapper[4922]: I1128 06:55:12.823910 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/3f39970d-138c-4369-838c-af7074913d3b-kube-api-access\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-7wtcp\" (UID: \"3f39970d-138c-4369-838c-af7074913d3b\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-7wtcp" Nov 28 06:55:12 crc kubenswrapper[4922]: I1128 06:55:12.836823 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s5zfh\" (UniqueName: \"kubernetes.io/projected/71cf79c0-841b-4024-9b3d-9c49ca7d89d9-kube-api-access-s5zfh\") pod \"service-ca-operator-777779d784-hgtbm\" (UID: \"71cf79c0-841b-4024-9b3d-9c49ca7d89d9\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-hgtbm" Nov 28 06:55:12 crc kubenswrapper[4922]: I1128 06:55:12.857485 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dwl79\" (UniqueName: \"kubernetes.io/projected/9cfee6d8-0106-4b57-98e3-23eaed2b9ff7-kube-api-access-dwl79\") pod \"cluster-image-registry-operator-dc59b4c8b-98sbt\" (UID: \"9cfee6d8-0106-4b57-98e3-23eaed2b9ff7\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-98sbt" Nov 28 06:55:12 crc kubenswrapper[4922]: I1128 06:55:12.874954 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/72c05220-a1c6-4f3a-a8f1-c588a4e062b0-kube-api-access\") pod \"kube-controller-manager-operator-78b949d7b-sfzn5\" (UID: \"72c05220-a1c6-4f3a-a8f1-c588a4e062b0\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-sfzn5" Nov 28 06:55:12 crc kubenswrapper[4922]: I1128 06:55:12.897390 4922 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-apiserver/apiserver-76f77b778f-ht2sn" Nov 28 06:55:12 crc kubenswrapper[4922]: I1128 06:55:12.898654 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-g6mfl\" (UniqueName: \"kubernetes.io/projected/c699c950-dfd4-4513-8c2a-3f38c8094d22-kube-api-access-g6mfl\") pod \"etcd-operator-b45778765-m6bkw\" (UID: \"c699c950-dfd4-4513-8c2a-3f38c8094d22\") " pod="openshift-etcd-operator/etcd-operator-b45778765-m6bkw" Nov 28 06:55:12 crc kubenswrapper[4922]: I1128 06:55:12.920549 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-69f744f599-fzxnn" Nov 28 06:55:12 crc kubenswrapper[4922]: I1128 06:55:12.923981 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cflk4\" (UniqueName: \"kubernetes.io/projected/1f79995c-e40b-4f4a-9001-0054f046a944-kube-api-access-cflk4\") pod \"dns-operator-744455d44c-s8ws4\" (UID: \"1f79995c-e40b-4f4a-9001-0054f046a944\") " pod="openshift-dns-operator/dns-operator-744455d44c-s8ws4" Nov 28 06:55:12 crc kubenswrapper[4922]: I1128 06:55:12.941105 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dwh96\" (UniqueName: \"kubernetes.io/projected/765b0f79-4316-4aab-90fa-a2aaa84380f0-kube-api-access-dwh96\") pod \"collect-profiles-29405205-gln9g\" (UID: \"765b0f79-4316-4aab-90fa-a2aaa84380f0\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405205-gln9g" Nov 28 06:55:12 crc kubenswrapper[4922]: I1128 06:55:12.958424 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/9cfee6d8-0106-4b57-98e3-23eaed2b9ff7-bound-sa-token\") pod \"cluster-image-registry-operator-dc59b4c8b-98sbt\" (UID: \"9cfee6d8-0106-4b57-98e3-23eaed2b9ff7\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-98sbt" Nov 28 06:55:12 crc kubenswrapper[4922]: I1128 06:55:12.989308 4922 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-qgppj" Nov 28 06:55:12 crc kubenswrapper[4922]: I1128 06:55:12.991491 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mx2qx\" (UniqueName: \"kubernetes.io/projected/83a5410b-be02-42e8-9b5d-7d476dc779fc-kube-api-access-mx2qx\") pod \"service-ca-9c57cc56f-k2x5w\" (UID: \"83a5410b-be02-42e8-9b5d-7d476dc779fc\") " pod="openshift-service-ca/service-ca-9c57cc56f-k2x5w" Nov 28 06:55:12 crc kubenswrapper[4922]: I1128 06:55:12.991512 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/b55d7c5c-1fc1-4c9a-b82b-35dbabbbe8e7-trusted-ca\") pod \"ingress-operator-5b745b69d9-d6r4k\" (UID: \"b55d7c5c-1fc1-4c9a-b82b-35dbabbbe8e7\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-d6r4k" Nov 28 06:55:12 crc kubenswrapper[4922]: I1128 06:55:12.991529 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/83a5410b-be02-42e8-9b5d-7d476dc779fc-signing-cabundle\") pod \"service-ca-9c57cc56f-k2x5w\" (UID: \"83a5410b-be02-42e8-9b5d-7d476dc779fc\") " pod="openshift-service-ca/service-ca-9c57cc56f-k2x5w" Nov 28 06:55:12 crc kubenswrapper[4922]: I1128 06:55:12.991548 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/1090992f-7c26-40f9-95c6-eaf05f31fda2-tmpfs\") pod \"packageserver-d55dfcdfc-r4frz\" (UID: \"1090992f-7c26-40f9-95c6-eaf05f31fda2\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-r4frz" Nov 28 06:55:12 crc kubenswrapper[4922]: I1128 06:55:12.991564 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/7de83d0f-3269-4343-b2a9-398b8d4af4fc-console-oauth-config\") pod \"console-f9d7485db-h297g\" (UID: \"7de83d0f-3269-4343-b2a9-398b8d4af4fc\") " pod="openshift-console/console-f9d7485db-h297g" Nov 28 06:55:12 crc kubenswrapper[4922]: I1128 06:55:12.991631 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/1090992f-7c26-40f9-95c6-eaf05f31fda2-apiservice-cert\") pod \"packageserver-d55dfcdfc-r4frz\" (UID: \"1090992f-7c26-40f9-95c6-eaf05f31fda2\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-r4frz" Nov 28 06:55:12 crc kubenswrapper[4922]: I1128 06:55:12.991649 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-s6l7n\" (UniqueName: \"kubernetes.io/projected/dd20079d-34c4-4ea6-920f-420a1d6bb863-kube-api-access-s6l7n\") pod \"image-registry-697d97f7c8-sckww\" (UID: \"dd20079d-34c4-4ea6-920f-420a1d6bb863\") " pod="openshift-image-registry/image-registry-697d97f7c8-sckww" Nov 28 06:55:12 crc kubenswrapper[4922]: I1128 06:55:12.991684 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-g8552\" (UniqueName: \"kubernetes.io/projected/7de83d0f-3269-4343-b2a9-398b8d4af4fc-kube-api-access-g8552\") pod \"console-f9d7485db-h297g\" (UID: \"7de83d0f-3269-4343-b2a9-398b8d4af4fc\") " pod="openshift-console/console-f9d7485db-h297g" Nov 28 06:55:12 crc 
kubenswrapper[4922]: I1128 06:55:12.991720 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/7de83d0f-3269-4343-b2a9-398b8d4af4fc-console-config\") pod \"console-f9d7485db-h297g\" (UID: \"7de83d0f-3269-4343-b2a9-398b8d4af4fc\") " pod="openshift-console/console-f9d7485db-h297g" Nov 28 06:55:12 crc kubenswrapper[4922]: I1128 06:55:12.991746 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/7de83d0f-3269-4343-b2a9-398b8d4af4fc-oauth-serving-cert\") pod \"console-f9d7485db-h297g\" (UID: \"7de83d0f-3269-4343-b2a9-398b8d4af4fc\") " pod="openshift-console/console-f9d7485db-h297g" Nov 28 06:55:12 crc kubenswrapper[4922]: I1128 06:55:12.991765 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/7de83d0f-3269-4343-b2a9-398b8d4af4fc-console-serving-cert\") pod \"console-f9d7485db-h297g\" (UID: \"7de83d0f-3269-4343-b2a9-398b8d4af4fc\") " pod="openshift-console/console-f9d7485db-h297g" Nov 28 06:55:12 crc kubenswrapper[4922]: I1128 06:55:12.991820 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-sckww\" (UID: \"dd20079d-34c4-4ea6-920f-420a1d6bb863\") " pod="openshift-image-registry/image-registry-697d97f7c8-sckww" Nov 28 06:55:12 crc kubenswrapper[4922]: I1128 06:55:12.991837 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/cd6df89d-cec7-40af-acae-2b1308378819-profile-collector-cert\") pod \"olm-operator-6b444d44fb-8nrlh\" (UID: \"cd6df89d-cec7-40af-acae-2b1308378819\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-8nrlh" Nov 28 06:55:12 crc kubenswrapper[4922]: I1128 06:55:12.991852 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-q862v\" (UniqueName: \"kubernetes.io/projected/e2e57fd7-85d0-4872-8737-c6423b16b702-kube-api-access-q862v\") pod \"migrator-59844c95c7-54gpd\" (UID: \"e2e57fd7-85d0-4872-8737-c6423b16b702\") " pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-54gpd" Nov 28 06:55:12 crc kubenswrapper[4922]: I1128 06:55:12.991866 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/7de83d0f-3269-4343-b2a9-398b8d4af4fc-service-ca\") pod \"console-f9d7485db-h297g\" (UID: \"7de83d0f-3269-4343-b2a9-398b8d4af4fc\") " pod="openshift-console/console-f9d7485db-h297g" Nov 28 06:55:12 crc kubenswrapper[4922]: I1128 06:55:12.991882 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7m8sp\" (UniqueName: \"kubernetes.io/projected/b55d7c5c-1fc1-4c9a-b82b-35dbabbbe8e7-kube-api-access-7m8sp\") pod \"ingress-operator-5b745b69d9-d6r4k\" (UID: \"b55d7c5c-1fc1-4c9a-b82b-35dbabbbe8e7\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-d6r4k" Nov 28 06:55:12 crc kubenswrapper[4922]: I1128 06:55:12.993132 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume 
started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/dd20079d-34c4-4ea6-920f-420a1d6bb863-registry-certificates\") pod \"image-registry-697d97f7c8-sckww\" (UID: \"dd20079d-34c4-4ea6-920f-420a1d6bb863\") " pod="openshift-image-registry/image-registry-697d97f7c8-sckww" Nov 28 06:55:12 crc kubenswrapper[4922]: I1128 06:55:12.993159 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/dd20079d-34c4-4ea6-920f-420a1d6bb863-bound-sa-token\") pod \"image-registry-697d97f7c8-sckww\" (UID: \"dd20079d-34c4-4ea6-920f-420a1d6bb863\") " pod="openshift-image-registry/image-registry-697d97f7c8-sckww" Nov 28 06:55:12 crc kubenswrapper[4922]: I1128 06:55:12.993189 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/7de83d0f-3269-4343-b2a9-398b8d4af4fc-trusted-ca-bundle\") pod \"console-f9d7485db-h297g\" (UID: \"7de83d0f-3269-4343-b2a9-398b8d4af4fc\") " pod="openshift-console/console-f9d7485db-h297g" Nov 28 06:55:12 crc kubenswrapper[4922]: I1128 06:55:12.993206 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6t57g\" (UniqueName: \"kubernetes.io/projected/1090992f-7c26-40f9-95c6-eaf05f31fda2-kube-api-access-6t57g\") pod \"packageserver-d55dfcdfc-r4frz\" (UID: \"1090992f-7c26-40f9-95c6-eaf05f31fda2\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-r4frz" Nov 28 06:55:12 crc kubenswrapper[4922]: I1128 06:55:12.993237 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/dd20079d-34c4-4ea6-920f-420a1d6bb863-ca-trust-extracted\") pod \"image-registry-697d97f7c8-sckww\" (UID: \"dd20079d-34c4-4ea6-920f-420a1d6bb863\") " pod="openshift-image-registry/image-registry-697d97f7c8-sckww" Nov 28 06:55:12 crc kubenswrapper[4922]: I1128 06:55:12.993254 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/83a5410b-be02-42e8-9b5d-7d476dc779fc-signing-key\") pod \"service-ca-9c57cc56f-k2x5w\" (UID: \"83a5410b-be02-42e8-9b5d-7d476dc779fc\") " pod="openshift-service-ca/service-ca-9c57cc56f-k2x5w" Nov 28 06:55:12 crc kubenswrapper[4922]: I1128 06:55:12.993272 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/b55d7c5c-1fc1-4c9a-b82b-35dbabbbe8e7-metrics-tls\") pod \"ingress-operator-5b745b69d9-d6r4k\" (UID: \"b55d7c5c-1fc1-4c9a-b82b-35dbabbbe8e7\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-d6r4k" Nov 28 06:55:12 crc kubenswrapper[4922]: I1128 06:55:12.993313 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/b55d7c5c-1fc1-4c9a-b82b-35dbabbbe8e7-bound-sa-token\") pod \"ingress-operator-5b745b69d9-d6r4k\" (UID: \"b55d7c5c-1fc1-4c9a-b82b-35dbabbbe8e7\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-d6r4k" Nov 28 06:55:12 crc kubenswrapper[4922]: I1128 06:55:12.993344 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-p7ftt\" (UniqueName: 
\"kubernetes.io/projected/cd6df89d-cec7-40af-acae-2b1308378819-kube-api-access-p7ftt\") pod \"olm-operator-6b444d44fb-8nrlh\" (UID: \"cd6df89d-cec7-40af-acae-2b1308378819\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-8nrlh" Nov 28 06:55:12 crc kubenswrapper[4922]: I1128 06:55:12.993360 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/dd20079d-34c4-4ea6-920f-420a1d6bb863-trusted-ca\") pod \"image-registry-697d97f7c8-sckww\" (UID: \"dd20079d-34c4-4ea6-920f-420a1d6bb863\") " pod="openshift-image-registry/image-registry-697d97f7c8-sckww" Nov 28 06:55:12 crc kubenswrapper[4922]: I1128 06:55:12.993375 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/dd20079d-34c4-4ea6-920f-420a1d6bb863-installation-pull-secrets\") pod \"image-registry-697d97f7c8-sckww\" (UID: \"dd20079d-34c4-4ea6-920f-420a1d6bb863\") " pod="openshift-image-registry/image-registry-697d97f7c8-sckww" Nov 28 06:55:12 crc kubenswrapper[4922]: I1128 06:55:12.993393 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/cd6df89d-cec7-40af-acae-2b1308378819-srv-cert\") pod \"olm-operator-6b444d44fb-8nrlh\" (UID: \"cd6df89d-cec7-40af-acae-2b1308378819\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-8nrlh" Nov 28 06:55:12 crc kubenswrapper[4922]: I1128 06:55:12.993409 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/1090992f-7c26-40f9-95c6-eaf05f31fda2-webhook-cert\") pod \"packageserver-d55dfcdfc-r4frz\" (UID: \"1090992f-7c26-40f9-95c6-eaf05f31fda2\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-r4frz" Nov 28 06:55:12 crc kubenswrapper[4922]: I1128 06:55:12.993425 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/dd20079d-34c4-4ea6-920f-420a1d6bb863-registry-tls\") pod \"image-registry-697d97f7c8-sckww\" (UID: \"dd20079d-34c4-4ea6-920f-420a1d6bb863\") " pod="openshift-image-registry/image-registry-697d97f7c8-sckww" Nov 28 06:55:12 crc kubenswrapper[4922]: E1128 06:55:12.994343 4922 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 06:55:13.494330233 +0000 UTC m=+158.414725815 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-sckww" (UID: "dd20079d-34c4-4ea6-920f-420a1d6bb863") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 06:55:12 crc kubenswrapper[4922]: I1128 06:55:12.997369 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-2t2xx" Nov 28 06:55:13 crc kubenswrapper[4922]: I1128 06:55:13.050978 4922 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-sfzn5" Nov 28 06:55:13 crc kubenswrapper[4922]: I1128 06:55:13.055971 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns-operator/dns-operator-744455d44c-s8ws4" Nov 28 06:55:13 crc kubenswrapper[4922]: I1128 06:55:13.063643 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-98sbt" Nov 28 06:55:13 crc kubenswrapper[4922]: I1128 06:55:13.084404 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd-operator/etcd-operator-b45778765-m6bkw" Nov 28 06:55:13 crc kubenswrapper[4922]: I1128 06:55:13.094948 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 06:55:13 crc kubenswrapper[4922]: I1128 06:55:13.095074 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/b55d7c5c-1fc1-4c9a-b82b-35dbabbbe8e7-metrics-tls\") pod \"ingress-operator-5b745b69d9-d6r4k\" (UID: \"b55d7c5c-1fc1-4c9a-b82b-35dbabbbe8e7\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-d6r4k" Nov 28 06:55:13 crc kubenswrapper[4922]: I1128 06:55:13.095121 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/03dc5df6-dfb3-4839-9c45-71894d1b547b-config-volume\") pod \"dns-default-62v62\" (UID: \"03dc5df6-dfb3-4839-9c45-71894d1b547b\") " pod="openshift-dns/dns-default-62v62" Nov 28 06:55:13 crc kubenswrapper[4922]: I1128 06:55:13.095238 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/b55d7c5c-1fc1-4c9a-b82b-35dbabbbe8e7-bound-sa-token\") pod \"ingress-operator-5b745b69d9-d6r4k\" (UID: \"b55d7c5c-1fc1-4c9a-b82b-35dbabbbe8e7\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-d6r4k" Nov 28 06:55:13 crc kubenswrapper[4922]: I1128 06:55:13.095256 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-p7ftt\" (UniqueName: \"kubernetes.io/projected/cd6df89d-cec7-40af-acae-2b1308378819-kube-api-access-p7ftt\") pod \"olm-operator-6b444d44fb-8nrlh\" (UID: \"cd6df89d-cec7-40af-acae-2b1308378819\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-8nrlh" Nov 28 06:55:13 crc kubenswrapper[4922]: I1128 06:55:13.095286 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/dd20079d-34c4-4ea6-920f-420a1d6bb863-trusted-ca\") pod \"image-registry-697d97f7c8-sckww\" (UID: \"dd20079d-34c4-4ea6-920f-420a1d6bb863\") " pod="openshift-image-registry/image-registry-697d97f7c8-sckww" Nov 28 06:55:13 crc kubenswrapper[4922]: I1128 06:55:13.095309 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/dd20079d-34c4-4ea6-920f-420a1d6bb863-installation-pull-secrets\") pod \"image-registry-697d97f7c8-sckww\" (UID: \"dd20079d-34c4-4ea6-920f-420a1d6bb863\") " 
pod="openshift-image-registry/image-registry-697d97f7c8-sckww" Nov 28 06:55:13 crc kubenswrapper[4922]: I1128 06:55:13.095349 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/cd6df89d-cec7-40af-acae-2b1308378819-srv-cert\") pod \"olm-operator-6b444d44fb-8nrlh\" (UID: \"cd6df89d-cec7-40af-acae-2b1308378819\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-8nrlh" Nov 28 06:55:13 crc kubenswrapper[4922]: I1128 06:55:13.095376 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/1090992f-7c26-40f9-95c6-eaf05f31fda2-webhook-cert\") pod \"packageserver-d55dfcdfc-r4frz\" (UID: \"1090992f-7c26-40f9-95c6-eaf05f31fda2\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-r4frz" Nov 28 06:55:13 crc kubenswrapper[4922]: I1128 06:55:13.095411 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/dd20079d-34c4-4ea6-920f-420a1d6bb863-registry-tls\") pod \"image-registry-697d97f7c8-sckww\" (UID: \"dd20079d-34c4-4ea6-920f-420a1d6bb863\") " pod="openshift-image-registry/image-registry-697d97f7c8-sckww" Nov 28 06:55:13 crc kubenswrapper[4922]: I1128 06:55:13.095430 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registration-dir\" (UniqueName: \"kubernetes.io/host-path/04c1fcf2-5298-423f-ab9e-56043ced3e2f-registration-dir\") pod \"csi-hostpathplugin-djvcb\" (UID: \"04c1fcf2-5298-423f-ab9e-56043ced3e2f\") " pod="hostpath-provisioner/csi-hostpathplugin-djvcb" Nov 28 06:55:13 crc kubenswrapper[4922]: I1128 06:55:13.095462 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/49678f48-cc19-4c5d-8b9e-2926f02770d9-cert\") pod \"ingress-canary-p5q7c\" (UID: \"49678f48-cc19-4c5d-8b9e-2926f02770d9\") " pod="openshift-ingress-canary/ingress-canary-p5q7c" Nov 28 06:55:13 crc kubenswrapper[4922]: I1128 06:55:13.095481 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-dir\" (UniqueName: \"kubernetes.io/host-path/04c1fcf2-5298-423f-ab9e-56043ced3e2f-plugins-dir\") pod \"csi-hostpathplugin-djvcb\" (UID: \"04c1fcf2-5298-423f-ab9e-56043ced3e2f\") " pod="hostpath-provisioner/csi-hostpathplugin-djvcb" Nov 28 06:55:13 crc kubenswrapper[4922]: I1128 06:55:13.095543 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mx2qx\" (UniqueName: \"kubernetes.io/projected/83a5410b-be02-42e8-9b5d-7d476dc779fc-kube-api-access-mx2qx\") pod \"service-ca-9c57cc56f-k2x5w\" (UID: \"83a5410b-be02-42e8-9b5d-7d476dc779fc\") " pod="openshift-service-ca/service-ca-9c57cc56f-k2x5w" Nov 28 06:55:13 crc kubenswrapper[4922]: I1128 06:55:13.095559 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/b55d7c5c-1fc1-4c9a-b82b-35dbabbbe8e7-trusted-ca\") pod \"ingress-operator-5b745b69d9-d6r4k\" (UID: \"b55d7c5c-1fc1-4c9a-b82b-35dbabbbe8e7\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-d6r4k" Nov 28 06:55:13 crc kubenswrapper[4922]: I1128 06:55:13.095594 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"signing-cabundle\" (UniqueName: 
\"kubernetes.io/configmap/83a5410b-be02-42e8-9b5d-7d476dc779fc-signing-cabundle\") pod \"service-ca-9c57cc56f-k2x5w\" (UID: \"83a5410b-be02-42e8-9b5d-7d476dc779fc\") " pod="openshift-service-ca/service-ca-9c57cc56f-k2x5w" Nov 28 06:55:13 crc kubenswrapper[4922]: I1128 06:55:13.095640 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/1090992f-7c26-40f9-95c6-eaf05f31fda2-tmpfs\") pod \"packageserver-d55dfcdfc-r4frz\" (UID: \"1090992f-7c26-40f9-95c6-eaf05f31fda2\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-r4frz" Nov 28 06:55:13 crc kubenswrapper[4922]: I1128 06:55:13.095656 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"certs\" (UniqueName: \"kubernetes.io/secret/7c07a370-f5fd-46a2-b0a6-06a2850b5233-certs\") pod \"machine-config-server-xq92w\" (UID: \"7c07a370-f5fd-46a2-b0a6-06a2850b5233\") " pod="openshift-machine-config-operator/machine-config-server-xq92w" Nov 28 06:55:13 crc kubenswrapper[4922]: I1128 06:55:13.095684 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7l2s9\" (UniqueName: \"kubernetes.io/projected/03dc5df6-dfb3-4839-9c45-71894d1b547b-kube-api-access-7l2s9\") pod \"dns-default-62v62\" (UID: \"03dc5df6-dfb3-4839-9c45-71894d1b547b\") " pod="openshift-dns/dns-default-62v62" Nov 28 06:55:13 crc kubenswrapper[4922]: I1128 06:55:13.095703 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/7de83d0f-3269-4343-b2a9-398b8d4af4fc-console-oauth-config\") pod \"console-f9d7485db-h297g\" (UID: \"7de83d0f-3269-4343-b2a9-398b8d4af4fc\") " pod="openshift-console/console-f9d7485db-h297g" Nov 28 06:55:13 crc kubenswrapper[4922]: I1128 06:55:13.095778 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/03dc5df6-dfb3-4839-9c45-71894d1b547b-metrics-tls\") pod \"dns-default-62v62\" (UID: \"03dc5df6-dfb3-4839-9c45-71894d1b547b\") " pod="openshift-dns/dns-default-62v62" Nov 28 06:55:13 crc kubenswrapper[4922]: I1128 06:55:13.095824 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/1090992f-7c26-40f9-95c6-eaf05f31fda2-apiservice-cert\") pod \"packageserver-d55dfcdfc-r4frz\" (UID: \"1090992f-7c26-40f9-95c6-eaf05f31fda2\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-r4frz" Nov 28 06:55:13 crc kubenswrapper[4922]: I1128 06:55:13.095850 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s6l7n\" (UniqueName: \"kubernetes.io/projected/dd20079d-34c4-4ea6-920f-420a1d6bb863-kube-api-access-s6l7n\") pod \"image-registry-697d97f7c8-sckww\" (UID: \"dd20079d-34c4-4ea6-920f-420a1d6bb863\") " pod="openshift-image-registry/image-registry-697d97f7c8-sckww" Nov 28 06:55:13 crc kubenswrapper[4922]: I1128 06:55:13.095865 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"socket-dir\" (UniqueName: \"kubernetes.io/host-path/04c1fcf2-5298-423f-ab9e-56043ced3e2f-socket-dir\") pod \"csi-hostpathplugin-djvcb\" (UID: \"04c1fcf2-5298-423f-ab9e-56043ced3e2f\") " pod="hostpath-provisioner/csi-hostpathplugin-djvcb" Nov 28 06:55:13 crc kubenswrapper[4922]: I1128 06:55:13.095925 4922 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rfbc5\" (UniqueName: \"kubernetes.io/projected/04c1fcf2-5298-423f-ab9e-56043ced3e2f-kube-api-access-rfbc5\") pod \"csi-hostpathplugin-djvcb\" (UID: \"04c1fcf2-5298-423f-ab9e-56043ced3e2f\") " pod="hostpath-provisioner/csi-hostpathplugin-djvcb" Nov 28 06:55:13 crc kubenswrapper[4922]: I1128 06:55:13.095944 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-g8552\" (UniqueName: \"kubernetes.io/projected/7de83d0f-3269-4343-b2a9-398b8d4af4fc-kube-api-access-g8552\") pod \"console-f9d7485db-h297g\" (UID: \"7de83d0f-3269-4343-b2a9-398b8d4af4fc\") " pod="openshift-console/console-f9d7485db-h297g" Nov 28 06:55:13 crc kubenswrapper[4922]: I1128 06:55:13.095985 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"csi-data-dir\" (UniqueName: \"kubernetes.io/host-path/04c1fcf2-5298-423f-ab9e-56043ced3e2f-csi-data-dir\") pod \"csi-hostpathplugin-djvcb\" (UID: \"04c1fcf2-5298-423f-ab9e-56043ced3e2f\") " pod="hostpath-provisioner/csi-hostpathplugin-djvcb" Nov 28 06:55:13 crc kubenswrapper[4922]: I1128 06:55:13.096002 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/7de83d0f-3269-4343-b2a9-398b8d4af4fc-console-config\") pod \"console-f9d7485db-h297g\" (UID: \"7de83d0f-3269-4343-b2a9-398b8d4af4fc\") " pod="openshift-console/console-f9d7485db-h297g" Nov 28 06:55:13 crc kubenswrapper[4922]: I1128 06:55:13.096020 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/7de83d0f-3269-4343-b2a9-398b8d4af4fc-oauth-serving-cert\") pod \"console-f9d7485db-h297g\" (UID: \"7de83d0f-3269-4343-b2a9-398b8d4af4fc\") " pod="openshift-console/console-f9d7485db-h297g" Nov 28 06:55:13 crc kubenswrapper[4922]: I1128 06:55:13.096036 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"mountpoint-dir\" (UniqueName: \"kubernetes.io/host-path/04c1fcf2-5298-423f-ab9e-56043ced3e2f-mountpoint-dir\") pod \"csi-hostpathplugin-djvcb\" (UID: \"04c1fcf2-5298-423f-ab9e-56043ced3e2f\") " pod="hostpath-provisioner/csi-hostpathplugin-djvcb" Nov 28 06:55:13 crc kubenswrapper[4922]: I1128 06:55:13.096069 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/7de83d0f-3269-4343-b2a9-398b8d4af4fc-console-serving-cert\") pod \"console-f9d7485db-h297g\" (UID: \"7de83d0f-3269-4343-b2a9-398b8d4af4fc\") " pod="openshift-console/console-f9d7485db-h297g" Nov 28 06:55:13 crc kubenswrapper[4922]: I1128 06:55:13.096138 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/cd6df89d-cec7-40af-acae-2b1308378819-profile-collector-cert\") pod \"olm-operator-6b444d44fb-8nrlh\" (UID: \"cd6df89d-cec7-40af-acae-2b1308378819\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-8nrlh" Nov 28 06:55:13 crc kubenswrapper[4922]: I1128 06:55:13.096154 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-q862v\" (UniqueName: \"kubernetes.io/projected/e2e57fd7-85d0-4872-8737-c6423b16b702-kube-api-access-q862v\") pod \"migrator-59844c95c7-54gpd\" (UID: \"e2e57fd7-85d0-4872-8737-c6423b16b702\") " 
pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-54gpd" Nov 28 06:55:13 crc kubenswrapper[4922]: I1128 06:55:13.096169 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/7de83d0f-3269-4343-b2a9-398b8d4af4fc-service-ca\") pod \"console-f9d7485db-h297g\" (UID: \"7de83d0f-3269-4343-b2a9-398b8d4af4fc\") " pod="openshift-console/console-f9d7485db-h297g" Nov 28 06:55:13 crc kubenswrapper[4922]: I1128 06:55:13.096211 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7m8sp\" (UniqueName: \"kubernetes.io/projected/b55d7c5c-1fc1-4c9a-b82b-35dbabbbe8e7-kube-api-access-7m8sp\") pod \"ingress-operator-5b745b69d9-d6r4k\" (UID: \"b55d7c5c-1fc1-4c9a-b82b-35dbabbbe8e7\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-d6r4k" Nov 28 06:55:13 crc kubenswrapper[4922]: I1128 06:55:13.096361 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5ts9l\" (UniqueName: \"kubernetes.io/projected/7c07a370-f5fd-46a2-b0a6-06a2850b5233-kube-api-access-5ts9l\") pod \"machine-config-server-xq92w\" (UID: \"7c07a370-f5fd-46a2-b0a6-06a2850b5233\") " pod="openshift-machine-config-operator/machine-config-server-xq92w" Nov 28 06:55:13 crc kubenswrapper[4922]: I1128 06:55:13.096380 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/dd20079d-34c4-4ea6-920f-420a1d6bb863-registry-certificates\") pod \"image-registry-697d97f7c8-sckww\" (UID: \"dd20079d-34c4-4ea6-920f-420a1d6bb863\") " pod="openshift-image-registry/image-registry-697d97f7c8-sckww" Nov 28 06:55:13 crc kubenswrapper[4922]: I1128 06:55:13.096405 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/dd20079d-34c4-4ea6-920f-420a1d6bb863-bound-sa-token\") pod \"image-registry-697d97f7c8-sckww\" (UID: \"dd20079d-34c4-4ea6-920f-420a1d6bb863\") " pod="openshift-image-registry/image-registry-697d97f7c8-sckww" Nov 28 06:55:13 crc kubenswrapper[4922]: I1128 06:55:13.096431 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/7c07a370-f5fd-46a2-b0a6-06a2850b5233-node-bootstrap-token\") pod \"machine-config-server-xq92w\" (UID: \"7c07a370-f5fd-46a2-b0a6-06a2850b5233\") " pod="openshift-machine-config-operator/machine-config-server-xq92w" Nov 28 06:55:13 crc kubenswrapper[4922]: I1128 06:55:13.096446 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-f295x\" (UniqueName: \"kubernetes.io/projected/49678f48-cc19-4c5d-8b9e-2926f02770d9-kube-api-access-f295x\") pod \"ingress-canary-p5q7c\" (UID: \"49678f48-cc19-4c5d-8b9e-2926f02770d9\") " pod="openshift-ingress-canary/ingress-canary-p5q7c" Nov 28 06:55:13 crc kubenswrapper[4922]: I1128 06:55:13.096529 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/7de83d0f-3269-4343-b2a9-398b8d4af4fc-trusted-ca-bundle\") pod \"console-f9d7485db-h297g\" (UID: \"7de83d0f-3269-4343-b2a9-398b8d4af4fc\") " pod="openshift-console/console-f9d7485db-h297g" Nov 28 06:55:13 crc kubenswrapper[4922]: I1128 06:55:13.096551 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for 
volume \"kube-api-access-6t57g\" (UniqueName: \"kubernetes.io/projected/1090992f-7c26-40f9-95c6-eaf05f31fda2-kube-api-access-6t57g\") pod \"packageserver-d55dfcdfc-r4frz\" (UID: \"1090992f-7c26-40f9-95c6-eaf05f31fda2\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-r4frz" Nov 28 06:55:13 crc kubenswrapper[4922]: I1128 06:55:13.096607 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/dd20079d-34c4-4ea6-920f-420a1d6bb863-ca-trust-extracted\") pod \"image-registry-697d97f7c8-sckww\" (UID: \"dd20079d-34c4-4ea6-920f-420a1d6bb863\") " pod="openshift-image-registry/image-registry-697d97f7c8-sckww" Nov 28 06:55:13 crc kubenswrapper[4922]: I1128 06:55:13.096624 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/83a5410b-be02-42e8-9b5d-7d476dc779fc-signing-key\") pod \"service-ca-9c57cc56f-k2x5w\" (UID: \"83a5410b-be02-42e8-9b5d-7d476dc779fc\") " pod="openshift-service-ca/service-ca-9c57cc56f-k2x5w" Nov 28 06:55:13 crc kubenswrapper[4922]: E1128 06:55:13.097057 4922 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 06:55:13.597038249 +0000 UTC m=+158.517433831 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 06:55:13 crc kubenswrapper[4922]: I1128 06:55:13.097545 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29405205-gln9g" Nov 28 06:55:13 crc kubenswrapper[4922]: I1128 06:55:13.097753 4922 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-7wtcp" Nov 28 06:55:13 crc kubenswrapper[4922]: I1128 06:55:13.099777 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/b55d7c5c-1fc1-4c9a-b82b-35dbabbbe8e7-trusted-ca\") pod \"ingress-operator-5b745b69d9-d6r4k\" (UID: \"b55d7c5c-1fc1-4c9a-b82b-35dbabbbe8e7\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-d6r4k" Nov 28 06:55:13 crc kubenswrapper[4922]: I1128 06:55:13.100879 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/7de83d0f-3269-4343-b2a9-398b8d4af4fc-trusted-ca-bundle\") pod \"console-f9d7485db-h297g\" (UID: \"7de83d0f-3269-4343-b2a9-398b8d4af4fc\") " pod="openshift-console/console-f9d7485db-h297g" Nov 28 06:55:13 crc kubenswrapper[4922]: I1128 06:55:13.101870 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/dd20079d-34c4-4ea6-920f-420a1d6bb863-ca-trust-extracted\") pod \"image-registry-697d97f7c8-sckww\" (UID: \"dd20079d-34c4-4ea6-920f-420a1d6bb863\") " pod="openshift-image-registry/image-registry-697d97f7c8-sckww" Nov 28 06:55:13 crc kubenswrapper[4922]: I1128 06:55:13.102678 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/7de83d0f-3269-4343-b2a9-398b8d4af4fc-service-ca\") pod \"console-f9d7485db-h297g\" (UID: \"7de83d0f-3269-4343-b2a9-398b8d4af4fc\") " pod="openshift-console/console-f9d7485db-h297g" Nov 28 06:55:13 crc kubenswrapper[4922]: I1128 06:55:13.103099 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/83a5410b-be02-42e8-9b5d-7d476dc779fc-signing-cabundle\") pod \"service-ca-9c57cc56f-k2x5w\" (UID: \"83a5410b-be02-42e8-9b5d-7d476dc779fc\") " pod="openshift-service-ca/service-ca-9c57cc56f-k2x5w" Nov 28 06:55:13 crc kubenswrapper[4922]: I1128 06:55:13.103808 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/1090992f-7c26-40f9-95c6-eaf05f31fda2-tmpfs\") pod \"packageserver-d55dfcdfc-r4frz\" (UID: \"1090992f-7c26-40f9-95c6-eaf05f31fda2\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-r4frz" Nov 28 06:55:13 crc kubenswrapper[4922]: I1128 06:55:13.184485 4922 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-777779d784-hgtbm" Nov 28 06:55:13 crc kubenswrapper[4922]: I1128 06:55:13.185554 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/83a5410b-be02-42e8-9b5d-7d476dc779fc-signing-key\") pod \"service-ca-9c57cc56f-k2x5w\" (UID: \"83a5410b-be02-42e8-9b5d-7d476dc779fc\") " pod="openshift-service-ca/service-ca-9c57cc56f-k2x5w" Nov 28 06:55:13 crc kubenswrapper[4922]: I1128 06:55:13.185682 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/dd20079d-34c4-4ea6-920f-420a1d6bb863-installation-pull-secrets\") pod \"image-registry-697d97f7c8-sckww\" (UID: \"dd20079d-34c4-4ea6-920f-420a1d6bb863\") " pod="openshift-image-registry/image-registry-697d97f7c8-sckww" Nov 28 06:55:13 crc kubenswrapper[4922]: I1128 06:55:13.189204 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/7de83d0f-3269-4343-b2a9-398b8d4af4fc-console-oauth-config\") pod \"console-f9d7485db-h297g\" (UID: \"7de83d0f-3269-4343-b2a9-398b8d4af4fc\") " pod="openshift-console/console-f9d7485db-h297g" Nov 28 06:55:13 crc kubenswrapper[4922]: I1128 06:55:13.190384 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/1090992f-7c26-40f9-95c6-eaf05f31fda2-apiservice-cert\") pod \"packageserver-d55dfcdfc-r4frz\" (UID: \"1090992f-7c26-40f9-95c6-eaf05f31fda2\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-r4frz" Nov 28 06:55:13 crc kubenswrapper[4922]: I1128 06:55:13.190845 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/b55d7c5c-1fc1-4c9a-b82b-35dbabbbe8e7-metrics-tls\") pod \"ingress-operator-5b745b69d9-d6r4k\" (UID: \"b55d7c5c-1fc1-4c9a-b82b-35dbabbbe8e7\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-d6r4k" Nov 28 06:55:13 crc kubenswrapper[4922]: I1128 06:55:13.192077 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/dd20079d-34c4-4ea6-920f-420a1d6bb863-registry-certificates\") pod \"image-registry-697d97f7c8-sckww\" (UID: \"dd20079d-34c4-4ea6-920f-420a1d6bb863\") " pod="openshift-image-registry/image-registry-697d97f7c8-sckww" Nov 28 06:55:13 crc kubenswrapper[4922]: I1128 06:55:13.192128 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/7de83d0f-3269-4343-b2a9-398b8d4af4fc-console-config\") pod \"console-f9d7485db-h297g\" (UID: \"7de83d0f-3269-4343-b2a9-398b8d4af4fc\") " pod="openshift-console/console-f9d7485db-h297g" Nov 28 06:55:13 crc kubenswrapper[4922]: I1128 06:55:13.193009 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/7de83d0f-3269-4343-b2a9-398b8d4af4fc-oauth-serving-cert\") pod \"console-f9d7485db-h297g\" (UID: \"7de83d0f-3269-4343-b2a9-398b8d4af4fc\") " pod="openshift-console/console-f9d7485db-h297g" Nov 28 06:55:13 crc kubenswrapper[4922]: I1128 06:55:13.203934 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/dd20079d-34c4-4ea6-920f-420a1d6bb863-trusted-ca\") pod 
\"image-registry-697d97f7c8-sckww\" (UID: \"dd20079d-34c4-4ea6-920f-420a1d6bb863\") " pod="openshift-image-registry/image-registry-697d97f7c8-sckww" Nov 28 06:55:13 crc kubenswrapper[4922]: I1128 06:55:13.205445 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-nm899"] Nov 28 06:55:13 crc kubenswrapper[4922]: I1128 06:55:13.205495 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/cd6df89d-cec7-40af-acae-2b1308378819-srv-cert\") pod \"olm-operator-6b444d44fb-8nrlh\" (UID: \"cd6df89d-cec7-40af-acae-2b1308378819\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-8nrlh" Nov 28 06:55:13 crc kubenswrapper[4922]: I1128 06:55:13.209284 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-dir\" (UniqueName: \"kubernetes.io/host-path/04c1fcf2-5298-423f-ab9e-56043ced3e2f-plugins-dir\") pod \"csi-hostpathplugin-djvcb\" (UID: \"04c1fcf2-5298-423f-ab9e-56043ced3e2f\") " pod="hostpath-provisioner/csi-hostpathplugin-djvcb" Nov 28 06:55:13 crc kubenswrapper[4922]: I1128 06:55:13.209531 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-dir\" (UniqueName: \"kubernetes.io/host-path/04c1fcf2-5298-423f-ab9e-56043ced3e2f-plugins-dir\") pod \"csi-hostpathplugin-djvcb\" (UID: \"04c1fcf2-5298-423f-ab9e-56043ced3e2f\") " pod="hostpath-provisioner/csi-hostpathplugin-djvcb" Nov 28 06:55:13 crc kubenswrapper[4922]: I1128 06:55:13.210757 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"csi-data-dir\" (UniqueName: \"kubernetes.io/host-path/04c1fcf2-5298-423f-ab9e-56043ced3e2f-csi-data-dir\") pod \"csi-hostpathplugin-djvcb\" (UID: \"04c1fcf2-5298-423f-ab9e-56043ced3e2f\") " pod="hostpath-provisioner/csi-hostpathplugin-djvcb" Nov 28 06:55:13 crc kubenswrapper[4922]: I1128 06:55:13.210811 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"mountpoint-dir\" (UniqueName: \"kubernetes.io/host-path/04c1fcf2-5298-423f-ab9e-56043ced3e2f-mountpoint-dir\") pod \"csi-hostpathplugin-djvcb\" (UID: \"04c1fcf2-5298-423f-ab9e-56043ced3e2f\") " pod="hostpath-provisioner/csi-hostpathplugin-djvcb" Nov 28 06:55:13 crc kubenswrapper[4922]: I1128 06:55:13.210888 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-sckww\" (UID: \"dd20079d-34c4-4ea6-920f-420a1d6bb863\") " pod="openshift-image-registry/image-registry-697d97f7c8-sckww" Nov 28 06:55:13 crc kubenswrapper[4922]: I1128 06:55:13.210959 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5ts9l\" (UniqueName: \"kubernetes.io/projected/7c07a370-f5fd-46a2-b0a6-06a2850b5233-kube-api-access-5ts9l\") pod \"machine-config-server-xq92w\" (UID: \"7c07a370-f5fd-46a2-b0a6-06a2850b5233\") " pod="openshift-machine-config-operator/machine-config-server-xq92w" Nov 28 06:55:13 crc kubenswrapper[4922]: I1128 06:55:13.210995 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/7c07a370-f5fd-46a2-b0a6-06a2850b5233-node-bootstrap-token\") pod \"machine-config-server-xq92w\" (UID: \"7c07a370-f5fd-46a2-b0a6-06a2850b5233\") " 
pod="openshift-machine-config-operator/machine-config-server-xq92w" Nov 28 06:55:13 crc kubenswrapper[4922]: I1128 06:55:13.211021 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-f295x\" (UniqueName: \"kubernetes.io/projected/49678f48-cc19-4c5d-8b9e-2926f02770d9-kube-api-access-f295x\") pod \"ingress-canary-p5q7c\" (UID: \"49678f48-cc19-4c5d-8b9e-2926f02770d9\") " pod="openshift-ingress-canary/ingress-canary-p5q7c" Nov 28 06:55:13 crc kubenswrapper[4922]: I1128 06:55:13.211096 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/03dc5df6-dfb3-4839-9c45-71894d1b547b-config-volume\") pod \"dns-default-62v62\" (UID: \"03dc5df6-dfb3-4839-9c45-71894d1b547b\") " pod="openshift-dns/dns-default-62v62" Nov 28 06:55:13 crc kubenswrapper[4922]: I1128 06:55:13.218136 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/03dc5df6-dfb3-4839-9c45-71894d1b547b-config-volume\") pod \"dns-default-62v62\" (UID: \"03dc5df6-dfb3-4839-9c45-71894d1b547b\") " pod="openshift-dns/dns-default-62v62" Nov 28 06:55:13 crc kubenswrapper[4922]: I1128 06:55:13.218416 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"csi-data-dir\" (UniqueName: \"kubernetes.io/host-path/04c1fcf2-5298-423f-ab9e-56043ced3e2f-csi-data-dir\") pod \"csi-hostpathplugin-djvcb\" (UID: \"04c1fcf2-5298-423f-ab9e-56043ced3e2f\") " pod="hostpath-provisioner/csi-hostpathplugin-djvcb" Nov 28 06:55:13 crc kubenswrapper[4922]: I1128 06:55:13.218465 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"mountpoint-dir\" (UniqueName: \"kubernetes.io/host-path/04c1fcf2-5298-423f-ab9e-56043ced3e2f-mountpoint-dir\") pod \"csi-hostpathplugin-djvcb\" (UID: \"04c1fcf2-5298-423f-ab9e-56043ced3e2f\") " pod="hostpath-provisioner/csi-hostpathplugin-djvcb" Nov 28 06:55:13 crc kubenswrapper[4922]: E1128 06:55:13.219383 4922 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 06:55:13.719350313 +0000 UTC m=+158.639745895 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-sckww" (UID: "dd20079d-34c4-4ea6-920f-420a1d6bb863") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 06:55:13 crc kubenswrapper[4922]: I1128 06:55:13.223615 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/1090992f-7c26-40f9-95c6-eaf05f31fda2-webhook-cert\") pod \"packageserver-d55dfcdfc-r4frz\" (UID: \"1090992f-7c26-40f9-95c6-eaf05f31fda2\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-r4frz" Nov 28 06:55:13 crc kubenswrapper[4922]: I1128 06:55:13.223667 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/7de83d0f-3269-4343-b2a9-398b8d4af4fc-console-serving-cert\") pod \"console-f9d7485db-h297g\" (UID: \"7de83d0f-3269-4343-b2a9-398b8d4af4fc\") " pod="openshift-console/console-f9d7485db-h297g" Nov 28 06:55:13 crc kubenswrapper[4922]: I1128 06:55:13.225138 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/cd6df89d-cec7-40af-acae-2b1308378819-profile-collector-cert\") pod \"olm-operator-6b444d44fb-8nrlh\" (UID: \"cd6df89d-cec7-40af-acae-2b1308378819\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-8nrlh" Nov 28 06:55:13 crc kubenswrapper[4922]: I1128 06:55:13.229361 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/7c07a370-f5fd-46a2-b0a6-06a2850b5233-node-bootstrap-token\") pod \"machine-config-server-xq92w\" (UID: \"7c07a370-f5fd-46a2-b0a6-06a2850b5233\") " pod="openshift-machine-config-operator/machine-config-server-xq92w" Nov 28 06:55:13 crc kubenswrapper[4922]: I1128 06:55:13.231449 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7m8sp\" (UniqueName: \"kubernetes.io/projected/b55d7c5c-1fc1-4c9a-b82b-35dbabbbe8e7-kube-api-access-7m8sp\") pod \"ingress-operator-5b745b69d9-d6r4k\" (UID: \"b55d7c5c-1fc1-4c9a-b82b-35dbabbbe8e7\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-d6r4k" Nov 28 06:55:13 crc kubenswrapper[4922]: I1128 06:55:13.232836 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/dd20079d-34c4-4ea6-920f-420a1d6bb863-registry-tls\") pod \"image-registry-697d97f7c8-sckww\" (UID: \"dd20079d-34c4-4ea6-920f-420a1d6bb863\") " pod="openshift-image-registry/image-registry-697d97f7c8-sckww" Nov 28 06:55:13 crc kubenswrapper[4922]: I1128 06:55:13.283954 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-bqwm2"] Nov 28 06:55:13 crc kubenswrapper[4922]: I1128 06:55:13.286158 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/catalog-operator-68c6474976-jf9pl"] Nov 28 06:55:13 crc kubenswrapper[4922]: I1128 06:55:13.292541 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-q862v\" (UniqueName: \"kubernetes.io/projected/e2e57fd7-85d0-4872-8737-c6423b16b702-kube-api-access-q862v\") pod 
\"migrator-59844c95c7-54gpd\" (UID: \"e2e57fd7-85d0-4872-8737-c6423b16b702\") " pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-54gpd" Nov 28 06:55:13 crc kubenswrapper[4922]: I1128 06:55:13.293101 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mx2qx\" (UniqueName: \"kubernetes.io/projected/83a5410b-be02-42e8-9b5d-7d476dc779fc-kube-api-access-mx2qx\") pod \"service-ca-9c57cc56f-k2x5w\" (UID: \"83a5410b-be02-42e8-9b5d-7d476dc779fc\") " pod="openshift-service-ca/service-ca-9c57cc56f-k2x5w" Nov 28 06:55:13 crc kubenswrapper[4922]: I1128 06:55:13.293627 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6t57g\" (UniqueName: \"kubernetes.io/projected/1090992f-7c26-40f9-95c6-eaf05f31fda2-kube-api-access-6t57g\") pod \"packageserver-d55dfcdfc-r4frz\" (UID: \"1090992f-7c26-40f9-95c6-eaf05f31fda2\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-r4frz" Nov 28 06:55:13 crc kubenswrapper[4922]: I1128 06:55:13.294893 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/dd20079d-34c4-4ea6-920f-420a1d6bb863-bound-sa-token\") pod \"image-registry-697d97f7c8-sckww\" (UID: \"dd20079d-34c4-4ea6-920f-420a1d6bb863\") " pod="openshift-image-registry/image-registry-697d97f7c8-sckww" Nov 28 06:55:13 crc kubenswrapper[4922]: I1128 06:55:13.294887 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-g8552\" (UniqueName: \"kubernetes.io/projected/7de83d0f-3269-4343-b2a9-398b8d4af4fc-kube-api-access-g8552\") pod \"console-f9d7485db-h297g\" (UID: \"7de83d0f-3269-4343-b2a9-398b8d4af4fc\") " pod="openshift-console/console-f9d7485db-h297g" Nov 28 06:55:13 crc kubenswrapper[4922]: I1128 06:55:13.309608 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/b55d7c5c-1fc1-4c9a-b82b-35dbabbbe8e7-bound-sa-token\") pod \"ingress-operator-5b745b69d9-d6r4k\" (UID: \"b55d7c5c-1fc1-4c9a-b82b-35dbabbbe8e7\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-d6r4k" Nov 28 06:55:13 crc kubenswrapper[4922]: I1128 06:55:13.312566 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 06:55:13 crc kubenswrapper[4922]: I1128 06:55:13.312667 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"certs\" (UniqueName: \"kubernetes.io/secret/7c07a370-f5fd-46a2-b0a6-06a2850b5233-certs\") pod \"machine-config-server-xq92w\" (UID: \"7c07a370-f5fd-46a2-b0a6-06a2850b5233\") " pod="openshift-machine-config-operator/machine-config-server-xq92w" Nov 28 06:55:13 crc kubenswrapper[4922]: I1128 06:55:13.312695 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7l2s9\" (UniqueName: \"kubernetes.io/projected/03dc5df6-dfb3-4839-9c45-71894d1b547b-kube-api-access-7l2s9\") pod \"dns-default-62v62\" (UID: \"03dc5df6-dfb3-4839-9c45-71894d1b547b\") " pod="openshift-dns/dns-default-62v62" Nov 28 06:55:13 crc kubenswrapper[4922]: I1128 06:55:13.312725 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: 
\"kubernetes.io/secret/03dc5df6-dfb3-4839-9c45-71894d1b547b-metrics-tls\") pod \"dns-default-62v62\" (UID: \"03dc5df6-dfb3-4839-9c45-71894d1b547b\") " pod="openshift-dns/dns-default-62v62" Nov 28 06:55:13 crc kubenswrapper[4922]: I1128 06:55:13.312753 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"socket-dir\" (UniqueName: \"kubernetes.io/host-path/04c1fcf2-5298-423f-ab9e-56043ced3e2f-socket-dir\") pod \"csi-hostpathplugin-djvcb\" (UID: \"04c1fcf2-5298-423f-ab9e-56043ced3e2f\") " pod="hostpath-provisioner/csi-hostpathplugin-djvcb" Nov 28 06:55:13 crc kubenswrapper[4922]: I1128 06:55:13.312770 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rfbc5\" (UniqueName: \"kubernetes.io/projected/04c1fcf2-5298-423f-ab9e-56043ced3e2f-kube-api-access-rfbc5\") pod \"csi-hostpathplugin-djvcb\" (UID: \"04c1fcf2-5298-423f-ab9e-56043ced3e2f\") " pod="hostpath-provisioner/csi-hostpathplugin-djvcb" Nov 28 06:55:13 crc kubenswrapper[4922]: I1128 06:55:13.312849 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registration-dir\" (UniqueName: \"kubernetes.io/host-path/04c1fcf2-5298-423f-ab9e-56043ced3e2f-registration-dir\") pod \"csi-hostpathplugin-djvcb\" (UID: \"04c1fcf2-5298-423f-ab9e-56043ced3e2f\") " pod="hostpath-provisioner/csi-hostpathplugin-djvcb" Nov 28 06:55:13 crc kubenswrapper[4922]: I1128 06:55:13.312864 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/49678f48-cc19-4c5d-8b9e-2926f02770d9-cert\") pod \"ingress-canary-p5q7c\" (UID: \"49678f48-cc19-4c5d-8b9e-2926f02770d9\") " pod="openshift-ingress-canary/ingress-canary-p5q7c" Nov 28 06:55:13 crc kubenswrapper[4922]: I1128 06:55:13.315755 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"socket-dir\" (UniqueName: \"kubernetes.io/host-path/04c1fcf2-5298-423f-ab9e-56043ced3e2f-socket-dir\") pod \"csi-hostpathplugin-djvcb\" (UID: \"04c1fcf2-5298-423f-ab9e-56043ced3e2f\") " pod="hostpath-provisioner/csi-hostpathplugin-djvcb" Nov 28 06:55:13 crc kubenswrapper[4922]: I1128 06:55:13.315862 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registration-dir\" (UniqueName: \"kubernetes.io/host-path/04c1fcf2-5298-423f-ab9e-56043ced3e2f-registration-dir\") pod \"csi-hostpathplugin-djvcb\" (UID: \"04c1fcf2-5298-423f-ab9e-56043ced3e2f\") " pod="hostpath-provisioner/csi-hostpathplugin-djvcb" Nov 28 06:55:13 crc kubenswrapper[4922]: E1128 06:55:13.315944 4922 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 06:55:13.815923707 +0000 UTC m=+158.736319289 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 06:55:13 crc kubenswrapper[4922]: I1128 06:55:13.315944 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-config-operator/openshift-config-operator-7777fb866f-4gt9m"] Nov 28 06:55:13 crc kubenswrapper[4922]: I1128 06:55:13.318288 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-w94qh"] Nov 28 06:55:13 crc kubenswrapper[4922]: I1128 06:55:13.319246 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-9c57cc56f-k2x5w" Nov 28 06:55:13 crc kubenswrapper[4922]: I1128 06:55:13.319468 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"certs\" (UniqueName: \"kubernetes.io/secret/7c07a370-f5fd-46a2-b0a6-06a2850b5233-certs\") pod \"machine-config-server-xq92w\" (UID: \"7c07a370-f5fd-46a2-b0a6-06a2850b5233\") " pod="openshift-machine-config-operator/machine-config-server-xq92w" Nov 28 06:55:13 crc kubenswrapper[4922]: I1128 06:55:13.321434 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/03dc5df6-dfb3-4839-9c45-71894d1b547b-metrics-tls\") pod \"dns-default-62v62\" (UID: \"03dc5df6-dfb3-4839-9c45-71894d1b547b\") " pod="openshift-dns/dns-default-62v62" Nov 28 06:55:13 crc kubenswrapper[4922]: I1128 06:55:13.322003 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-p7ftt\" (UniqueName: \"kubernetes.io/projected/cd6df89d-cec7-40af-acae-2b1308378819-kube-api-access-p7ftt\") pod \"olm-operator-6b444d44fb-8nrlh\" (UID: \"cd6df89d-cec7-40af-acae-2b1308378819\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-8nrlh" Nov 28 06:55:13 crc kubenswrapper[4922]: I1128 06:55:13.325148 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s6l7n\" (UniqueName: \"kubernetes.io/projected/dd20079d-34c4-4ea6-920f-420a1d6bb863-kube-api-access-s6l7n\") pod \"image-registry-697d97f7c8-sckww\" (UID: \"dd20079d-34c4-4ea6-920f-420a1d6bb863\") " pod="openshift-image-registry/image-registry-697d97f7c8-sckww" Nov 28 06:55:13 crc kubenswrapper[4922]: I1128 06:55:13.325154 4922 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-r4frz" Nov 28 06:55:13 crc kubenswrapper[4922]: I1128 06:55:13.328535 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/49678f48-cc19-4c5d-8b9e-2926f02770d9-cert\") pod \"ingress-canary-p5q7c\" (UID: \"49678f48-cc19-4c5d-8b9e-2926f02770d9\") " pod="openshift-ingress-canary/ingress-canary-p5q7c" Nov 28 06:55:13 crc kubenswrapper[4922]: I1128 06:55:13.337818 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5ts9l\" (UniqueName: \"kubernetes.io/projected/7c07a370-f5fd-46a2-b0a6-06a2850b5233-kube-api-access-5ts9l\") pod \"machine-config-server-xq92w\" (UID: \"7c07a370-f5fd-46a2-b0a6-06a2850b5233\") " pod="openshift-machine-config-operator/machine-config-server-xq92w" Nov 28 06:55:13 crc kubenswrapper[4922]: I1128 06:55:13.338131 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-f9d7485db-h297g" Nov 28 06:55:13 crc kubenswrapper[4922]: I1128 06:55:13.346035 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-54gpd" Nov 28 06:55:13 crc kubenswrapper[4922]: W1128 06:55:13.355527 4922 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod9ebbc267_ceb6_444d_b767_493e54d573b6.slice/crio-23190f3fcf1d069c809dc3ff3eba53d38045688a3a871c563cf589157f5902d5 WatchSource:0}: Error finding container 23190f3fcf1d069c809dc3ff3eba53d38045688a3a871c563cf589157f5902d5: Status 404 returned error can't find the container with id 23190f3fcf1d069c809dc3ff3eba53d38045688a3a871c563cf589157f5902d5 Nov 28 06:55:13 crc kubenswrapper[4922]: I1128 06:55:13.365602 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-7tnsd" event={"ID":"b326d97a-72e4-4cbd-bea5-08613505b7e0","Type":"ContainerStarted","Data":"0c7480201b3c277abdafc79f6d19c022b18aa6527e859f9bcfd66386170c1843"} Nov 28 06:55:13 crc kubenswrapper[4922]: I1128 06:55:13.365650 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-7tnsd" event={"ID":"b326d97a-72e4-4cbd-bea5-08613505b7e0","Type":"ContainerStarted","Data":"43a39935473630d872f54cdadbaf867fc5644d6e0fac5713454d0a748343407c"} Nov 28 06:55:13 crc kubenswrapper[4922]: I1128 06:55:13.368753 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-f295x\" (UniqueName: \"kubernetes.io/projected/49678f48-cc19-4c5d-8b9e-2926f02770d9-kube-api-access-f295x\") pod \"ingress-canary-p5q7c\" (UID: \"49678f48-cc19-4c5d-8b9e-2926f02770d9\") " pod="openshift-ingress-canary/ingress-canary-p5q7c" Nov 28 06:55:13 crc kubenswrapper[4922]: I1128 06:55:13.376056 4922 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-8nrlh" Nov 28 06:55:13 crc kubenswrapper[4922]: I1128 06:55:13.385492 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress/router-default-5444994796-xbhv9" event={"ID":"87cf54e1-7498-430e-a517-6658bd9ce547","Type":"ContainerStarted","Data":"ff4b16aafaa953e6f359107a642c1a30bc217d45204c36b5f0ccbb8ba75146f0"} Nov 28 06:55:13 crc kubenswrapper[4922]: I1128 06:55:13.385553 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress/router-default-5444994796-xbhv9" event={"ID":"87cf54e1-7498-430e-a517-6658bd9ce547","Type":"ContainerStarted","Data":"7c3cf80721f3ee9ec1b02cdb700199c346ae0b63e18dc71e3b8627a7417c2895"} Nov 28 06:55:13 crc kubenswrapper[4922]: I1128 06:55:13.387118 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-558db77b4-bqwm2" event={"ID":"d952449e-c825-43d3-a591-0be473db6a53","Type":"ContainerStarted","Data":"7b38742f2ca0fdd464f439f7c942c558bd4abf399f0820f11a39f651c50772ea"} Nov 28 06:55:13 crc kubenswrapper[4922]: I1128 06:55:13.404920 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rfbc5\" (UniqueName: \"kubernetes.io/projected/04c1fcf2-5298-423f-ab9e-56043ced3e2f-kube-api-access-rfbc5\") pod \"csi-hostpathplugin-djvcb\" (UID: \"04c1fcf2-5298-423f-ab9e-56043ced3e2f\") " pod="hostpath-provisioner/csi-hostpathplugin-djvcb" Nov 28 06:55:13 crc kubenswrapper[4922]: I1128 06:55:13.411069 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7l2s9\" (UniqueName: \"kubernetes.io/projected/03dc5df6-dfb3-4839-9c45-71894d1b547b-kube-api-access-7l2s9\") pod \"dns-default-62v62\" (UID: \"03dc5df6-dfb3-4839-9c45-71894d1b547b\") " pod="openshift-dns/dns-default-62v62" Nov 28 06:55:13 crc kubenswrapper[4922]: I1128 06:55:13.412947 4922 generic.go:334] "Generic (PLEG): container finished" podID="d58dc2f1-e069-4b63-8371-7c6b2735adab" containerID="a42a9cb33ea7c63863d690ac9db560ecdfe34636ea134ee619522e6358b3c6ff" exitCode=0 Nov 28 06:55:13 crc kubenswrapper[4922]: I1128 06:55:13.413296 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-sckww\" (UID: \"dd20079d-34c4-4ea6-920f-420a1d6bb863\") " pod="openshift-image-registry/image-registry-697d97f7c8-sckww" Nov 28 06:55:13 crc kubenswrapper[4922]: E1128 06:55:13.414652 4922 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 06:55:13.91463659 +0000 UTC m=+158.835032172 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-sckww" (UID: "dd20079d-34c4-4ea6-920f-420a1d6bb863") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 06:55:13 crc kubenswrapper[4922]: I1128 06:55:13.431618 4922 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-d6r4k" Nov 28 06:55:13 crc kubenswrapper[4922]: I1128 06:55:13.437788 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/dns-default-62v62" Nov 28 06:55:13 crc kubenswrapper[4922]: I1128 06:55:13.445808 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-canary/ingress-canary-p5q7c" Nov 28 06:55:13 crc kubenswrapper[4922]: I1128 06:55:13.454299 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-server-xq92w" Nov 28 06:55:13 crc kubenswrapper[4922]: I1128 06:55:13.473509 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-djvcb" Nov 28 06:55:13 crc kubenswrapper[4922]: I1128 06:55:13.503578 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-qckrl" event={"ID":"1aefeaa5-e3f1-4aed-b152-35d380c3f87b","Type":"ContainerStarted","Data":"dcc895042ec3a09e388d51c22a74ecf2d8659b5833fe1945d74a0ccc89e330c6"} Nov 28 06:55:13 crc kubenswrapper[4922]: I1128 06:55:13.503630 4922 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-qckrl" Nov 28 06:55:13 crc kubenswrapper[4922]: I1128 06:55:13.503642 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-qckrl" event={"ID":"1aefeaa5-e3f1-4aed-b152-35d380c3f87b","Type":"ContainerStarted","Data":"4dd1dc28c70662a0d690ebffa4d4a09d15ce47c18d9375bad4dd0d5016896980"} Nov 28 06:55:13 crc kubenswrapper[4922]: I1128 06:55:13.503661 4922 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console/downloads-7954f5f757-2xxgj" Nov 28 06:55:13 crc kubenswrapper[4922]: I1128 06:55:13.503679 4922 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-controller-manager/controller-manager-879f6c89f-xlthx" Nov 28 06:55:13 crc kubenswrapper[4922]: I1128 06:55:13.503690 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/downloads-7954f5f757-2xxgj" event={"ID":"49208aaf-d742-4bab-9b7e-b883e88096f6","Type":"ContainerStarted","Data":"79d93dadb7e7f03b245d5a3adc149d45c6446e89e8d0455e1ffc9315bfae0dd6"} Nov 28 06:55:13 crc kubenswrapper[4922]: I1128 06:55:13.503702 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/downloads-7954f5f757-2xxgj" event={"ID":"49208aaf-d742-4bab-9b7e-b883e88096f6","Type":"ContainerStarted","Data":"f443ce9fdf4640f21211ea73566ffbc73b441a9d20f1609fc80cea88c382dada"} Nov 28 06:55:13 crc kubenswrapper[4922]: I1128 06:55:13.503716 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-h7pf2"] Nov 28 06:55:13 crc kubenswrapper[4922]: I1128 06:55:13.503736 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-9t6kr" event={"ID":"d58dc2f1-e069-4b63-8371-7c6b2735adab","Type":"ContainerDied","Data":"a42a9cb33ea7c63863d690ac9db560ecdfe34636ea134ee619522e6358b3c6ff"} Nov 28 06:55:13 crc kubenswrapper[4922]: I1128 06:55:13.503752 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" 
pods=["openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-gzqmt"] Nov 28 06:55:13 crc kubenswrapper[4922]: I1128 06:55:13.503765 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-9t6kr" event={"ID":"d58dc2f1-e069-4b63-8371-7c6b2735adab","Type":"ContainerStarted","Data":"8acecfeebbdf93e719acaddf24a3fed575ee193006fb7c7254693470cb69fa50"} Nov 28 06:55:13 crc kubenswrapper[4922]: I1128 06:55:13.503777 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-multus/multus-admission-controller-857f4d67dd-98r22"] Nov 28 06:55:13 crc kubenswrapper[4922]: I1128 06:55:13.503824 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-4b5vr" event={"ID":"ddd22a7d-d9fd-41c5-a83a-8b68574e637e","Type":"ContainerStarted","Data":"460b0ec7d7ce653f09fd713ea4d672b89b47ae843272a4b60fd8cf580db65b74"} Nov 28 06:55:13 crc kubenswrapper[4922]: I1128 06:55:13.503839 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-4b5vr" event={"ID":"ddd22a7d-d9fd-41c5-a83a-8b68574e637e","Type":"ContainerStarted","Data":"29ced218a55c81c1b6e4bcc85981cfd3654e552a19d1559fa7f6bc449c3fc2c6"} Nov 28 06:55:13 crc kubenswrapper[4922]: I1128 06:55:13.503853 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-879f6c89f-xlthx" event={"ID":"fede5fe4-d38c-46de-b334-32e9f56cf110","Type":"ContainerStarted","Data":"7b65c9201d746b26486f46a68fec8f67b36698ee79732d6d1b114350b13b68dd"} Nov 28 06:55:13 crc kubenswrapper[4922]: I1128 06:55:13.503864 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-879f6c89f-xlthx" event={"ID":"fede5fe4-d38c-46de-b334-32e9f56cf110","Type":"ContainerStarted","Data":"265157a4986ec26a5131405d0872c17d92257f0f440decae6ac9d35584072e74"} Nov 28 06:55:13 crc kubenswrapper[4922]: I1128 06:55:13.503875 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-pp742" event={"ID":"6b8f1ab0-54be-4dad-97b3-a69b288dea67","Type":"ContainerStarted","Data":"db323692e758b09d7aca94c82a3f3280e062e4932db1dc0993667e51843c190f"} Nov 28 06:55:13 crc kubenswrapper[4922]: I1128 06:55:13.518942 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 06:55:13 crc kubenswrapper[4922]: E1128 06:55:13.519073 4922 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 06:55:14.019037933 +0000 UTC m=+158.939433525 (durationBeforeRetry 500ms). 
Nov 28 06:55:13 crc kubenswrapper[4922]: I1128 06:55:13.518942 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 28 06:55:13 crc kubenswrapper[4922]: E1128 06:55:13.519073 4922 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 06:55:14.019037933 +0000 UTC m=+158.939433525 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 28 06:55:13 crc kubenswrapper[4922]: I1128 06:55:13.519233 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-sckww\" (UID: \"dd20079d-34c4-4ea6-920f-420a1d6bb863\") " pod="openshift-image-registry/image-registry-697d97f7c8-sckww"
Nov 28 06:55:13 crc kubenswrapper[4922]: E1128 06:55:13.521374 4922 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 06:55:14.021359378 +0000 UTC m=+158.941754950 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-sckww" (UID: "dd20079d-34c4-4ea6-920f-420a1d6bb863") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 28 06:55:13 crc kubenswrapper[4922]: I1128 06:55:13.551989 4922 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-controller-manager/controller-manager-879f6c89f-xlthx"
Nov 28 06:55:13 crc kubenswrapper[4922]: I1128 06:55:13.560941 4922 patch_prober.go:28] interesting pod/downloads-7954f5f757-2xxgj container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.9:8080/\": dial tcp 10.217.0.9:8080: connect: connection refused" start-of-body=
Nov 28 06:55:13 crc kubenswrapper[4922]: I1128 06:55:13.561017 4922 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-2xxgj" podUID="49208aaf-d742-4bab-9b7e-b883e88096f6" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.9:8080/\": dial tcp 10.217.0.9:8080: connect: connection refused"
Nov 28 06:55:13 crc kubenswrapper[4922]: I1128 06:55:13.569156 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-api/machine-api-operator-5694c8668f-rllbn"]
Nov 28 06:55:13 crc kubenswrapper[4922]: I1128 06:55:13.583406 4922 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-console/downloads-7954f5f757-2xxgj" podStartSLOduration=139.583389124 podStartE2EDuration="2m19.583389124s" podCreationTimestamp="2025-11-28 06:52:54 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 06:55:13.582932722 +0000 UTC m=+158.503328304" watchObservedRunningTime="2025-11-28 06:55:13.583389124 +0000 UTC m=+158.503784706"
Nov 28 06:55:13 crc kubenswrapper[4922]: I1128 06:55:13.620558 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 28 06:55:13 crc kubenswrapper[4922]: E1128 06:55:13.621206 4922 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 06:55:14.121189573 +0000 UTC m=+159.041585155 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 28 06:55:13 crc kubenswrapper[4922]: I1128 06:55:13.679288 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-config-operator/machine-config-operator-74547568cd-ln9mh"]
Nov 28 06:55:13 crc kubenswrapper[4922]: I1128 06:55:13.706521 4922 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-ingress/router-default-5444994796-xbhv9"
Nov 28 06:55:13 crc kubenswrapper[4922]: I1128 06:55:13.725143 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-sckww\" (UID: \"dd20079d-34c4-4ea6-920f-420a1d6bb863\") " pod="openshift-image-registry/image-registry-697d97f7c8-sckww"
Nov 28 06:55:13 crc kubenswrapper[4922]: E1128 06:55:13.734192 4922 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 06:55:14.234152305 +0000 UTC m=+159.154547887 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-sckww" (UID: "dd20079d-34c4-4ea6-920f-420a1d6bb863") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 28 06:55:13 crc kubenswrapper[4922]: I1128 06:55:13.765549 4922 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-qckrl"
Nov 28 06:55:13 crc kubenswrapper[4922]: I1128 06:55:13.811816 4922 patch_prober.go:28] interesting pod/router-default-5444994796-xbhv9 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Nov 28 06:55:13 crc kubenswrapper[4922]: [-]has-synced failed: reason withheld
Nov 28 06:55:13 crc kubenswrapper[4922]: [+]process-running ok
Nov 28 06:55:13 crc kubenswrapper[4922]: healthz check failed
Nov 28 06:55:13 crc kubenswrapper[4922]: I1128 06:55:13.811891 4922 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-xbhv9" podUID="87cf54e1-7498-430e-a517-6658bd9ce547" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Nov 28 06:55:13 crc kubenswrapper[4922]: I1128 06:55:13.836733 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 28 06:55:13 crc kubenswrapper[4922]: E1128 06:55:13.837098 4922 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 06:55:14.337079667 +0000 UTC m=+159.257475249 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
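Every UnmountVolume/MountVolume attempt above dies on the same lookup: the CSI volume plugin asks for a client by driver name, and kubevirt.io.hostpath-provisioner has not yet registered with the kubelet (the csi-hostpathplugin-djvcb pod is itself still being started, per the "No sandbox" line earlier). A rough sketch of that gate, with illustrative names rather than the kubelet's internals:

    package main

    import (
        "fmt"
        "sync"
    )

    // Minimal sketch of the registry lookup behind the error above: volume
    // operations fail fast while the named driver has no registered node
    // plugin. Type and method names here are invented for illustration.
    type csiDriverRegistry struct {
        mu      sync.RWMutex
        drivers map[string]struct{} // driverName -> registered node plugin
    }

    func (r *csiDriverRegistry) client(driverName string) error {
        r.mu.RLock()
        defer r.mu.RUnlock()
        if _, ok := r.drivers[driverName]; !ok {
            return fmt.Errorf("driver name %s not found in the list of registered CSI drivers", driverName)
        }
        return nil
    }

    func main() {
        reg := &csiDriverRegistry{drivers: map[string]struct{}{}}
        // Before the hostpath-provisioner node plugin registers: every retry fails.
        fmt.Println(reg.client("kubevirt.io.hostpath-provisioner"))
        // Once registration completes, the same lookup succeeds and the mount proceeds.
        reg.mu.Lock()
        reg.drivers["kubevirt.io.hostpath-provisioner"] = struct{}{}
        reg.mu.Unlock()
        fmt.Println(reg.client("kubevirt.io.hostpath-provisioner"))
    }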
Nov 28 06:55:13 crc kubenswrapper[4922]: I1128 06:55:13.937893 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-sckww\" (UID: \"dd20079d-34c4-4ea6-920f-420a1d6bb863\") " pod="openshift-image-registry/image-registry-697d97f7c8-sckww"
Nov 28 06:55:13 crc kubenswrapper[4922]: E1128 06:55:13.938490 4922 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 06:55:14.438477675 +0000 UTC m=+159.358873257 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-sckww" (UID: "dd20079d-34c4-4ea6-920f-420a1d6bb863") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 28 06:55:14 crc kubenswrapper[4922]: I1128 06:55:14.020077 4922 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-qckrl" podStartSLOduration=140.020062249 podStartE2EDuration="2m20.020062249s" podCreationTimestamp="2025-11-28 06:52:54 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 06:55:14.018381233 +0000 UTC m=+158.938776815" watchObservedRunningTime="2025-11-28 06:55:14.020062249 +0000 UTC m=+158.940457831"
Nov 28 06:55:14 crc kubenswrapper[4922]: I1128 06:55:14.043663 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 28 06:55:14 crc kubenswrapper[4922]: E1128 06:55:14.043992 4922 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 06:55:14.543971879 +0000 UTC m=+159.464367461 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 28 06:55:14 crc kubenswrapper[4922]: I1128 06:55:14.112820 4922 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-7tnsd" podStartSLOduration=140.112803095 podStartE2EDuration="2m20.112803095s" podCreationTimestamp="2025-11-28 06:52:54 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 06:55:14.111811888 +0000 UTC m=+159.032207470" watchObservedRunningTime="2025-11-28 06:55:14.112803095 +0000 UTC m=+159.033198677"
Nov 28 06:55:14 crc kubenswrapper[4922]: I1128 06:55:14.133611 4922 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-4b5vr" podStartSLOduration=140.133589838 podStartE2EDuration="2m20.133589838s" podCreationTimestamp="2025-11-28 06:52:54 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 06:55:14.132829006 +0000 UTC m=+159.053224598" watchObservedRunningTime="2025-11-28 06:55:14.133589838 +0000 UTC m=+159.053985420"
Nov 28 06:55:14 crc kubenswrapper[4922]: I1128 06:55:14.151500 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-sckww\" (UID: \"dd20079d-34c4-4ea6-920f-420a1d6bb863\") " pod="openshift-image-registry/image-registry-697d97f7c8-sckww"
Nov 28 06:55:14 crc kubenswrapper[4922]: E1128 06:55:14.151953 4922 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 06:55:14.651940371 +0000 UTC m=+159.572335943 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-sckww" (UID: "dd20079d-34c4-4ea6-920f-420a1d6bb863") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 28 06:55:14 crc kubenswrapper[4922]: I1128 06:55:14.192898 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-5pc2z"]
Nov 28 06:55:14 crc kubenswrapper[4922]: I1128 06:55:14.225073 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-apiserver/apiserver-76f77b778f-ht2sn"]
Nov 28 06:55:14 crc kubenswrapper[4922]: I1128 06:55:14.252998 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 28 06:55:14 crc kubenswrapper[4922]: E1128 06:55:14.253419 4922 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 06:55:14.753401141 +0000 UTC m=+159.673796723 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
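The paired nestedpendingoperations lines above implement a retry gate: when an operation fails, the reconciler records a not-before deadline (here 500ms out), and re-queued attempts are rejected with "No retries permitted until ..." until that deadline passes. A minimal sketch under those assumptions; field and function names are invented for illustration, and the kubelet's version also grows the delay on repeated failures:

    package main

    import (
        "errors"
        "fmt"
        "time"
    )

    // retryGate mimics the "No retries permitted until ... (durationBeforeRetry
    // 500ms)" behavior seen above: a failed operation may not be retried before
    // its recorded deadline.
    type retryGate struct {
        lastError      error
        notBefore      time.Time
        durationBefore time.Duration
    }

    func (g *retryGate) recordFailure(err error, now time.Time) {
        g.lastError = err
        g.durationBefore = 500 * time.Millisecond // first retry window in the log
        g.notBefore = now.Add(g.durationBefore)
    }

    func (g *retryGate) mayRetry(now time.Time) error {
        if now.Before(g.notBefore) {
            return fmt.Errorf("no retries permitted until %s (durationBeforeRetry %s): %w",
                g.notBefore.Format(time.RFC3339Nano), g.durationBefore, g.lastError)
        }
        return nil
    }

    func main() {
        g := &retryGate{}
        now := time.Now()
        g.recordFailure(errors.New("driver name kubevirt.io.hostpath-provisioner not found"), now)
        fmt.Println(g.mayRetry(now.Add(100 * time.Millisecond))) // still gated
        fmt.Println(g.mayRetry(now.Add(600 * time.Millisecond))) // nil: retry allowed
    }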
Nov 28 06:55:14 crc kubenswrapper[4922]: I1128 06:55:14.267855 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console-operator/console-operator-58897d9998-z4zvd"]
Nov 28 06:55:14 crc kubenswrapper[4922]: I1128 06:55:14.269746 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication-operator/authentication-operator-69f744f599-fzxnn"]
Nov 28 06:55:14 crc kubenswrapper[4922]: I1128 06:55:14.351989 4922 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager/controller-manager-879f6c89f-xlthx" podStartSLOduration=140.351968702 podStartE2EDuration="2m20.351968702s" podCreationTimestamp="2025-11-28 06:52:54 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 06:55:14.351746415 +0000 UTC m=+159.272142007" watchObservedRunningTime="2025-11-28 06:55:14.351968702 +0000 UTC m=+159.272364284"
Nov 28 06:55:14 crc kubenswrapper[4922]: I1128 06:55:14.354505 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-sckww\" (UID: \"dd20079d-34c4-4ea6-920f-420a1d6bb863\") " pod="openshift-image-registry/image-registry-697d97f7c8-sckww"
Nov 28 06:55:14 crc kubenswrapper[4922]: E1128 06:55:14.354845 4922 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 06:55:14.854833332 +0000 UTC m=+159.775228914 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-sckww" (UID: "dd20079d-34c4-4ea6-920f-420a1d6bb863") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 28 06:55:14 crc kubenswrapper[4922]: I1128 06:55:14.444120 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-pp742" event={"ID":"6b8f1ab0-54be-4dad-97b3-a69b288dea67","Type":"ContainerStarted","Data":"d6056659539a0909b1fc2e71cfca256ffbd41e93269b761b7d130ced880bf7ed"}
Nov 28 06:55:14 crc kubenswrapper[4922]: I1128 06:55:14.451833 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-w94qh" event={"ID":"648b4624-2324-4ec1-aa88-5822c9f89034","Type":"ContainerStarted","Data":"266db27d392bdd3aeee54b12233837cef1d1cd36663b0bb25c3845a7a7a83d4d"}
Nov 28 06:55:14 crc kubenswrapper[4922]: I1128 06:55:14.451878 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-w94qh" event={"ID":"648b4624-2324-4ec1-aa88-5822c9f89034","Type":"ContainerStarted","Data":"9d0970108dae51022ae518800efc1790f7f10e6cadb5e0f07ea4feeadf332cb3"}
Nov 28 06:55:14 crc kubenswrapper[4922]: I1128 06:55:14.458212 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 28 06:55:14 crc kubenswrapper[4922]: E1128 06:55:14.458316 4922 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 06:55:14.958294018 +0000 UTC m=+159.878689590 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 28 06:55:14 crc kubenswrapper[4922]: I1128 06:55:14.458380 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-ln9mh" event={"ID":"059ac1e5-20d6-4a32-846e-d427680a560f","Type":"ContainerStarted","Data":"7f7b558af68e1ba9e272a35d538e06a8d65817980be795904fb170e2ed2604ad"}
Nov 28 06:55:14 crc kubenswrapper[4922]: I1128 06:55:14.458571 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-sckww\" (UID: \"dd20079d-34c4-4ea6-920f-420a1d6bb863\") " pod="openshift-image-registry/image-registry-697d97f7c8-sckww"
Nov 28 06:55:14 crc kubenswrapper[4922]: E1128 06:55:14.459011 4922 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 06:55:14.958994078 +0000 UTC m=+159.879389660 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-sckww" (UID: "dd20079d-34c4-4ea6-920f-420a1d6bb863") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 28 06:55:14 crc kubenswrapper[4922]: I1128 06:55:14.459251 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-gzqmt" event={"ID":"e77ea863-52c9-4f43-968f-73ba31a6b0de","Type":"ContainerStarted","Data":"fdb1c4d038603ed0593c8dcbde91df907629fd8da820563cc0ec21f897a8a424"}
Nov 28 06:55:14 crc kubenswrapper[4922]: I1128 06:55:14.461327 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-nm899" event={"ID":"65e269c0-8d17-410e-ae5c-4ff9ba053bf9","Type":"ContainerStarted","Data":"ff1cc70671485574d3225c856641cf14d4f07338e1bb77a4b8db69897356b9fd"}
Nov 28 06:55:14 crc kubenswrapper[4922]: I1128 06:55:14.461347 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-nm899" event={"ID":"65e269c0-8d17-410e-ae5c-4ff9ba053bf9","Type":"ContainerStarted","Data":"376ff7f4ac8cb15f8b2ce76409f6fe49fc2882ac2c6285d5e5c9c0eac37d6fca"}
Nov 28 06:55:14 crc kubenswrapper[4922]: I1128 06:55:14.463890 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-server-xq92w" event={"ID":"7c07a370-f5fd-46a2-b0a6-06a2850b5233","Type":"ContainerStarted","Data":"ef04a9abba1454df4307caa0511a0a659a040fd83d0d37b8659a08bdc7becac6"}
Nov 28 06:55:14 crc kubenswrapper[4922]: I1128 06:55:14.463922 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-server-xq92w" event={"ID":"7c07a370-f5fd-46a2-b0a6-06a2850b5233","Type":"ContainerStarted","Data":"41169759f810399f3003d0867147f88533135a563888cc0dc230770f6a6a484c"}
Nov 28 06:55:14 crc kubenswrapper[4922]: I1128 06:55:14.471050 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-h7pf2" event={"ID":"fb28612c-20e3-4319-9db8-dae18a593a1e","Type":"ContainerStarted","Data":"933f5a0e43a4419ee47d3ab1425f4091da96cb184aa69c549d18ba055d590f25"}
Nov 28 06:55:14 crc kubenswrapper[4922]: I1128 06:55:14.473823 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-config-operator/openshift-config-operator-7777fb866f-4gt9m" event={"ID":"da14c12f-6323-4600-9d9e-b2e5d53e1ecb","Type":"ContainerStarted","Data":"b406b3e4c63d5eeaf7f0b4285863820e7bba52507764318d9ab4bcb46561bd6f"}
Nov 28 06:55:14 crc kubenswrapper[4922]: I1128 06:55:14.481979 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/machine-api-operator-5694c8668f-rllbn" event={"ID":"b5c2983d-2d59-4691-b2cd-130bfcd3e18c","Type":"ContainerStarted","Data":"a4b4b7dfb04d07936694cad50cf020c2b3c09609b48f4701f7fdbcba26995393"}
Nov 28 06:55:14 crc kubenswrapper[4922]: I1128 06:55:14.487343 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-admission-controller-857f4d67dd-98r22" event={"ID":"d0751583-7dc1-4547-9240-39d874a6ca87","Type":"ContainerStarted","Data":"75057f1e29e2aef992f6b59172a51079def1476c4e0cbaee103c8967feba3d3a"}
Nov 28 06:55:14 crc kubenswrapper[4922]: I1128 06:55:14.491617 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-jf9pl" event={"ID":"9ebbc267-ceb6-444d-b767-493e54d573b6","Type":"ContainerStarted","Data":"78270f1989feab91b759c2ae9616b9e9ec0d9fb1f746c0293d76dc33dc7466e4"}
Nov 28 06:55:14 crc kubenswrapper[4922]: I1128 06:55:14.491695 4922 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-jf9pl"
Nov 28 06:55:14 crc kubenswrapper[4922]: I1128 06:55:14.491707 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-jf9pl" event={"ID":"9ebbc267-ceb6-444d-b767-493e54d573b6","Type":"ContainerStarted","Data":"23190f3fcf1d069c809dc3ff3eba53d38045688a3a871c563cf589157f5902d5"}
Nov 28 06:55:14 crc kubenswrapper[4922]: I1128 06:55:14.496529 4922 patch_prober.go:28] interesting pod/catalog-operator-68c6474976-jf9pl container/catalog-operator namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.16:8443/healthz\": dial tcp 10.217.0.16:8443: connect: connection refused" start-of-body=
Nov 28 06:55:14 crc kubenswrapper[4922]: I1128 06:55:14.496634 4922 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-jf9pl" podUID="9ebbc267-ceb6-444d-b767-493e54d573b6" containerName="catalog-operator" probeResult="failure" output="Get \"https://10.217.0.16:8443/healthz\": dial tcp 10.217.0.16:8443: connect: connection refused"
Nov 28 06:55:14 crc kubenswrapper[4922]: I1128 06:55:14.496553 4922 patch_prober.go:28] interesting pod/downloads-7954f5f757-2xxgj container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.9:8080/\": dial tcp 10.217.0.9:8080: connect: connection refused" start-of-body=
Nov 28 06:55:14 crc kubenswrapper[4922]: I1128 06:55:14.496741 4922 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-2xxgj" podUID="49208aaf-d742-4bab-9b7e-b883e88096f6" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.9:8080/\": dial tcp 10.217.0.9:8080: connect: connection refused"
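The readiness failures above are plain HTTP probes: the kubelet GETs the container's declared endpoint, and any transport error or bad status marks the pod NotReady. Here the containers are running but nothing is listening yet, so the TCP connect is refused. A standalone approximation of one probe attempt, with the URL taken from the log (it only dials successfully from the node itself):

    package main

    import (
        "fmt"
        "net/http"
        "time"
    )

    // probeOnce approximates a single kubelet HTTP readiness probe: any
    // transport error or non-2xx/3xx status counts as a failure.
    func probeOnce(url string) string {
        client := &http.Client{Timeout: 1 * time.Second}
        resp, err := client.Get(url)
        if err != nil {
            // Matches the log: connect refused while the server starts up.
            return fmt.Sprintf("failure output=%q", "Get \""+url+"\": "+err.Error())
        }
        defer resp.Body.Close()
        if resp.StatusCode >= 200 && resp.StatusCode < 400 {
            return "success"
        }
        return fmt.Sprintf("failure statuscode=%d", resp.StatusCode)
    }

    func main() {
        fmt.Println(probeOnce("http://10.217.0.9:8080/"))
    }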
Nov 28 06:55:14 crc kubenswrapper[4922]: I1128 06:55:14.560551 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 28 06:55:14 crc kubenswrapper[4922]: E1128 06:55:14.564943 4922 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 06:55:15.064915523 +0000 UTC m=+159.985311105 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 28 06:55:14 crc kubenswrapper[4922]: I1128 06:55:14.665642 4922 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ingress/router-default-5444994796-xbhv9" podStartSLOduration=140.665623872 podStartE2EDuration="2m20.665623872s" podCreationTimestamp="2025-11-28 06:52:54 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 06:55:14.664453559 +0000 UTC m=+159.584849141" watchObservedRunningTime="2025-11-28 06:55:14.665623872 +0000 UTC m=+159.586019454"
Nov 28 06:55:14 crc kubenswrapper[4922]: I1128 06:55:14.668445 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-sckww\" (UID: \"dd20079d-34c4-4ea6-920f-420a1d6bb863\") " pod="openshift-image-registry/image-registry-697d97f7c8-sckww"
Nov 28 06:55:14 crc kubenswrapper[4922]: E1128 06:55:14.669454 4922 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 06:55:15.169438049 +0000 UTC m=+160.089833631 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-sckww" (UID: "dd20079d-34c4-4ea6-920f-420a1d6bb863") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 28 06:55:14 crc kubenswrapper[4922]: I1128 06:55:14.712034 4922 patch_prober.go:28] interesting pod/router-default-5444994796-xbhv9 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Nov 28 06:55:14 crc kubenswrapper[4922]: [-]has-synced failed: reason withheld
Nov 28 06:55:14 crc kubenswrapper[4922]: [+]process-running ok
Nov 28 06:55:14 crc kubenswrapper[4922]: healthz check failed
Nov 28 06:55:14 crc kubenswrapper[4922]: I1128 06:55:14.712122 4922 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-xbhv9" podUID="87cf54e1-7498-430e-a517-6658bd9ce547" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Nov 28 06:55:14 crc kubenswrapper[4922]: W1128 06:55:14.761779 4922 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podfd93cb7d_1de5_4e04_957a_a91d0bb26134.slice/crio-a400a9959a75a34237aca776bdfb2086a77936c597152ad6a2494299f2f0bda6 WatchSource:0}: Error finding container a400a9959a75a34237aca776bdfb2086a77936c597152ad6a2494299f2f0bda6: Status 404 returned error can't find the container with id a400a9959a75a34237aca776bdfb2086a77936c597152ad6a2494299f2f0bda6
Nov 28 06:55:14 crc kubenswrapper[4922]: I1128 06:55:14.769322 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 28 06:55:14 crc kubenswrapper[4922]: E1128 06:55:14.769832 4922 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 06:55:15.269814859 +0000 UTC m=+160.190210441 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 28 06:55:14 crc kubenswrapper[4922]: I1128 06:55:14.877799 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-sckww\" (UID: \"dd20079d-34c4-4ea6-920f-420a1d6bb863\") " pod="openshift-image-registry/image-registry-697d97f7c8-sckww"
Nov 28 06:55:14 crc kubenswrapper[4922]: E1128 06:55:14.878400 4922 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 06:55:15.378386279 +0000 UTC m=+160.298781861 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-sckww" (UID: "dd20079d-34c4-4ea6-920f-420a1d6bb863") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 28 06:55:14 crc kubenswrapper[4922]: I1128 06:55:14.978737 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 28 06:55:14 crc kubenswrapper[4922]: E1128 06:55:14.982287 4922 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 06:55:15.482258507 +0000 UTC m=+160.402654089 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 28 06:55:15 crc kubenswrapper[4922]: I1128 06:55:15.081885 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-sckww\" (UID: \"dd20079d-34c4-4ea6-920f-420a1d6bb863\") " pod="openshift-image-registry/image-registry-697d97f7c8-sckww"
Nov 28 06:55:15 crc kubenswrapper[4922]: E1128 06:55:15.082283 4922 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 06:55:15.582268516 +0000 UTC m=+160.502664098 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-sckww" (UID: "dd20079d-34c4-4ea6-920f-420a1d6bb863") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 28 06:55:15 crc kubenswrapper[4922]: I1128 06:55:15.138513 4922 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-w94qh" podStartSLOduration=141.13848933 podStartE2EDuration="2m21.13848933s" podCreationTimestamp="2025-11-28 06:52:54 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 06:55:15.135680401 +0000 UTC m=+160.056075993" watchObservedRunningTime="2025-11-28 06:55:15.13848933 +0000 UTC m=+160.058884932"
Nov 28 06:55:15 crc kubenswrapper[4922]: I1128 06:55:15.178059 4922 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-jf9pl" podStartSLOduration=141.178036238 podStartE2EDuration="2m21.178036238s" podCreationTimestamp="2025-11-28 06:52:54 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 06:55:15.175317872 +0000 UTC m=+160.095713474" watchObservedRunningTime="2025-11-28 06:55:15.178036238 +0000 UTC m=+160.098431820"
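The router's startup-probe output above ("[-]backend-http failed: reason withheld", "healthz check failed", statuscode 500) is the conventional healthz aggregator format: one line per named check, failing checks withhold their reason, and any failure turns the endpoint into HTTP 500, which is what the kubelet's probe then reports. An illustrative handler in that style, not the router's actual code:

    package main

    import (
        "fmt"
        "log"
        "net/http"
    )

    type check struct {
        name string
        ok   bool
    }

    // healthz renders checks in the aggregator format seen in the probe
    // output above and returns 500 if any check fails.
    func healthz(checks []check) http.HandlerFunc {
        return func(w http.ResponseWriter, r *http.Request) {
            failed := false
            body := ""
            for _, c := range checks {
                if c.ok {
                    body += fmt.Sprintf("[+]%s ok\n", c.name)
                } else {
                    body += fmt.Sprintf("[-]%s failed: reason withheld\n", c.name)
                    failed = true
                }
            }
            if failed {
                body += "healthz check failed\n"
                w.WriteHeader(http.StatusInternalServerError) // probe sees statuscode: 500
            }
            fmt.Fprint(w, body)
        }
    }

    func main() {
        // Check names copied from the log; port chosen arbitrarily for the demo.
        http.Handle("/healthz", healthz([]check{
            {"backend-http", false},
            {"has-synced", false},
            {"process-running", true},
        }))
        log.Fatal(http.ListenAndServe("127.0.0.1:1936", nil))
    }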
Nov 28 06:55:15 crc kubenswrapper[4922]: I1128 06:55:15.182906 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 28 06:55:15 crc kubenswrapper[4922]: E1128 06:55:15.183632 4922 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 06:55:15.683608174 +0000 UTC m=+160.604003756 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 28 06:55:15 crc kubenswrapper[4922]: I1128 06:55:15.228163 4922 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/machine-config-server-xq92w" podStartSLOduration=5.22814299 podStartE2EDuration="5.22814299s" podCreationTimestamp="2025-11-28 06:55:10 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 06:55:15.224189849 +0000 UTC m=+160.144585431" watchObservedRunningTime="2025-11-28 06:55:15.22814299 +0000 UTC m=+160.148538572"
Nov 28 06:55:15 crc kubenswrapper[4922]: I1128 06:55:15.287992 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-sckww\" (UID: \"dd20079d-34c4-4ea6-920f-420a1d6bb863\") " pod="openshift-image-registry/image-registry-697d97f7c8-sckww"
Nov 28 06:55:15 crc kubenswrapper[4922]: E1128 06:55:15.288321 4922 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 06:55:15.788307785 +0000 UTC m=+160.708703367 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-sckww" (UID: "dd20079d-34c4-4ea6-920f-420a1d6bb863") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 28 06:55:15 crc kubenswrapper[4922]: I1128 06:55:15.361893 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-config-operator/machine-config-controller-84d6567774-qgppj"]
Nov 28 06:55:15 crc kubenswrapper[4922]: I1128 06:55:15.385413 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-2t2xx"]
Nov 28 06:55:15 crc kubenswrapper[4922]: I1128 06:55:15.391787 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 28 06:55:15 crc kubenswrapper[4922]: E1128 06:55:15.392075 4922 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 06:55:15.892058199 +0000 UTC m=+160.812453781 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 28 06:55:15 crc kubenswrapper[4922]: I1128 06:55:15.499072 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-sckww\" (UID: \"dd20079d-34c4-4ea6-920f-420a1d6bb863\") " pod="openshift-image-registry/image-registry-697d97f7c8-sckww"
Nov 28 06:55:15 crc kubenswrapper[4922]: E1128 06:55:15.500365 4922 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 06:55:16.000159436 +0000 UTC m=+160.920555008 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-sckww" (UID: "dd20079d-34c4-4ea6-920f-420a1d6bb863") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 28 06:55:15 crc kubenswrapper[4922]: I1128 06:55:15.548134 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-sfzn5"]
Nov 28 06:55:15 crc kubenswrapper[4922]: I1128 06:55:15.601882 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 28 06:55:15 crc kubenswrapper[4922]: E1128 06:55:15.602402 4922 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 06:55:16.102384197 +0000 UTC m=+161.022779779 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 28 06:55:15 crc kubenswrapper[4922]: I1128 06:55:15.635398 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console-operator/console-operator-58897d9998-z4zvd" event={"ID":"fd93cb7d-1de5-4e04-957a-a91d0bb26134","Type":"ContainerStarted","Data":"0f523b4929f658621aabb806b9d5039912fabc0eefc04698693addb7034d3ca4"}
Nov 28 06:55:15 crc kubenswrapper[4922]: I1128 06:55:15.635449 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console-operator/console-operator-58897d9998-z4zvd" event={"ID":"fd93cb7d-1de5-4e04-957a-a91d0bb26134","Type":"ContainerStarted","Data":"a400a9959a75a34237aca776bdfb2086a77936c597152ad6a2494299f2f0bda6"}
Nov 28 06:55:15 crc kubenswrapper[4922]: I1128 06:55:15.636353 4922 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console-operator/console-operator-58897d9998-z4zvd"
Nov 28 06:55:15 crc kubenswrapper[4922]: I1128 06:55:15.638539 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29405205-gln9g"]
Nov 28 06:55:15 crc kubenswrapper[4922]: I1128 06:55:15.640339 4922 patch_prober.go:28] interesting pod/console-operator-58897d9998-z4zvd container/console-operator namespace/openshift-console-operator: Readiness probe status=failure output="Get \"https://10.217.0.38:8443/readyz\": dial tcp 10.217.0.38:8443: connect: connection refused" start-of-body=
Nov 28 06:55:15 crc kubenswrapper[4922]: I1128 06:55:15.640389 4922 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console-operator/console-operator-58897d9998-z4zvd" podUID="fd93cb7d-1de5-4e04-957a-a91d0bb26134" containerName="console-operator" probeResult="failure" output="Get \"https://10.217.0.38:8443/readyz\": dial tcp 10.217.0.38:8443: connect: connection refused"
Nov 28 06:55:15 crc kubenswrapper[4922]: I1128 06:55:15.678883 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-etcd-operator/etcd-operator-b45778765-m6bkw"]
Nov 28 06:55:15 crc kubenswrapper[4922]: I1128 06:55:15.684864 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-pp742" event={"ID":"6b8f1ab0-54be-4dad-97b3-a69b288dea67","Type":"ContainerStarted","Data":"8b292a8b701f28b30b37dfd7a4fe99f9f06827d94f37bc41b27fc58f35d7a3e2"}
Nov 28 06:55:15 crc kubenswrapper[4922]: I1128 06:55:15.704253 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-sckww\" (UID: \"dd20079d-34c4-4ea6-920f-420a1d6bb863\") " pod="openshift-image-registry/image-registry-697d97f7c8-sckww"
Nov 28 06:55:15 crc kubenswrapper[4922]: E1128 06:55:15.704551 4922 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 06:55:16.204537677 +0000 UTC m=+161.124933259 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-sckww" (UID: "dd20079d-34c4-4ea6-920f-420a1d6bb863") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 28 06:55:15 crc kubenswrapper[4922]: I1128 06:55:15.705947 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-admission-controller-857f4d67dd-98r22" event={"ID":"d0751583-7dc1-4547-9240-39d874a6ca87","Type":"ContainerStarted","Data":"68d82a8f77080bda8dfd39210501e8ea6db686d2197be9f242550a311f4ae278"}
Nov 28 06:55:15 crc kubenswrapper[4922]: I1128 06:55:15.722409 4922 patch_prober.go:28] interesting pod/router-default-5444994796-xbhv9 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Nov 28 06:55:15 crc kubenswrapper[4922]: [-]has-synced failed: reason withheld
Nov 28 06:55:15 crc kubenswrapper[4922]: [+]process-running ok
Nov 28 06:55:15 crc kubenswrapper[4922]: healthz check failed
Nov 28 06:55:15 crc kubenswrapper[4922]: I1128 06:55:15.722464 4922 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-xbhv9" podUID="87cf54e1-7498-430e-a517-6658bd9ce547" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Nov 28 06:55:15 crc kubenswrapper[4922]: I1128 06:55:15.724425 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver/apiserver-76f77b778f-ht2sn" event={"ID":"468ca9dc-6af3-47ae-8c7c-b9338ceae695","Type":"ContainerStarted","Data":"80ef66c01e0ae1b1ab60bf33a88fb5ef7d8a72fa40785ae6474a77dd2246a466"}
Nov 28 06:55:15 crc kubenswrapper[4922]: I1128 06:55:15.736258 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-8nrlh"]
Nov 28 06:55:15 crc kubenswrapper[4922]: I1128 06:55:15.753751 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-558db77b4-bqwm2" event={"ID":"d952449e-c825-43d3-a591-0be473db6a53","Type":"ContainerStarted","Data":"b1ef3378b6d83028bdda421db6b41b59b763fc32d7b520b0bf701c78a42fccae"}
Nov 28 06:55:15 crc kubenswrapper[4922]: I1128 06:55:15.754972 4922 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-authentication/oauth-openshift-558db77b4-bqwm2"
Nov 28 06:55:15 crc kubenswrapper[4922]: I1128 06:55:15.777424 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication-operator/authentication-operator-69f744f599-fzxnn" event={"ID":"7842430c-0bd1-459a-9840-f4c4d31baa52","Type":"ContainerStarted","Data":"36152730ee275a5f171ba3553d7b10a947976553a5037b9c3a94a21cab970157"}
Nov 28 06:55:15 crc kubenswrapper[4922]: I1128 06:55:15.777466 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication-operator/authentication-operator-69f744f599-fzxnn" event={"ID":"7842430c-0bd1-459a-9840-f4c4d31baa52","Type":"ContainerStarted","Data":"ff007c955d4e8c76f672902c29fff55885bb3a868ea737fd9999c7fca1b46a56"}
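The "Observed pod startup duration" lines are simple arithmetic over the timestamps they print: podStartSLOduration is watchObservedRunningTime minus podCreationTimestamp, and the zero "0001-01-01" pulling timestamps mean no image pull was observed for the pod. Recomputing the machine-config-server-xq92w figure from the values logged above:

    package main

    import (
        "fmt"
        "time"
    )

    func main() {
        // Timestamps copied from the machine-config-server-xq92w line above.
        created, _ := time.Parse("2006-01-02 15:04:05 -0700 MST",
            "2025-11-28 06:55:10 +0000 UTC")
        observed, _ := time.Parse("2006-01-02 15:04:05.999999999 -0700 MST",
            "2025-11-28 06:55:15.22814299 +0000 UTC")
        // Prints podStartSLOduration=5.22814299, matching the log.
        fmt.Printf("podStartSLOduration=%.8f\n", observed.Sub(created).Seconds())
    }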
Nov 28 06:55:15 crc kubenswrapper[4922]: I1128 06:55:15.796443 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-dns-operator/dns-operator-744455d44c-s8ws4"]
Nov 28 06:55:15 crc kubenswrapper[4922]: I1128 06:55:15.805326 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 28 06:55:15 crc kubenswrapper[4922]: E1128 06:55:15.806623 4922 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 06:55:16.306605275 +0000 UTC m=+161.227000857 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 28 06:55:15 crc kubenswrapper[4922]: I1128 06:55:15.812257 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-service-ca-operator/service-ca-operator-777779d784-hgtbm"]
Nov 28 06:55:15 crc kubenswrapper[4922]: I1128 06:55:15.818241 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-ln9mh" event={"ID":"059ac1e5-20d6-4a32-846e-d427680a560f","Type":"ContainerStarted","Data":"eb19f9610f6f71a3d731ca4cede484031fdb29bae92a5da19f42c44e084361e0"}
Nov 28 06:55:15 crc kubenswrapper[4922]: I1128 06:55:15.903992 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/machine-api-operator-5694c8668f-rllbn" event={"ID":"b5c2983d-2d59-4691-b2cd-130bfcd3e18c","Type":"ContainerStarted","Data":"d030e9ed5ab4a526e4d62816a05c1c82cee6ae8bf8477c534b65f566c2876725"}
Nov 28 06:55:15 crc kubenswrapper[4922]: I1128 06:55:15.905504 4922 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-authentication/oauth-openshift-558db77b4-bqwm2" podStartSLOduration=141.905493103 podStartE2EDuration="2m21.905493103s" podCreationTimestamp="2025-11-28 06:52:54 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 06:55:15.904920917 +0000 UTC m=+160.825316499" watchObservedRunningTime="2025-11-28 06:55:15.905493103 +0000 UTC m=+160.825888685"
Nov 28 06:55:15 crc kubenswrapper[4922]: I1128 06:55:15.908672 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-sckww\" (UID: \"dd20079d-34c4-4ea6-920f-420a1d6bb863\") " pod="openshift-image-registry/image-registry-697d97f7c8-sckww"
Nov 28 06:55:15 crc kubenswrapper[4922]: E1128 06:55:15.910490 4922 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 06:55:16.410470152 +0000 UTC m=+161.330865734 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-sckww" (UID: "dd20079d-34c4-4ea6-920f-420a1d6bb863") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 28 06:55:15 crc kubenswrapper[4922]: I1128 06:55:15.941265 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-gzqmt" event={"ID":"e77ea863-52c9-4f43-968f-73ba31a6b0de","Type":"ContainerStarted","Data":"f54f25c41c9b7e216cbb1e332b92aea3bdd6139fd170f7f5da47304d751e2c31"}
Nov 28 06:55:15 crc kubenswrapper[4922]: I1128 06:55:15.941605 4922 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-authentication-operator/authentication-operator-69f744f599-fzxnn" podStartSLOduration=142.941585633 podStartE2EDuration="2m22.941585633s" podCreationTimestamp="2025-11-28 06:52:53 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 06:55:15.940482723 +0000 UTC m=+160.860878325" watchObservedRunningTime="2025-11-28 06:55:15.941585633 +0000 UTC m=+160.861981215"
Nov 28 06:55:15 crc kubenswrapper[4922]: I1128 06:55:15.960624 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-ingress-canary/ingress-canary-p5q7c"]
Nov 28 06:55:15 crc kubenswrapper[4922]: I1128 06:55:15.964609 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-nm899" event={"ID":"65e269c0-8d17-410e-ae5c-4ff9ba053bf9","Type":"ContainerStarted","Data":"4c3cec9918ba48f08fe1613d6c117d196e409316340465ad638d81726d489b09"}
Nov 28 06:55:15 crc kubenswrapper[4922]: I1128 06:55:15.981028 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-storage-version-migrator/migrator-59844c95c7-54gpd"]
Nov 28 06:55:16 crc kubenswrapper[4922]: I1128 06:55:16.004960 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-5pc2z" event={"ID":"c16ef766-7ac2-43bc-a1fa-ea9fc1e61edf","Type":"ContainerStarted","Data":"d71b52d125b25de2187af4c200389c488b7533ef27d4ecb9c11f491843723187"}
Nov 28 06:55:16 crc kubenswrapper[4922]: I1128 06:55:16.063544 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-5pc2z" event={"ID":"c16ef766-7ac2-43bc-a1fa-ea9fc1e61edf","Type":"ContainerStarted","Data":"a6dd9104674dd2fca246fb70566716430e7a792a9111f307fd7c808b25b1f514"}
Nov 28 06:55:16 crc kubenswrapper[4922]: E1128 06:55:16.044165 4922 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 06:55:16.544131834 +0000 UTC m=+161.464527416 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 28 06:55:16 crc kubenswrapper[4922]: I1128 06:55:16.043880 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 28 06:55:16 crc kubenswrapper[4922]: I1128 06:55:16.065041 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-sckww\" (UID: \"dd20079d-34c4-4ea6-920f-420a1d6bb863\") " pod="openshift-image-registry/image-registry-697d97f7c8-sckww"
Nov 28 06:55:16 crc kubenswrapper[4922]: E1128 06:55:16.068879 4922 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 06:55:16.568851877 +0000 UTC m=+161.489247449 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-sckww" (UID: "dd20079d-34c4-4ea6-920f-420a1d6bb863") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 28 06:55:16 crc kubenswrapper[4922]: I1128 06:55:16.096396 4922 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-ln9mh" podStartSLOduration=142.096367936 podStartE2EDuration="2m22.096367936s" podCreationTimestamp="2025-11-28 06:52:54 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 06:55:16.045576594 +0000 UTC m=+160.965972186" watchObservedRunningTime="2025-11-28 06:55:16.096367936 +0000 UTC m=+161.016763508"
Nov 28 06:55:16 crc kubenswrapper[4922]: I1128 06:55:16.112671 4922 generic.go:334] "Generic (PLEG): container finished" podID="da14c12f-6323-4600-9d9e-b2e5d53e1ecb" containerID="7987c2ea97c14a4ca4f6f2d66856bab881311be1bf699babcb89650689143cad" exitCode=0
Nov 28 06:55:16 crc kubenswrapper[4922]: I1128 06:55:16.113345 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-config-operator/openshift-config-operator-7777fb866f-4gt9m" event={"ID":"da14c12f-6323-4600-9d9e-b2e5d53e1ecb","Type":"ContainerDied","Data":"7987c2ea97c14a4ca4f6f2d66856bab881311be1bf699babcb89650689143cad"}
Nov 28 06:55:16 crc kubenswrapper[4922]: I1128 06:55:16.149809 4922 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-console-operator/console-operator-58897d9998-z4zvd" podStartSLOduration=142.149789232 podStartE2EDuration="2m22.149789232s" podCreationTimestamp="2025-11-28 06:52:54 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 06:55:16.08149431 +0000 UTC m=+161.001889892" watchObservedRunningTime="2025-11-28 06:55:16.149789232 +0000 UTC m=+161.070184814"
Nov 28 06:55:16 crc kubenswrapper[4922]: I1128 06:55:16.151975 4922 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-pp742" podStartSLOduration=142.151969383 podStartE2EDuration="2m22.151969383s" podCreationTimestamp="2025-11-28 06:52:54 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 06:55:16.141962763 +0000 UTC m=+161.062358345" watchObservedRunningTime="2025-11-28 06:55:16.151969383 +0000 UTC m=+161.072364955"
Nov 28 06:55:16 crc kubenswrapper[4922]: I1128 06:55:16.155388 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-ingress-operator/ingress-operator-5b745b69d9-d6r4k"]
Nov 28 06:55:16 crc kubenswrapper[4922]: I1128 06:55:16.155425 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-9t6kr" event={"ID":"d58dc2f1-e069-4b63-8371-7c6b2735adab","Type":"ContainerStarted","Data":"e450414a89daffff87a00bc073a0394b59ed1e145b852498147dc19fec72a93c"}
Nov 28 06:55:16 crc kubenswrapper[4922]: I1128 06:55:16.162641 4922 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-authentication/oauth-openshift-558db77b4-bqwm2"
Nov 28 06:55:16 crc kubenswrapper[4922]: I1128 06:55:16.166310 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 28 06:55:16 crc kubenswrapper[4922]: E1128 06:55:16.167064 4922 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 06:55:16.667046856 +0000 UTC m=+161.587442438 (durationBeforeRetry 500ms).
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 06:55:16 crc kubenswrapper[4922]: I1128 06:55:16.177601 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-2t2xx" event={"ID":"7a50e5be-5b15-472d-a504-3dc449b474e6","Type":"ContainerStarted","Data":"07bc42b9e7fb2ec96b70f7916bdd2a626738382e3bc611dca594d5ac1bce6955"} Nov 28 06:55:16 crc kubenswrapper[4922]: I1128 06:55:16.178250 4922 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/marketplace-operator-79b997595-2t2xx" Nov 28 06:55:16 crc kubenswrapper[4922]: I1128 06:55:16.179781 4922 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-lppzs"] Nov 28 06:55:16 crc kubenswrapper[4922]: I1128 06:55:16.180738 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-lppzs" Nov 28 06:55:16 crc kubenswrapper[4922]: I1128 06:55:16.183118 4922 patch_prober.go:28] interesting pod/marketplace-operator-79b997595-2t2xx container/marketplace-operator namespace/openshift-marketplace: Readiness probe status=failure output="Get \"http://10.217.0.24:8080/healthz\": dial tcp 10.217.0.24:8080: connect: connection refused" start-of-body= Nov 28 06:55:16 crc kubenswrapper[4922]: I1128 06:55:16.183168 4922 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-marketplace/marketplace-operator-79b997595-2t2xx" podUID="7a50e5be-5b15-472d-a504-3dc449b474e6" containerName="marketplace-operator" probeResult="failure" output="Get \"http://10.217.0.24:8080/healthz\": dial tcp 10.217.0.24:8080: connect: connection refused" Nov 28 06:55:16 crc kubenswrapper[4922]: I1128 06:55:16.187045 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-qgppj" event={"ID":"4fa6813c-f797-44b6-8779-7cfc802379fc","Type":"ContainerStarted","Data":"58f8c931c747136bc9c99d1d81bd8105601bc42e41334c3aed1d6270a8697a89"} Nov 28 06:55:16 crc kubenswrapper[4922]: I1128 06:55:16.187385 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"certified-operators-dockercfg-4rs5g" Nov 28 06:55:16 crc kubenswrapper[4922]: I1128 06:55:16.188511 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["hostpath-provisioner/csi-hostpathplugin-djvcb"] Nov 28 06:55:16 crc kubenswrapper[4922]: I1128 06:55:16.202788 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-service-ca/service-ca-9c57cc56f-k2x5w"] Nov 28 06:55:16 crc kubenswrapper[4922]: I1128 06:55:16.209287 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-r4frz"] Nov 28 06:55:16 crc kubenswrapper[4922]: I1128 06:55:16.209512 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-98sbt"] Nov 28 06:55:16 crc kubenswrapper[4922]: I1128 06:55:16.233941 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-f9d7485db-h297g"] Nov 28 
06:55:16 crc kubenswrapper[4922]: I1128 06:55:16.233989 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-lppzs"] Nov 28 06:55:16 crc kubenswrapper[4922]: I1128 06:55:16.240347 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-dns/dns-default-62v62"] Nov 28 06:55:16 crc kubenswrapper[4922]: I1128 06:55:16.251547 4922 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-gzqmt" podStartSLOduration=142.251415987 podStartE2EDuration="2m22.251415987s" podCreationTimestamp="2025-11-28 06:52:54 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 06:55:16.195896493 +0000 UTC m=+161.116292075" watchObservedRunningTime="2025-11-28 06:55:16.251415987 +0000 UTC m=+161.171811569" Nov 28 06:55:16 crc kubenswrapper[4922]: I1128 06:55:16.260648 4922 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-nm899" podStartSLOduration=142.260630615 podStartE2EDuration="2m22.260630615s" podCreationTimestamp="2025-11-28 06:52:54 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 06:55:16.246728916 +0000 UTC m=+161.167124498" watchObservedRunningTime="2025-11-28 06:55:16.260630615 +0000 UTC m=+161.181026197" Nov 28 06:55:16 crc kubenswrapper[4922]: I1128 06:55:16.262430 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-7wtcp"] Nov 28 06:55:16 crc kubenswrapper[4922]: I1128 06:55:16.267971 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xc26q\" (UniqueName: \"kubernetes.io/projected/b82398ae-40c9-40dc-8775-65f999dac1a8-kube-api-access-xc26q\") pod \"certified-operators-lppzs\" (UID: \"b82398ae-40c9-40dc-8775-65f999dac1a8\") " pod="openshift-marketplace/certified-operators-lppzs" Nov 28 06:55:16 crc kubenswrapper[4922]: I1128 06:55:16.268064 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b82398ae-40c9-40dc-8775-65f999dac1a8-utilities\") pod \"certified-operators-lppzs\" (UID: \"b82398ae-40c9-40dc-8775-65f999dac1a8\") " pod="openshift-marketplace/certified-operators-lppzs" Nov 28 06:55:16 crc kubenswrapper[4922]: I1128 06:55:16.268134 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-sckww\" (UID: \"dd20079d-34c4-4ea6-920f-420a1d6bb863\") " pod="openshift-image-registry/image-registry-697d97f7c8-sckww" Nov 28 06:55:16 crc kubenswrapper[4922]: I1128 06:55:16.268194 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b82398ae-40c9-40dc-8775-65f999dac1a8-catalog-content\") pod \"certified-operators-lppzs\" (UID: \"b82398ae-40c9-40dc-8775-65f999dac1a8\") " pod="openshift-marketplace/certified-operators-lppzs" Nov 28 06:55:16 crc kubenswrapper[4922]: E1128 06:55:16.270568 4922 
nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 06:55:16.770550403 +0000 UTC m=+161.690945985 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-sckww" (UID: "dd20079d-34c4-4ea6-920f-420a1d6bb863") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 06:55:16 crc kubenswrapper[4922]: I1128 06:55:16.296507 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-h7pf2" event={"ID":"fb28612c-20e3-4319-9db8-dae18a593a1e","Type":"ContainerStarted","Data":"7fc297353fdf7ea79d11758cfbe5fc2272c874a762eaf99f751fb18dee20255b"} Nov 28 06:55:16 crc kubenswrapper[4922]: I1128 06:55:16.296653 4922 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-h7pf2" Nov 28 06:55:16 crc kubenswrapper[4922]: I1128 06:55:16.314416 4922 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-jf9pl" Nov 28 06:55:16 crc kubenswrapper[4922]: I1128 06:55:16.344156 4922 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-5pc2z" podStartSLOduration=142.344139863 podStartE2EDuration="2m22.344139863s" podCreationTimestamp="2025-11-28 06:52:54 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 06:55:16.342915318 +0000 UTC m=+161.263310900" watchObservedRunningTime="2025-11-28 06:55:16.344139863 +0000 UTC m=+161.264535435" Nov 28 06:55:16 crc kubenswrapper[4922]: I1128 06:55:16.369154 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 06:55:16 crc kubenswrapper[4922]: I1128 06:55:16.369424 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b82398ae-40c9-40dc-8775-65f999dac1a8-catalog-content\") pod \"certified-operators-lppzs\" (UID: \"b82398ae-40c9-40dc-8775-65f999dac1a8\") " pod="openshift-marketplace/certified-operators-lppzs" Nov 28 06:55:16 crc kubenswrapper[4922]: I1128 06:55:16.369467 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xc26q\" (UniqueName: \"kubernetes.io/projected/b82398ae-40c9-40dc-8775-65f999dac1a8-kube-api-access-xc26q\") pod \"certified-operators-lppzs\" (UID: \"b82398ae-40c9-40dc-8775-65f999dac1a8\") " pod="openshift-marketplace/certified-operators-lppzs" Nov 28 06:55:16 crc kubenswrapper[4922]: I1128 06:55:16.369537 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: 
\"kubernetes.io/empty-dir/b82398ae-40c9-40dc-8775-65f999dac1a8-utilities\") pod \"certified-operators-lppzs\" (UID: \"b82398ae-40c9-40dc-8775-65f999dac1a8\") " pod="openshift-marketplace/certified-operators-lppzs" Nov 28 06:55:16 crc kubenswrapper[4922]: E1128 06:55:16.370110 4922 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 06:55:16.870066869 +0000 UTC m=+161.790462451 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 06:55:16 crc kubenswrapper[4922]: I1128 06:55:16.372520 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b82398ae-40c9-40dc-8775-65f999dac1a8-utilities\") pod \"certified-operators-lppzs\" (UID: \"b82398ae-40c9-40dc-8775-65f999dac1a8\") " pod="openshift-marketplace/certified-operators-lppzs" Nov 28 06:55:16 crc kubenswrapper[4922]: I1128 06:55:16.374908 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b82398ae-40c9-40dc-8775-65f999dac1a8-catalog-content\") pod \"certified-operators-lppzs\" (UID: \"b82398ae-40c9-40dc-8775-65f999dac1a8\") " pod="openshift-marketplace/certified-operators-lppzs" Nov 28 06:55:16 crc kubenswrapper[4922]: I1128 06:55:16.422184 4922 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/marketplace-operator-79b997595-2t2xx" podStartSLOduration=142.422162207 podStartE2EDuration="2m22.422162207s" podCreationTimestamp="2025-11-28 06:52:54 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 06:55:16.421043746 +0000 UTC m=+161.341439328" watchObservedRunningTime="2025-11-28 06:55:16.422162207 +0000 UTC m=+161.342557789" Nov 28 06:55:16 crc kubenswrapper[4922]: I1128 06:55:16.438542 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xc26q\" (UniqueName: \"kubernetes.io/projected/b82398ae-40c9-40dc-8775-65f999dac1a8-kube-api-access-xc26q\") pod \"certified-operators-lppzs\" (UID: \"b82398ae-40c9-40dc-8775-65f999dac1a8\") " pod="openshift-marketplace/certified-operators-lppzs" Nov 28 06:55:16 crc kubenswrapper[4922]: I1128 06:55:16.472051 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-sckww\" (UID: \"dd20079d-34c4-4ea6-920f-420a1d6bb863\") " pod="openshift-image-registry/image-registry-697d97f7c8-sckww" Nov 28 06:55:16 crc kubenswrapper[4922]: E1128 06:55:16.472470 4922 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. 
No retries permitted until 2025-11-28 06:55:16.972454196 +0000 UTC m=+161.892849778 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-sckww" (UID: "dd20079d-34c4-4ea6-920f-420a1d6bb863") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 06:55:16 crc kubenswrapper[4922]: I1128 06:55:16.473873 4922 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-h7pf2" podStartSLOduration=142.473862255 podStartE2EDuration="2m22.473862255s" podCreationTimestamp="2025-11-28 06:52:54 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 06:55:16.472831896 +0000 UTC m=+161.393227488" watchObservedRunningTime="2025-11-28 06:55:16.473862255 +0000 UTC m=+161.394257837" Nov 28 06:55:16 crc kubenswrapper[4922]: I1128 06:55:16.553581 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-lppzs" Nov 28 06:55:16 crc kubenswrapper[4922]: I1128 06:55:16.590311 4922 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-ncbvn"] Nov 28 06:55:16 crc kubenswrapper[4922]: I1128 06:55:16.608803 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 06:55:16 crc kubenswrapper[4922]: I1128 06:55:16.609011 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-ncbvn"] Nov 28 06:55:16 crc kubenswrapper[4922]: E1128 06:55:16.609191 4922 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 06:55:17.109174002 +0000 UTC m=+162.029569584 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 06:55:16 crc kubenswrapper[4922]: I1128 06:55:16.618022 4922 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-ncbvn" Nov 28 06:55:16 crc kubenswrapper[4922]: I1128 06:55:16.646712 4922 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-9t6kr" podStartSLOduration=142.646692203 podStartE2EDuration="2m22.646692203s" podCreationTimestamp="2025-11-28 06:52:54 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 06:55:16.640266663 +0000 UTC m=+161.560662255" watchObservedRunningTime="2025-11-28 06:55:16.646692203 +0000 UTC m=+161.567087785" Nov 28 06:55:16 crc kubenswrapper[4922]: I1128 06:55:16.711488 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d2e80403-da8b-49c0-9007-aa0c6c1be47a-utilities\") pod \"certified-operators-ncbvn\" (UID: \"d2e80403-da8b-49c0-9007-aa0c6c1be47a\") " pod="openshift-marketplace/certified-operators-ncbvn" Nov 28 06:55:16 crc kubenswrapper[4922]: I1128 06:55:16.711546 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tvhbg\" (UniqueName: \"kubernetes.io/projected/d2e80403-da8b-49c0-9007-aa0c6c1be47a-kube-api-access-tvhbg\") pod \"certified-operators-ncbvn\" (UID: \"d2e80403-da8b-49c0-9007-aa0c6c1be47a\") " pod="openshift-marketplace/certified-operators-ncbvn" Nov 28 06:55:16 crc kubenswrapper[4922]: I1128 06:55:16.711597 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d2e80403-da8b-49c0-9007-aa0c6c1be47a-catalog-content\") pod \"certified-operators-ncbvn\" (UID: \"d2e80403-da8b-49c0-9007-aa0c6c1be47a\") " pod="openshift-marketplace/certified-operators-ncbvn" Nov 28 06:55:16 crc kubenswrapper[4922]: I1128 06:55:16.711627 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-sckww\" (UID: \"dd20079d-34c4-4ea6-920f-420a1d6bb863\") " pod="openshift-image-registry/image-registry-697d97f7c8-sckww" Nov 28 06:55:16 crc kubenswrapper[4922]: E1128 06:55:16.711916 4922 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 06:55:17.211904569 +0000 UTC m=+162.132300151 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-sckww" (UID: "dd20079d-34c4-4ea6-920f-420a1d6bb863") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 06:55:16 crc kubenswrapper[4922]: I1128 06:55:16.713652 4922 patch_prober.go:28] interesting pod/router-default-5444994796-xbhv9 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 28 06:55:16 crc kubenswrapper[4922]: [-]has-synced failed: reason withheld Nov 28 06:55:16 crc kubenswrapper[4922]: [+]process-running ok Nov 28 06:55:16 crc kubenswrapper[4922]: healthz check failed Nov 28 06:55:16 crc kubenswrapper[4922]: I1128 06:55:16.713721 4922 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-xbhv9" podUID="87cf54e1-7498-430e-a517-6658bd9ce547" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 28 06:55:16 crc kubenswrapper[4922]: I1128 06:55:16.776647 4922 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-fww6r"] Nov 28 06:55:16 crc kubenswrapper[4922]: I1128 06:55:16.780093 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-fww6r" Nov 28 06:55:16 crc kubenswrapper[4922]: I1128 06:55:16.782552 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"community-operators-dockercfg-dmngl" Nov 28 06:55:16 crc kubenswrapper[4922]: I1128 06:55:16.813353 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 06:55:16 crc kubenswrapper[4922]: I1128 06:55:16.813797 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d2e80403-da8b-49c0-9007-aa0c6c1be47a-catalog-content\") pod \"certified-operators-ncbvn\" (UID: \"d2e80403-da8b-49c0-9007-aa0c6c1be47a\") " pod="openshift-marketplace/certified-operators-ncbvn" Nov 28 06:55:16 crc kubenswrapper[4922]: I1128 06:55:16.813877 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d2e80403-da8b-49c0-9007-aa0c6c1be47a-utilities\") pod \"certified-operators-ncbvn\" (UID: \"d2e80403-da8b-49c0-9007-aa0c6c1be47a\") " pod="openshift-marketplace/certified-operators-ncbvn" Nov 28 06:55:16 crc kubenswrapper[4922]: I1128 06:55:16.813912 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tvhbg\" (UniqueName: \"kubernetes.io/projected/d2e80403-da8b-49c0-9007-aa0c6c1be47a-kube-api-access-tvhbg\") pod \"certified-operators-ncbvn\" (UID: \"d2e80403-da8b-49c0-9007-aa0c6c1be47a\") " pod="openshift-marketplace/certified-operators-ncbvn" Nov 28 06:55:16 crc kubenswrapper[4922]: E1128 06:55:16.814189 4922 nestedpendingoperations.go:348] Operation for 
"{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 06:55:17.314173052 +0000 UTC m=+162.234568634 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 06:55:16 crc kubenswrapper[4922]: I1128 06:55:16.814564 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d2e80403-da8b-49c0-9007-aa0c6c1be47a-catalog-content\") pod \"certified-operators-ncbvn\" (UID: \"d2e80403-da8b-49c0-9007-aa0c6c1be47a\") " pod="openshift-marketplace/certified-operators-ncbvn" Nov 28 06:55:16 crc kubenswrapper[4922]: I1128 06:55:16.814791 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d2e80403-da8b-49c0-9007-aa0c6c1be47a-utilities\") pod \"certified-operators-ncbvn\" (UID: \"d2e80403-da8b-49c0-9007-aa0c6c1be47a\") " pod="openshift-marketplace/certified-operators-ncbvn" Nov 28 06:55:16 crc kubenswrapper[4922]: I1128 06:55:16.820779 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-fww6r"] Nov 28 06:55:16 crc kubenswrapper[4922]: I1128 06:55:16.865431 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tvhbg\" (UniqueName: \"kubernetes.io/projected/d2e80403-da8b-49c0-9007-aa0c6c1be47a-kube-api-access-tvhbg\") pod \"certified-operators-ncbvn\" (UID: \"d2e80403-da8b-49c0-9007-aa0c6c1be47a\") " pod="openshift-marketplace/certified-operators-ncbvn" Nov 28 06:55:16 crc kubenswrapper[4922]: I1128 06:55:16.914714 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/23771385-c219-4713-9c79-d4802b2f13a7-catalog-content\") pod \"community-operators-fww6r\" (UID: \"23771385-c219-4713-9c79-d4802b2f13a7\") " pod="openshift-marketplace/community-operators-fww6r" Nov 28 06:55:16 crc kubenswrapper[4922]: I1128 06:55:16.915029 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7w28p\" (UniqueName: \"kubernetes.io/projected/23771385-c219-4713-9c79-d4802b2f13a7-kube-api-access-7w28p\") pod \"community-operators-fww6r\" (UID: \"23771385-c219-4713-9c79-d4802b2f13a7\") " pod="openshift-marketplace/community-operators-fww6r" Nov 28 06:55:16 crc kubenswrapper[4922]: I1128 06:55:16.915056 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-sckww\" (UID: \"dd20079d-34c4-4ea6-920f-420a1d6bb863\") " pod="openshift-image-registry/image-registry-697d97f7c8-sckww" Nov 28 06:55:16 crc kubenswrapper[4922]: I1128 06:55:16.915119 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" 
(UniqueName: \"kubernetes.io/empty-dir/23771385-c219-4713-9c79-d4802b2f13a7-utilities\") pod \"community-operators-fww6r\" (UID: \"23771385-c219-4713-9c79-d4802b2f13a7\") " pod="openshift-marketplace/community-operators-fww6r" Nov 28 06:55:16 crc kubenswrapper[4922]: E1128 06:55:16.915484 4922 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 06:55:17.415470488 +0000 UTC m=+162.335866060 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-sckww" (UID: "dd20079d-34c4-4ea6-920f-420a1d6bb863") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 06:55:16 crc kubenswrapper[4922]: I1128 06:55:16.960001 4922 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-vzlv9"] Nov 28 06:55:16 crc kubenswrapper[4922]: I1128 06:55:16.967816 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-vzlv9" Nov 28 06:55:16 crc kubenswrapper[4922]: I1128 06:55:16.977541 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-ncbvn" Nov 28 06:55:17 crc kubenswrapper[4922]: I1128 06:55:17.016920 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 06:55:17 crc kubenswrapper[4922]: E1128 06:55:17.017168 4922 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 06:55:17.517133334 +0000 UTC m=+162.437528916 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 06:55:17 crc kubenswrapper[4922]: I1128 06:55:17.017491 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/23771385-c219-4713-9c79-d4802b2f13a7-utilities\") pod \"community-operators-fww6r\" (UID: \"23771385-c219-4713-9c79-d4802b2f13a7\") " pod="openshift-marketplace/community-operators-fww6r" Nov 28 06:55:17 crc kubenswrapper[4922]: I1128 06:55:17.017653 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/23771385-c219-4713-9c79-d4802b2f13a7-catalog-content\") pod \"community-operators-fww6r\" (UID: \"23771385-c219-4713-9c79-d4802b2f13a7\") " pod="openshift-marketplace/community-operators-fww6r" Nov 28 06:55:17 crc kubenswrapper[4922]: I1128 06:55:17.017775 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-sckww\" (UID: \"dd20079d-34c4-4ea6-920f-420a1d6bb863\") " pod="openshift-image-registry/image-registry-697d97f7c8-sckww" Nov 28 06:55:17 crc kubenswrapper[4922]: I1128 06:55:17.017905 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7w28p\" (UniqueName: \"kubernetes.io/projected/23771385-c219-4713-9c79-d4802b2f13a7-kube-api-access-7w28p\") pod \"community-operators-fww6r\" (UID: \"23771385-c219-4713-9c79-d4802b2f13a7\") " pod="openshift-marketplace/community-operators-fww6r" Nov 28 06:55:17 crc kubenswrapper[4922]: I1128 06:55:17.018122 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/23771385-c219-4713-9c79-d4802b2f13a7-utilities\") pod \"community-operators-fww6r\" (UID: \"23771385-c219-4713-9c79-d4802b2f13a7\") " pod="openshift-marketplace/community-operators-fww6r" Nov 28 06:55:17 crc kubenswrapper[4922]: I1128 06:55:17.018899 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/23771385-c219-4713-9c79-d4802b2f13a7-catalog-content\") pod \"community-operators-fww6r\" (UID: \"23771385-c219-4713-9c79-d4802b2f13a7\") " pod="openshift-marketplace/community-operators-fww6r" Nov 28 06:55:17 crc kubenswrapper[4922]: E1128 06:55:17.019301 4922 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 06:55:17.519283804 +0000 UTC m=+162.439679386 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-sckww" (UID: "dd20079d-34c4-4ea6-920f-420a1d6bb863") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 06:55:17 crc kubenswrapper[4922]: I1128 06:55:17.066428 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7w28p\" (UniqueName: \"kubernetes.io/projected/23771385-c219-4713-9c79-d4802b2f13a7-kube-api-access-7w28p\") pod \"community-operators-fww6r\" (UID: \"23771385-c219-4713-9c79-d4802b2f13a7\") " pod="openshift-marketplace/community-operators-fww6r" Nov 28 06:55:17 crc kubenswrapper[4922]: I1128 06:55:17.085300 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-vzlv9"] Nov 28 06:55:17 crc kubenswrapper[4922]: I1128 06:55:17.114136 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-fww6r" Nov 28 06:55:17 crc kubenswrapper[4922]: I1128 06:55:17.118823 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 06:55:17 crc kubenswrapper[4922]: I1128 06:55:17.119253 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/09d238cf-e3a6-461a-bdf8-f598c91385a3-catalog-content\") pod \"community-operators-vzlv9\" (UID: \"09d238cf-e3a6-461a-bdf8-f598c91385a3\") " pod="openshift-marketplace/community-operators-vzlv9" Nov 28 06:55:17 crc kubenswrapper[4922]: I1128 06:55:17.119365 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sbj55\" (UniqueName: \"kubernetes.io/projected/09d238cf-e3a6-461a-bdf8-f598c91385a3-kube-api-access-sbj55\") pod \"community-operators-vzlv9\" (UID: \"09d238cf-e3a6-461a-bdf8-f598c91385a3\") " pod="openshift-marketplace/community-operators-vzlv9" Nov 28 06:55:17 crc kubenswrapper[4922]: I1128 06:55:17.119506 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/09d238cf-e3a6-461a-bdf8-f598c91385a3-utilities\") pod \"community-operators-vzlv9\" (UID: \"09d238cf-e3a6-461a-bdf8-f598c91385a3\") " pod="openshift-marketplace/community-operators-vzlv9" Nov 28 06:55:17 crc kubenswrapper[4922]: I1128 06:55:17.119629 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/709beb43-ed88-4a0a-b384-0c463e469964-metrics-certs\") pod \"network-metrics-daemon-9kfr9\" (UID: \"709beb43-ed88-4a0a-b384-0c463e469964\") " pod="openshift-multus/network-metrics-daemon-9kfr9" Nov 28 06:55:17 crc kubenswrapper[4922]: E1128 06:55:17.120428 4922 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. 
No retries permitted until 2025-11-28 06:55:17.620397155 +0000 UTC m=+162.540792737 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 06:55:17 crc kubenswrapper[4922]: I1128 06:55:17.134026 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/709beb43-ed88-4a0a-b384-0c463e469964-metrics-certs\") pod \"network-metrics-daemon-9kfr9\" (UID: \"709beb43-ed88-4a0a-b384-0c463e469964\") " pod="openshift-multus/network-metrics-daemon-9kfr9" Nov 28 06:55:17 crc kubenswrapper[4922]: I1128 06:55:17.142209 4922 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-9t6kr" Nov 28 06:55:17 crc kubenswrapper[4922]: I1128 06:55:17.142579 4922 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-9t6kr" Nov 28 06:55:17 crc kubenswrapper[4922]: I1128 06:55:17.182573 4922 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-9t6kr" Nov 28 06:55:17 crc kubenswrapper[4922]: I1128 06:55:17.227672 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-sckww\" (UID: \"dd20079d-34c4-4ea6-920f-420a1d6bb863\") " pod="openshift-image-registry/image-registry-697d97f7c8-sckww" Nov 28 06:55:17 crc kubenswrapper[4922]: I1128 06:55:17.227709 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/09d238cf-e3a6-461a-bdf8-f598c91385a3-catalog-content\") pod \"community-operators-vzlv9\" (UID: \"09d238cf-e3a6-461a-bdf8-f598c91385a3\") " pod="openshift-marketplace/community-operators-vzlv9" Nov 28 06:55:17 crc kubenswrapper[4922]: I1128 06:55:17.227728 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sbj55\" (UniqueName: \"kubernetes.io/projected/09d238cf-e3a6-461a-bdf8-f598c91385a3-kube-api-access-sbj55\") pod \"community-operators-vzlv9\" (UID: \"09d238cf-e3a6-461a-bdf8-f598c91385a3\") " pod="openshift-marketplace/community-operators-vzlv9" Nov 28 06:55:17 crc kubenswrapper[4922]: I1128 06:55:17.227745 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/09d238cf-e3a6-461a-bdf8-f598c91385a3-utilities\") pod \"community-operators-vzlv9\" (UID: \"09d238cf-e3a6-461a-bdf8-f598c91385a3\") " pod="openshift-marketplace/community-operators-vzlv9" Nov 28 06:55:17 crc kubenswrapper[4922]: I1128 06:55:17.228141 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/09d238cf-e3a6-461a-bdf8-f598c91385a3-utilities\") pod \"community-operators-vzlv9\" (UID: \"09d238cf-e3a6-461a-bdf8-f598c91385a3\") " pod="openshift-marketplace/community-operators-vzlv9" Nov 
28 06:55:17 crc kubenswrapper[4922]: E1128 06:55:17.228452 4922 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 06:55:17.728439179 +0000 UTC m=+162.648834761 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-sckww" (UID: "dd20079d-34c4-4ea6-920f-420a1d6bb863") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 06:55:17 crc kubenswrapper[4922]: I1128 06:55:17.228676 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/09d238cf-e3a6-461a-bdf8-f598c91385a3-catalog-content\") pod \"community-operators-vzlv9\" (UID: \"09d238cf-e3a6-461a-bdf8-f598c91385a3\") " pod="openshift-marketplace/community-operators-vzlv9" Nov 28 06:55:17 crc kubenswrapper[4922]: I1128 06:55:17.315886 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sbj55\" (UniqueName: \"kubernetes.io/projected/09d238cf-e3a6-461a-bdf8-f598c91385a3-kube-api-access-sbj55\") pod \"community-operators-vzlv9\" (UID: \"09d238cf-e3a6-461a-bdf8-f598c91385a3\") " pod="openshift-marketplace/community-operators-vzlv9" Nov 28 06:55:17 crc kubenswrapper[4922]: I1128 06:55:17.322341 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-9kfr9" Nov 28 06:55:17 crc kubenswrapper[4922]: I1128 06:55:17.336469 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-vzlv9" Nov 28 06:55:17 crc kubenswrapper[4922]: I1128 06:55:17.337672 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 06:55:17 crc kubenswrapper[4922]: E1128 06:55:17.338042 4922 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 06:55:17.838022638 +0000 UTC m=+162.758418220 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 06:55:17 crc kubenswrapper[4922]: I1128 06:55:17.384177 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/machine-api-operator-5694c8668f-rllbn" event={"ID":"b5c2983d-2d59-4691-b2cd-130bfcd3e18c","Type":"ContainerStarted","Data":"3ab2ed668f014af40417f4f6b425fc7a73311522e35af3d3e66d15d88dba2411"} Nov 28 06:55:17 crc kubenswrapper[4922]: I1128 06:55:17.391964 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/dns-default-62v62" event={"ID":"03dc5df6-dfb3-4839-9c45-71894d1b547b","Type":"ContainerStarted","Data":"609ab26fbd177f4b0790762d1bd784c3cdcff0153c7754e0a9a7cb0bb7a35d1e"} Nov 28 06:55:17 crc kubenswrapper[4922]: I1128 06:55:17.439042 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-sckww\" (UID: \"dd20079d-34c4-4ea6-920f-420a1d6bb863\") " pod="openshift-image-registry/image-registry-697d97f7c8-sckww" Nov 28 06:55:17 crc kubenswrapper[4922]: E1128 06:55:17.439411 4922 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 06:55:17.939397495 +0000 UTC m=+162.859793077 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-sckww" (UID: "dd20079d-34c4-4ea6-920f-420a1d6bb863") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 06:55:17 crc kubenswrapper[4922]: I1128 06:55:17.442328 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-d6r4k" event={"ID":"b55d7c5c-1fc1-4c9a-b82b-35dbabbbe8e7","Type":"ContainerStarted","Data":"5366493af2f10660dd95061bfce98063ceaebfa167ed28109273386f51853d3a"} Nov 28 06:55:17 crc kubenswrapper[4922]: I1128 06:55:17.457812 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-54gpd" event={"ID":"e2e57fd7-85d0-4872-8737-c6423b16b702","Type":"ContainerStarted","Data":"a5084d1504b291b213f2c07d48938ff9be24e8f6be69e79dd9fd1c3006edcf1f"} Nov 28 06:55:17 crc kubenswrapper[4922]: I1128 06:55:17.458201 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-54gpd" event={"ID":"e2e57fd7-85d0-4872-8737-c6423b16b702","Type":"ContainerStarted","Data":"11f17ef264e8777ef35374a584c208538485109fe9a5a34c2a2f9630124fe741"} Nov 28 06:55:17 crc kubenswrapper[4922]: I1128 06:55:17.494794 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-qgppj" event={"ID":"4fa6813c-f797-44b6-8779-7cfc802379fc","Type":"ContainerStarted","Data":"e830d35800f8154c664f6e398b4569feb41be99e76e2c47f5d4dadb4f67a60cc"} Nov 28 06:55:17 crc kubenswrapper[4922]: I1128 06:55:17.494859 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-qgppj" event={"ID":"4fa6813c-f797-44b6-8779-7cfc802379fc","Type":"ContainerStarted","Data":"f98b557faacc2ae8eaf7bd2475e74ae1bd486ce67c847901d180661a9c1c610b"} Nov 28 06:55:17 crc kubenswrapper[4922]: I1128 06:55:17.529747 4922 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-qgppj" podStartSLOduration=143.529728534 podStartE2EDuration="2m23.529728534s" podCreationTimestamp="2025-11-28 06:52:54 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 06:55:17.528707686 +0000 UTC m=+162.449103268" watchObservedRunningTime="2025-11-28 06:55:17.529728534 +0000 UTC m=+162.450124116" Nov 28 06:55:17 crc kubenswrapper[4922]: I1128 06:55:17.530273 4922 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-api/machine-api-operator-5694c8668f-rllbn" podStartSLOduration=143.530268729 podStartE2EDuration="2m23.530268729s" podCreationTimestamp="2025-11-28 06:52:54 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 06:55:17.430869847 +0000 UTC m=+162.351265429" watchObservedRunningTime="2025-11-28 06:55:17.530268729 +0000 UTC m=+162.450664311" Nov 28 06:55:17 crc kubenswrapper[4922]: I1128 06:55:17.546542 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-config-operator/openshift-config-operator-7777fb866f-4gt9m" event={"ID":"da14c12f-6323-4600-9d9e-b2e5d53e1ecb","Type":"ContainerStarted","Data":"6068e058fc7fc3023b2eb00d0ba3867f07093c0360a9e8e3199848f529a34771"} Nov 28 06:55:17 crc kubenswrapper[4922]: I1128 06:55:17.548006 4922 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-config-operator/openshift-config-operator-7777fb866f-4gt9m" Nov 28 06:55:17 crc kubenswrapper[4922]: I1128 06:55:17.548477 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 06:55:17 crc kubenswrapper[4922]: E1128 06:55:17.563731 4922 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 06:55:18.063695195 +0000 UTC m=+162.984090777 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 06:55:17 crc kubenswrapper[4922]: I1128 06:55:17.601948 4922 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-config-operator/openshift-config-operator-7777fb866f-4gt9m" podStartSLOduration=143.601919185 podStartE2EDuration="2m23.601919185s" podCreationTimestamp="2025-11-28 06:52:54 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 06:55:17.579526308 +0000 UTC m=+162.499921900" watchObservedRunningTime="2025-11-28 06:55:17.601919185 +0000 UTC m=+162.522314927" Nov 28 06:55:17 crc kubenswrapper[4922]: I1128 06:55:17.603535 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-lppzs"] Nov 28 06:55:17 crc kubenswrapper[4922]: I1128 06:55:17.621532 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-8nrlh" event={"ID":"cd6df89d-cec7-40af-acae-2b1308378819","Type":"ContainerStarted","Data":"63a13209e8a7e819c1b76b35b7f19c67b9ebf255dc38de8ccbba53673d116ae8"} Nov 28 06:55:17 crc kubenswrapper[4922]: I1128 06:55:17.621800 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-8nrlh" event={"ID":"cd6df89d-cec7-40af-acae-2b1308378819","Type":"ContainerStarted","Data":"1b55becc608446584c85735c7b80f244d12f2ffbda8ca5817b18d9764f59b394"} Nov 28 06:55:17 crc kubenswrapper[4922]: I1128 06:55:17.623114 4922 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-8nrlh" Nov 28 06:55:17 crc kubenswrapper[4922]: I1128 06:55:17.655399 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: 
\"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-sckww\" (UID: \"dd20079d-34c4-4ea6-920f-420a1d6bb863\") " pod="openshift-image-registry/image-registry-697d97f7c8-sckww" Nov 28 06:55:17 crc kubenswrapper[4922]: E1128 06:55:17.657481 4922 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 06:55:18.15746494 +0000 UTC m=+163.077860512 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-sckww" (UID: "dd20079d-34c4-4ea6-920f-420a1d6bb863") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 06:55:17 crc kubenswrapper[4922]: W1128 06:55:17.679889 4922 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podb82398ae_40c9_40dc_8775_65f999dac1a8.slice/crio-dfc73622d6241d40d6f03ac756d8c62d47f09fab94e72fb742028afafd600817 WatchSource:0}: Error finding container dfc73622d6241d40d6f03ac756d8c62d47f09fab94e72fb742028afafd600817: Status 404 returned error can't find the container with id dfc73622d6241d40d6f03ac756d8c62d47f09fab94e72fb742028afafd600817 Nov 28 06:55:17 crc kubenswrapper[4922]: I1128 06:55:17.684431 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-h7pf2" event={"ID":"fb28612c-20e3-4319-9db8-dae18a593a1e","Type":"ContainerStarted","Data":"2879f824dc82fae10f2537df2458107eb17a5d65252897f1a1892d23f6c07abf"} Nov 28 06:55:17 crc kubenswrapper[4922]: I1128 06:55:17.692469 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-service-ca/service-ca-9c57cc56f-k2x5w" event={"ID":"83a5410b-be02-42e8-9b5d-7d476dc779fc","Type":"ContainerStarted","Data":"a500dcb2bad715f78de5f9ed2a54fbd0a0f24f60e58a2d74ad1bf53a4b6d1d74"} Nov 28 06:55:17 crc kubenswrapper[4922]: I1128 06:55:17.692520 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-service-ca/service-ca-9c57cc56f-k2x5w" event={"ID":"83a5410b-be02-42e8-9b5d-7d476dc779fc","Type":"ContainerStarted","Data":"cccc9d54a41cc387417c075da08f5dad7bfabdc3021334284d8d88e8e0309672"} Nov 28 06:55:17 crc kubenswrapper[4922]: I1128 06:55:17.713457 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-sfzn5" event={"ID":"72c05220-a1c6-4f3a-a8f1-c588a4e062b0","Type":"ContainerStarted","Data":"a04d21eee936b955617e72bd1553e1026f37ca0642b52cb91a1fa91da6d6f164"} Nov 28 06:55:17 crc kubenswrapper[4922]: I1128 06:55:17.713526 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-sfzn5" event={"ID":"72c05220-a1c6-4f3a-a8f1-c588a4e062b0","Type":"ContainerStarted","Data":"cd14e2bf3672bbfd80c7d5330b07d281bb7cd61d7f83ae9f5546c45514345373"} Nov 28 06:55:17 crc kubenswrapper[4922]: I1128 06:55:17.715593 4922 patch_prober.go:28] interesting pod/router-default-5444994796-xbhv9 container/router namespace/openshift-ingress: Startup probe status=failure 
output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 28 06:55:17 crc kubenswrapper[4922]: [-]has-synced failed: reason withheld Nov 28 06:55:17 crc kubenswrapper[4922]: [+]process-running ok Nov 28 06:55:17 crc kubenswrapper[4922]: healthz check failed Nov 28 06:55:17 crc kubenswrapper[4922]: I1128 06:55:17.715672 4922 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-xbhv9" podUID="87cf54e1-7498-430e-a517-6658bd9ce547" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 28 06:55:17 crc kubenswrapper[4922]: I1128 06:55:17.716839 4922 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-8nrlh" podStartSLOduration=143.716820882 podStartE2EDuration="2m23.716820882s" podCreationTimestamp="2025-11-28 06:52:54 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 06:55:17.714394024 +0000 UTC m=+162.634789606" watchObservedRunningTime="2025-11-28 06:55:17.716820882 +0000 UTC m=+162.637216464" Nov 28 06:55:17 crc kubenswrapper[4922]: I1128 06:55:17.719157 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-admission-controller-857f4d67dd-98r22" event={"ID":"d0751583-7dc1-4547-9240-39d874a6ca87","Type":"ContainerStarted","Data":"fdb201f369de05edbc1cbf62797d09a631cb4f26b899bff88331f619ba70ed93"} Nov 28 06:55:17 crc kubenswrapper[4922]: I1128 06:55:17.720995 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-djvcb" event={"ID":"04c1fcf2-5298-423f-ab9e-56043ced3e2f","Type":"ContainerStarted","Data":"717ad0e59f194a052fc346308db172716c07a399353255c7185c94769ed27541"} Nov 28 06:55:17 crc kubenswrapper[4922]: I1128 06:55:17.722656 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-r4frz" event={"ID":"1090992f-7c26-40f9-95c6-eaf05f31fda2","Type":"ContainerStarted","Data":"43065a33e0684f30b5699ce5f8ea9bec4f83d93562b505e66dece5430a49ed5a"} Nov 28 06:55:17 crc kubenswrapper[4922]: I1128 06:55:17.723550 4922 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-r4frz" Nov 28 06:55:17 crc kubenswrapper[4922]: I1128 06:55:17.724072 4922 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-8nrlh" Nov 28 06:55:17 crc kubenswrapper[4922]: I1128 06:55:17.746969 4922 patch_prober.go:28] interesting pod/packageserver-d55dfcdfc-r4frz container/packageserver namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.40:5443/healthz\": dial tcp 10.217.0.40:5443: connect: connection refused" start-of-body= Nov 28 06:55:17 crc kubenswrapper[4922]: I1128 06:55:17.756393 4922 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-r4frz" podUID="1090992f-7c26-40f9-95c6-eaf05f31fda2" containerName="packageserver" probeResult="failure" output="Get \"https://10.217.0.40:5443/healthz\": dial tcp 10.217.0.40:5443: connect: connection refused" Nov 28 06:55:17 crc kubenswrapper[4922]: I1128 06:55:17.758371 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume 
\"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 06:55:17 crc kubenswrapper[4922]: E1128 06:55:17.759604 4922 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 06:55:18.259586059 +0000 UTC m=+163.179981641 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 06:55:17 crc kubenswrapper[4922]: I1128 06:55:17.763371 4922 generic.go:334] "Generic (PLEG): container finished" podID="468ca9dc-6af3-47ae-8c7c-b9338ceae695" containerID="e6ab3c19726d75f3879cb9c21076820f644c2404777cbd95f7e0e46f5567a165" exitCode=0 Nov 28 06:55:17 crc kubenswrapper[4922]: I1128 06:55:17.763404 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver/apiserver-76f77b778f-ht2sn" event={"ID":"468ca9dc-6af3-47ae-8c7c-b9338ceae695","Type":"ContainerDied","Data":"e6ab3c19726d75f3879cb9c21076820f644c2404777cbd95f7e0e46f5567a165"} Nov 28 06:55:17 crc kubenswrapper[4922]: I1128 06:55:17.805670 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-canary/ingress-canary-p5q7c" event={"ID":"49678f48-cc19-4c5d-8b9e-2926f02770d9","Type":"ContainerStarted","Data":"d37fae178aaf3d062f0d6de40a52289c21f62cab60493adbb9551d6a9e127413"} Nov 28 06:55:17 crc kubenswrapper[4922]: I1128 06:55:17.825753 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-98sbt" event={"ID":"9cfee6d8-0106-4b57-98e3-23eaed2b9ff7","Type":"ContainerStarted","Data":"86ed3e21562b7da91b6abf48e8a49b6e163da3d8e1fa153cc17c1941c3989889"} Nov 28 06:55:17 crc kubenswrapper[4922]: I1128 06:55:17.844431 4922 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-sfzn5" podStartSLOduration=143.817701896 podStartE2EDuration="2m23.817701896s" podCreationTimestamp="2025-11-28 06:52:54 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 06:55:17.815430592 +0000 UTC m=+162.735826174" watchObservedRunningTime="2025-11-28 06:55:17.817701896 +0000 UTC m=+162.738097478" Nov 28 06:55:17 crc kubenswrapper[4922]: I1128 06:55:17.844768 4922 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-service-ca/service-ca-9c57cc56f-k2x5w" podStartSLOduration=143.844748373 podStartE2EDuration="2m23.844748373s" podCreationTimestamp="2025-11-28 06:52:54 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 06:55:17.783111757 +0000 UTC m=+162.703507339" watchObservedRunningTime="2025-11-28 06:55:17.844748373 +0000 UTC m=+162.765143955" Nov 
28 06:55:17 crc kubenswrapper[4922]: I1128 06:55:17.860500 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-sckww\" (UID: \"dd20079d-34c4-4ea6-920f-420a1d6bb863\") " pod="openshift-image-registry/image-registry-697d97f7c8-sckww" Nov 28 06:55:17 crc kubenswrapper[4922]: I1128 06:55:17.861252 4922 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-multus/multus-admission-controller-857f4d67dd-98r22" podStartSLOduration=143.861198693 podStartE2EDuration="2m23.861198693s" podCreationTimestamp="2025-11-28 06:52:54 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 06:55:17.859899537 +0000 UTC m=+162.780295119" watchObservedRunningTime="2025-11-28 06:55:17.861198693 +0000 UTC m=+162.781594275" Nov 28 06:55:17 crc kubenswrapper[4922]: E1128 06:55:17.861305 4922 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 06:55:18.361276585 +0000 UTC m=+163.281672167 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-sckww" (UID: "dd20079d-34c4-4ea6-920f-420a1d6bb863") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 06:55:17 crc kubenswrapper[4922]: I1128 06:55:17.878556 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-2t2xx" event={"ID":"7a50e5be-5b15-472d-a504-3dc449b474e6","Type":"ContainerStarted","Data":"e093cfc0ee7293d8fb0407b9c3558dcfcf2444e36316d5066799b6b52f54fd1c"} Nov 28 06:55:17 crc kubenswrapper[4922]: I1128 06:55:17.880386 4922 patch_prober.go:28] interesting pod/marketplace-operator-79b997595-2t2xx container/marketplace-operator namespace/openshift-marketplace: Readiness probe status=failure output="Get \"http://10.217.0.24:8080/healthz\": dial tcp 10.217.0.24:8080: connect: connection refused" start-of-body= Nov 28 06:55:17 crc kubenswrapper[4922]: I1128 06:55:17.880428 4922 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-marketplace/marketplace-operator-79b997595-2t2xx" podUID="7a50e5be-5b15-472d-a504-3dc449b474e6" containerName="marketplace-operator" probeResult="failure" output="Get \"http://10.217.0.24:8080/healthz\": dial tcp 10.217.0.24:8080: connect: connection refused" Nov 28 06:55:17 crc kubenswrapper[4922]: I1128 06:55:17.901580 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-7wtcp" event={"ID":"3f39970d-138c-4369-838c-af7074913d3b","Type":"ContainerStarted","Data":"35f1c3b5869b7be3f8e4af27e5f7ad44cb7bd9a99c5fc1077e877997a031f894"} Nov 28 06:55:17 crc kubenswrapper[4922]: I1128 06:55:17.909778 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-service-ca-operator/service-ca-operator-777779d784-hgtbm" 
event={"ID":"71cf79c0-841b-4024-9b3d-9c49ca7d89d9","Type":"ContainerStarted","Data":"0a3ad6297bd5e825e6d232f9adb292ae0e5ac267a10d422074af1700fee4a314"} Nov 28 06:55:17 crc kubenswrapper[4922]: I1128 06:55:17.910285 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-service-ca-operator/service-ca-operator-777779d784-hgtbm" event={"ID":"71cf79c0-841b-4024-9b3d-9c49ca7d89d9","Type":"ContainerStarted","Data":"8385ce92eef9616e78d95202fc1223f77624460fa0dfb0dd6511449abdfc8e89"} Nov 28 06:55:17 crc kubenswrapper[4922]: I1128 06:55:17.924741 4922 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ingress-canary/ingress-canary-p5q7c" podStartSLOduration=7.924722612 podStartE2EDuration="7.924722612s" podCreationTimestamp="2025-11-28 06:55:10 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 06:55:17.922922941 +0000 UTC m=+162.843318523" watchObservedRunningTime="2025-11-28 06:55:17.924722612 +0000 UTC m=+162.845118194" Nov 28 06:55:17 crc kubenswrapper[4922]: I1128 06:55:17.950236 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29405205-gln9g" event={"ID":"765b0f79-4316-4aab-90fa-a2aaa84380f0","Type":"ContainerStarted","Data":"0cb5619676c403739d55e20f2d790f7119e146f00eb6cb6ab6e58f20f205afb0"} Nov 28 06:55:17 crc kubenswrapper[4922]: I1128 06:55:17.950290 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29405205-gln9g" event={"ID":"765b0f79-4316-4aab-90fa-a2aaa84380f0","Type":"ContainerStarted","Data":"d0bab58658aaa82fa618f04f10d10548e4dda2c9b36b6093df3e91dca8077482"} Nov 28 06:55:17 crc kubenswrapper[4922]: I1128 06:55:17.962859 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 06:55:17 crc kubenswrapper[4922]: E1128 06:55:17.968630 4922 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 06:55:18.468599881 +0000 UTC m=+163.388995463 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 06:55:17 crc kubenswrapper[4922]: I1128 06:55:17.980634 4922 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-r4frz" podStartSLOduration=143.980608507 podStartE2EDuration="2m23.980608507s" podCreationTimestamp="2025-11-28 06:52:54 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 06:55:17.952663244 +0000 UTC m=+162.873058826" watchObservedRunningTime="2025-11-28 06:55:17.980608507 +0000 UTC m=+162.901004089" Nov 28 06:55:17 crc kubenswrapper[4922]: I1128 06:55:17.981447 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-fww6r"] Nov 28 06:55:18 crc kubenswrapper[4922]: I1128 06:55:18.009711 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns-operator/dns-operator-744455d44c-s8ws4" event={"ID":"1f79995c-e40b-4f4a-9001-0054f046a944","Type":"ContainerStarted","Data":"02f0d051086aa5e2adb923db27ac9f883c4b1b89a12e3497a78a82638310b529"} Nov 28 06:55:18 crc kubenswrapper[4922]: I1128 06:55:18.009783 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns-operator/dns-operator-744455d44c-s8ws4" event={"ID":"1f79995c-e40b-4f4a-9001-0054f046a944","Type":"ContainerStarted","Data":"74dd972b1672a647a305865710fbda84acfdff5db30278dfccd416c7c9a5327d"} Nov 28 06:55:18 crc kubenswrapper[4922]: I1128 06:55:18.033188 4922 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-98sbt" podStartSLOduration=144.033162788 podStartE2EDuration="2m24.033162788s" podCreationTimestamp="2025-11-28 06:52:54 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 06:55:18.030810892 +0000 UTC m=+162.951206474" watchObservedRunningTime="2025-11-28 06:55:18.033162788 +0000 UTC m=+162.953558370" Nov 28 06:55:18 crc kubenswrapper[4922]: I1128 06:55:18.047751 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-ln9mh" event={"ID":"059ac1e5-20d6-4a32-846e-d427680a560f","Type":"ContainerStarted","Data":"7b6b67ded12c6ef2f08fdecc7853dcacb50835d2dd7d129f2a96f828ff4d1016"} Nov 28 06:55:18 crc kubenswrapper[4922]: I1128 06:55:18.049855 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-f9d7485db-h297g" event={"ID":"7de83d0f-3269-4343-b2a9-398b8d4af4fc","Type":"ContainerStarted","Data":"8e889a92d7a0a02a2add9de486cce47ce6ab541fcc50cda3e48aa3cddbb0e595"} Nov 28 06:55:18 crc kubenswrapper[4922]: I1128 06:55:18.054093 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd-operator/etcd-operator-b45778765-m6bkw" event={"ID":"c699c950-dfd4-4513-8c2a-3f38c8094d22","Type":"ContainerStarted","Data":"4f4b04d89a17e958082656711b720b6307fbd423d9a16f4450676286e535abca"} Nov 28 06:55:18 crc kubenswrapper[4922]: I1128 06:55:18.054124 4922 
kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd-operator/etcd-operator-b45778765-m6bkw" event={"ID":"c699c950-dfd4-4513-8c2a-3f38c8094d22","Type":"ContainerStarted","Data":"4b2a2c735957c6fd7b09a0c8ce8efe4ba0a8d6e3ec5d3a44965c18b751b8ecd5"} Nov 28 06:55:18 crc kubenswrapper[4922]: I1128 06:55:18.063431 4922 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console-operator/console-operator-58897d9998-z4zvd" Nov 28 06:55:18 crc kubenswrapper[4922]: I1128 06:55:18.073370 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-sckww\" (UID: \"dd20079d-34c4-4ea6-920f-420a1d6bb863\") " pod="openshift-image-registry/image-registry-697d97f7c8-sckww" Nov 28 06:55:18 crc kubenswrapper[4922]: E1128 06:55:18.074179 4922 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 06:55:18.574163255 +0000 UTC m=+163.494558837 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-sckww" (UID: "dd20079d-34c4-4ea6-920f-420a1d6bb863") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 06:55:18 crc kubenswrapper[4922]: I1128 06:55:18.081202 4922 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-service-ca-operator/service-ca-operator-777779d784-hgtbm" podStartSLOduration=144.081170752 podStartE2EDuration="2m24.081170752s" podCreationTimestamp="2025-11-28 06:52:54 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 06:55:18.07290137 +0000 UTC m=+162.993296952" watchObservedRunningTime="2025-11-28 06:55:18.081170752 +0000 UTC m=+163.001566324" Nov 28 06:55:18 crc kubenswrapper[4922]: I1128 06:55:18.088527 4922 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-9t6kr" Nov 28 06:55:18 crc kubenswrapper[4922]: I1128 06:55:18.126570 4922 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/collect-profiles-29405205-gln9g" podStartSLOduration=144.126553942 podStartE2EDuration="2m24.126553942s" podCreationTimestamp="2025-11-28 06:52:54 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 06:55:18.125896574 +0000 UTC m=+163.046292156" watchObservedRunningTime="2025-11-28 06:55:18.126553942 +0000 UTC m=+163.046949524" Nov 28 06:55:18 crc kubenswrapper[4922]: I1128 06:55:18.174893 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 06:55:18 crc 
kubenswrapper[4922]: E1128 06:55:18.175095 4922 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 06:55:18.67506061 +0000 UTC m=+163.595456192 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 06:55:18 crc kubenswrapper[4922]: I1128 06:55:18.175365 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-sckww\" (UID: \"dd20079d-34c4-4ea6-920f-420a1d6bb863\") " pod="openshift-image-registry/image-registry-697d97f7c8-sckww" Nov 28 06:55:18 crc kubenswrapper[4922]: E1128 06:55:18.180341 4922 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 06:55:18.680322377 +0000 UTC m=+163.600717959 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-sckww" (UID: "dd20079d-34c4-4ea6-920f-420a1d6bb863") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 06:55:18 crc kubenswrapper[4922]: I1128 06:55:18.240044 4922 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-console/console-f9d7485db-h297g" podStartSLOduration=144.240024209 podStartE2EDuration="2m24.240024209s" podCreationTimestamp="2025-11-28 06:52:54 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 06:55:18.239113683 +0000 UTC m=+163.159509265" watchObservedRunningTime="2025-11-28 06:55:18.240024209 +0000 UTC m=+163.160419791" Nov 28 06:55:18 crc kubenswrapper[4922]: I1128 06:55:18.273707 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-multus/network-metrics-daemon-9kfr9"] Nov 28 06:55:18 crc kubenswrapper[4922]: I1128 06:55:18.277959 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 06:55:18 crc kubenswrapper[4922]: E1128 06:55:18.278321 4922 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. 
No retries permitted until 2025-11-28 06:55:18.77830215 +0000 UTC m=+163.698697732 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 06:55:18 crc kubenswrapper[4922]: I1128 06:55:18.344585 4922 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-etcd-operator/etcd-operator-b45778765-m6bkw" podStartSLOduration=144.344556706 podStartE2EDuration="2m24.344556706s" podCreationTimestamp="2025-11-28 06:52:54 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 06:55:18.291593053 +0000 UTC m=+163.211988635" watchObservedRunningTime="2025-11-28 06:55:18.344556706 +0000 UTC m=+163.264952298" Nov 28 06:55:18 crc kubenswrapper[4922]: I1128 06:55:18.353009 4922 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-d9mhp"] Nov 28 06:55:18 crc kubenswrapper[4922]: I1128 06:55:18.355249 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-d9mhp" Nov 28 06:55:18 crc kubenswrapper[4922]: I1128 06:55:18.362525 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-marketplace-dockercfg-x2ctb" Nov 28 06:55:18 crc kubenswrapper[4922]: I1128 06:55:18.399908 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-sckww\" (UID: \"dd20079d-34c4-4ea6-920f-420a1d6bb863\") " pod="openshift-image-registry/image-registry-697d97f7c8-sckww" Nov 28 06:55:18 crc kubenswrapper[4922]: E1128 06:55:18.400706 4922 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 06:55:18.900688497 +0000 UTC m=+163.821084079 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-sckww" (UID: "dd20079d-34c4-4ea6-920f-420a1d6bb863") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 06:55:18 crc kubenswrapper[4922]: I1128 06:55:18.415376 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-d9mhp"] Nov 28 06:55:18 crc kubenswrapper[4922]: W1128 06:55:18.426080 4922 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podd2e80403_da8b_49c0_9007_aa0c6c1be47a.slice/crio-f65d46290a1857b2ab7a69ece4e57ea2537c61a2bc5def9fa0cceb51caf59333 WatchSource:0}: Error finding container f65d46290a1857b2ab7a69ece4e57ea2537c61a2bc5def9fa0cceb51caf59333: Status 404 returned error can't find the container with id f65d46290a1857b2ab7a69ece4e57ea2537c61a2bc5def9fa0cceb51caf59333 Nov 28 06:55:18 crc kubenswrapper[4922]: I1128 06:55:18.431513 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-ncbvn"] Nov 28 06:55:18 crc kubenswrapper[4922]: I1128 06:55:18.485465 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-vzlv9"] Nov 28 06:55:18 crc kubenswrapper[4922]: I1128 06:55:18.503758 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 06:55:18 crc kubenswrapper[4922]: I1128 06:55:18.503913 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-l27pt\" (UniqueName: \"kubernetes.io/projected/5254d7c5-8faa-4ede-a82a-210426648d02-kube-api-access-l27pt\") pod \"redhat-marketplace-d9mhp\" (UID: \"5254d7c5-8faa-4ede-a82a-210426648d02\") " pod="openshift-marketplace/redhat-marketplace-d9mhp" Nov 28 06:55:18 crc kubenswrapper[4922]: I1128 06:55:18.503953 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5254d7c5-8faa-4ede-a82a-210426648d02-catalog-content\") pod \"redhat-marketplace-d9mhp\" (UID: \"5254d7c5-8faa-4ede-a82a-210426648d02\") " pod="openshift-marketplace/redhat-marketplace-d9mhp" Nov 28 06:55:18 crc kubenswrapper[4922]: I1128 06:55:18.504002 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5254d7c5-8faa-4ede-a82a-210426648d02-utilities\") pod \"redhat-marketplace-d9mhp\" (UID: \"5254d7c5-8faa-4ede-a82a-210426648d02\") " pod="openshift-marketplace/redhat-marketplace-d9mhp" Nov 28 06:55:18 crc kubenswrapper[4922]: E1128 06:55:18.504132 4922 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 06:55:19.004115363 +0000 UTC m=+163.924510945 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 06:55:18 crc kubenswrapper[4922]: I1128 06:55:18.605113 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-l27pt\" (UniqueName: \"kubernetes.io/projected/5254d7c5-8faa-4ede-a82a-210426648d02-kube-api-access-l27pt\") pod \"redhat-marketplace-d9mhp\" (UID: \"5254d7c5-8faa-4ede-a82a-210426648d02\") " pod="openshift-marketplace/redhat-marketplace-d9mhp" Nov 28 06:55:18 crc kubenswrapper[4922]: I1128 06:55:18.605276 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5254d7c5-8faa-4ede-a82a-210426648d02-catalog-content\") pod \"redhat-marketplace-d9mhp\" (UID: \"5254d7c5-8faa-4ede-a82a-210426648d02\") " pod="openshift-marketplace/redhat-marketplace-d9mhp" Nov 28 06:55:18 crc kubenswrapper[4922]: I1128 06:55:18.605390 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-sckww\" (UID: \"dd20079d-34c4-4ea6-920f-420a1d6bb863\") " pod="openshift-image-registry/image-registry-697d97f7c8-sckww" Nov 28 06:55:18 crc kubenswrapper[4922]: I1128 06:55:18.605479 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5254d7c5-8faa-4ede-a82a-210426648d02-utilities\") pod \"redhat-marketplace-d9mhp\" (UID: \"5254d7c5-8faa-4ede-a82a-210426648d02\") " pod="openshift-marketplace/redhat-marketplace-d9mhp" Nov 28 06:55:18 crc kubenswrapper[4922]: I1128 06:55:18.605935 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5254d7c5-8faa-4ede-a82a-210426648d02-utilities\") pod \"redhat-marketplace-d9mhp\" (UID: \"5254d7c5-8faa-4ede-a82a-210426648d02\") " pod="openshift-marketplace/redhat-marketplace-d9mhp" Nov 28 06:55:18 crc kubenswrapper[4922]: I1128 06:55:18.606494 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5254d7c5-8faa-4ede-a82a-210426648d02-catalog-content\") pod \"redhat-marketplace-d9mhp\" (UID: \"5254d7c5-8faa-4ede-a82a-210426648d02\") " pod="openshift-marketplace/redhat-marketplace-d9mhp" Nov 28 06:55:18 crc kubenswrapper[4922]: E1128 06:55:18.606808 4922 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 06:55:19.106795936 +0000 UTC m=+164.027191518 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-sckww" (UID: "dd20079d-34c4-4ea6-920f-420a1d6bb863") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 06:55:18 crc kubenswrapper[4922]: I1128 06:55:18.648432 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-l27pt\" (UniqueName: \"kubernetes.io/projected/5254d7c5-8faa-4ede-a82a-210426648d02-kube-api-access-l27pt\") pod \"redhat-marketplace-d9mhp\" (UID: \"5254d7c5-8faa-4ede-a82a-210426648d02\") " pod="openshift-marketplace/redhat-marketplace-d9mhp" Nov 28 06:55:18 crc kubenswrapper[4922]: I1128 06:55:18.712073 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 06:55:18 crc kubenswrapper[4922]: E1128 06:55:18.712383 4922 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 06:55:19.212364892 +0000 UTC m=+164.132760474 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 06:55:18 crc kubenswrapper[4922]: I1128 06:55:18.712412 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-sckww\" (UID: \"dd20079d-34c4-4ea6-920f-420a1d6bb863\") " pod="openshift-image-registry/image-registry-697d97f7c8-sckww" Nov 28 06:55:18 crc kubenswrapper[4922]: E1128 06:55:18.712686 4922 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 06:55:19.212680151 +0000 UTC m=+164.133075733 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-sckww" (UID: "dd20079d-34c4-4ea6-920f-420a1d6bb863") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 06:55:18 crc kubenswrapper[4922]: I1128 06:55:18.720803 4922 patch_prober.go:28] interesting pod/router-default-5444994796-xbhv9 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 28 06:55:18 crc kubenswrapper[4922]: [-]has-synced failed: reason withheld Nov 28 06:55:18 crc kubenswrapper[4922]: [+]process-running ok Nov 28 06:55:18 crc kubenswrapper[4922]: healthz check failed Nov 28 06:55:18 crc kubenswrapper[4922]: I1128 06:55:18.720876 4922 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-xbhv9" podUID="87cf54e1-7498-430e-a517-6658bd9ce547" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 28 06:55:18 crc kubenswrapper[4922]: I1128 06:55:18.741101 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-d9mhp" Nov 28 06:55:18 crc kubenswrapper[4922]: I1128 06:55:18.748343 4922 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-lftkx"] Nov 28 06:55:18 crc kubenswrapper[4922]: I1128 06:55:18.749355 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-lftkx" Nov 28 06:55:18 crc kubenswrapper[4922]: I1128 06:55:18.813472 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 06:55:18 crc kubenswrapper[4922]: I1128 06:55:18.813664 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-b9phc\" (UniqueName: \"kubernetes.io/projected/cca6c3a6-ee5e-4d4c-8a39-68b087664c86-kube-api-access-b9phc\") pod \"redhat-marketplace-lftkx\" (UID: \"cca6c3a6-ee5e-4d4c-8a39-68b087664c86\") " pod="openshift-marketplace/redhat-marketplace-lftkx" Nov 28 06:55:18 crc kubenswrapper[4922]: I1128 06:55:18.813707 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/cca6c3a6-ee5e-4d4c-8a39-68b087664c86-catalog-content\") pod \"redhat-marketplace-lftkx\" (UID: \"cca6c3a6-ee5e-4d4c-8a39-68b087664c86\") " pod="openshift-marketplace/redhat-marketplace-lftkx" Nov 28 06:55:18 crc kubenswrapper[4922]: I1128 06:55:18.813761 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/cca6c3a6-ee5e-4d4c-8a39-68b087664c86-utilities\") pod \"redhat-marketplace-lftkx\" (UID: \"cca6c3a6-ee5e-4d4c-8a39-68b087664c86\") " pod="openshift-marketplace/redhat-marketplace-lftkx" Nov 28 06:55:18 crc kubenswrapper[4922]: E1128 06:55:18.813879 4922 
nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 06:55:19.313860673 +0000 UTC m=+164.234256255 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 06:55:18 crc kubenswrapper[4922]: I1128 06:55:18.842141 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-lftkx"] Nov 28 06:55:18 crc kubenswrapper[4922]: I1128 06:55:18.915160 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-b9phc\" (UniqueName: \"kubernetes.io/projected/cca6c3a6-ee5e-4d4c-8a39-68b087664c86-kube-api-access-b9phc\") pod \"redhat-marketplace-lftkx\" (UID: \"cca6c3a6-ee5e-4d4c-8a39-68b087664c86\") " pod="openshift-marketplace/redhat-marketplace-lftkx" Nov 28 06:55:18 crc kubenswrapper[4922]: I1128 06:55:18.915209 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-sckww\" (UID: \"dd20079d-34c4-4ea6-920f-420a1d6bb863\") " pod="openshift-image-registry/image-registry-697d97f7c8-sckww" Nov 28 06:55:18 crc kubenswrapper[4922]: I1128 06:55:18.915254 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/cca6c3a6-ee5e-4d4c-8a39-68b087664c86-catalog-content\") pod \"redhat-marketplace-lftkx\" (UID: \"cca6c3a6-ee5e-4d4c-8a39-68b087664c86\") " pod="openshift-marketplace/redhat-marketplace-lftkx" Nov 28 06:55:18 crc kubenswrapper[4922]: I1128 06:55:18.915306 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/cca6c3a6-ee5e-4d4c-8a39-68b087664c86-utilities\") pod \"redhat-marketplace-lftkx\" (UID: \"cca6c3a6-ee5e-4d4c-8a39-68b087664c86\") " pod="openshift-marketplace/redhat-marketplace-lftkx" Nov 28 06:55:18 crc kubenswrapper[4922]: I1128 06:55:18.915902 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/cca6c3a6-ee5e-4d4c-8a39-68b087664c86-utilities\") pod \"redhat-marketplace-lftkx\" (UID: \"cca6c3a6-ee5e-4d4c-8a39-68b087664c86\") " pod="openshift-marketplace/redhat-marketplace-lftkx" Nov 28 06:55:18 crc kubenswrapper[4922]: I1128 06:55:18.917850 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/cca6c3a6-ee5e-4d4c-8a39-68b087664c86-catalog-content\") pod \"redhat-marketplace-lftkx\" (UID: \"cca6c3a6-ee5e-4d4c-8a39-68b087664c86\") " pod="openshift-marketplace/redhat-marketplace-lftkx" Nov 28 06:55:18 crc kubenswrapper[4922]: E1128 06:55:18.918001 4922 nestedpendingoperations.go:348] Operation for 
"{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 06:55:19.417978799 +0000 UTC m=+164.338374371 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-sckww" (UID: "dd20079d-34c4-4ea6-920f-420a1d6bb863") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 06:55:18 crc kubenswrapper[4922]: I1128 06:55:18.961985 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-b9phc\" (UniqueName: \"kubernetes.io/projected/cca6c3a6-ee5e-4d4c-8a39-68b087664c86-kube-api-access-b9phc\") pod \"redhat-marketplace-lftkx\" (UID: \"cca6c3a6-ee5e-4d4c-8a39-68b087664c86\") " pod="openshift-marketplace/redhat-marketplace-lftkx" Nov 28 06:55:19 crc kubenswrapper[4922]: I1128 06:55:19.016581 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 06:55:19 crc kubenswrapper[4922]: E1128 06:55:19.016962 4922 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 06:55:19.516928579 +0000 UTC m=+164.437324161 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 06:55:19 crc kubenswrapper[4922]: I1128 06:55:19.017288 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-sckww\" (UID: \"dd20079d-34c4-4ea6-920f-420a1d6bb863\") " pod="openshift-image-registry/image-registry-697d97f7c8-sckww" Nov 28 06:55:19 crc kubenswrapper[4922]: E1128 06:55:19.017625 4922 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 06:55:19.517608848 +0000 UTC m=+164.438004430 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-sckww" (UID: "dd20079d-34c4-4ea6-920f-420a1d6bb863") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 06:55:19 crc kubenswrapper[4922]: I1128 06:55:19.083628 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-lftkx" Nov 28 06:55:19 crc kubenswrapper[4922]: I1128 06:55:19.106412 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-djvcb" event={"ID":"04c1fcf2-5298-423f-ab9e-56043ced3e2f","Type":"ContainerStarted","Data":"48e67177b7632dfc39892cbae7ce05fa418b0190191ce4616d5bec140dd93e18"} Nov 28 06:55:19 crc kubenswrapper[4922]: I1128 06:55:19.119050 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 06:55:19 crc kubenswrapper[4922]: E1128 06:55:19.119428 4922 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 06:55:19.619410417 +0000 UTC m=+164.539805999 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 06:55:19 crc kubenswrapper[4922]: I1128 06:55:19.154711 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-d6r4k" event={"ID":"b55d7c5c-1fc1-4c9a-b82b-35dbabbbe8e7","Type":"ContainerStarted","Data":"4f489491927c8fc89d56a47c8996b620e1a68cd7356423118aff9bd898fe8945"} Nov 28 06:55:19 crc kubenswrapper[4922]: I1128 06:55:19.155669 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-d6r4k" event={"ID":"b55d7c5c-1fc1-4c9a-b82b-35dbabbbe8e7","Type":"ContainerStarted","Data":"f40d019d6b9a4b7e6ab34f86d711b49c90a8c752f5b0306dade574258e3ea64e"} Nov 28 06:55:19 crc kubenswrapper[4922]: I1128 06:55:19.205772 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-ncbvn" event={"ID":"d2e80403-da8b-49c0-9007-aa0c6c1be47a","Type":"ContainerStarted","Data":"6f2e930817c447b4c179490d2cf72b43d8a69650d7057e2b014c4b693a67799a"} Nov 28 06:55:19 crc kubenswrapper[4922]: I1128 06:55:19.205816 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-ncbvn" event={"ID":"d2e80403-da8b-49c0-9007-aa0c6c1be47a","Type":"ContainerStarted","Data":"f65d46290a1857b2ab7a69ece4e57ea2537c61a2bc5def9fa0cceb51caf59333"} Nov 28 06:55:19 crc 
kubenswrapper[4922]: I1128 06:55:19.209037 4922 generic.go:334] "Generic (PLEG): container finished" podID="23771385-c219-4713-9c79-d4802b2f13a7" containerID="6cf810f2b631921f4029d4e2f5663d3f2b03345ffd37912d8caec436972dd7c9" exitCode=0 Nov 28 06:55:19 crc kubenswrapper[4922]: I1128 06:55:19.209084 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-fww6r" event={"ID":"23771385-c219-4713-9c79-d4802b2f13a7","Type":"ContainerDied","Data":"6cf810f2b631921f4029d4e2f5663d3f2b03345ffd37912d8caec436972dd7c9"} Nov 28 06:55:19 crc kubenswrapper[4922]: I1128 06:55:19.209104 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-fww6r" event={"ID":"23771385-c219-4713-9c79-d4802b2f13a7","Type":"ContainerStarted","Data":"2b9ac76de04dedea80e36a70a92cc55dfb61c33cf3ff3369662ad059d4e433e1"} Nov 28 06:55:19 crc kubenswrapper[4922]: I1128 06:55:19.221466 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-sckww\" (UID: \"dd20079d-34c4-4ea6-920f-420a1d6bb863\") " pod="openshift-image-registry/image-registry-697d97f7c8-sckww" Nov 28 06:55:19 crc kubenswrapper[4922]: E1128 06:55:19.229953 4922 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 06:55:19.729938272 +0000 UTC m=+164.650333854 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-sckww" (UID: "dd20079d-34c4-4ea6-920f-420a1d6bb863") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 06:55:19 crc kubenswrapper[4922]: I1128 06:55:19.232825 4922 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-d6r4k" podStartSLOduration=145.232811353 podStartE2EDuration="2m25.232811353s" podCreationTimestamp="2025-11-28 06:52:54 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 06:55:19.195531959 +0000 UTC m=+164.115927541" watchObservedRunningTime="2025-11-28 06:55:19.232811353 +0000 UTC m=+164.153206935" Nov 28 06:55:19 crc kubenswrapper[4922]: I1128 06:55:19.257062 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver/apiserver-76f77b778f-ht2sn" event={"ID":"468ca9dc-6af3-47ae-8c7c-b9338ceae695","Type":"ContainerStarted","Data":"fbfe3e5bc6d47d0dc68586df2d33adbf1aae1c8aa865c82ef4e58681a4ea92e0"} Nov 28 06:55:19 crc kubenswrapper[4922]: I1128 06:55:19.261401 4922 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 28 06:55:19 crc kubenswrapper[4922]: I1128 06:55:19.314931 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-98sbt" 
event={"ID":"9cfee6d8-0106-4b57-98e3-23eaed2b9ff7","Type":"ContainerStarted","Data":"0c8123faf099a3187b31996c5364bdc8f33b5fcac1fb102ddb1248519321769c"} Nov 28 06:55:19 crc kubenswrapper[4922]: I1128 06:55:19.322939 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 06:55:19 crc kubenswrapper[4922]: E1128 06:55:19.324266 4922 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 06:55:19.824243322 +0000 UTC m=+164.744638904 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 06:55:19 crc kubenswrapper[4922]: I1128 06:55:19.348883 4922 generic.go:334] "Generic (PLEG): container finished" podID="09d238cf-e3a6-461a-bdf8-f598c91385a3" containerID="e98e9fd357015efc6e5600ce20cf3a46c9af87e122277d7611017961c92923a3" exitCode=0 Nov 28 06:55:19 crc kubenswrapper[4922]: I1128 06:55:19.348993 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-vzlv9" event={"ID":"09d238cf-e3a6-461a-bdf8-f598c91385a3","Type":"ContainerDied","Data":"e98e9fd357015efc6e5600ce20cf3a46c9af87e122277d7611017961c92923a3"} Nov 28 06:55:19 crc kubenswrapper[4922]: I1128 06:55:19.349023 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-vzlv9" event={"ID":"09d238cf-e3a6-461a-bdf8-f598c91385a3","Type":"ContainerStarted","Data":"f3c2e4faafd7483fc2a212d4cbf0a20923c750b335b8cdc56a960bcf1ace663f"} Nov 28 06:55:19 crc kubenswrapper[4922]: I1128 06:55:19.377336 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-canary/ingress-canary-p5q7c" event={"ID":"49678f48-cc19-4c5d-8b9e-2926f02770d9","Type":"ContainerStarted","Data":"0d1b1c69a27a23f40abbb978f0d4c18cf683e6db35f83efb62160cc596e77103"} Nov 28 06:55:19 crc kubenswrapper[4922]: E1128 06:55:19.426583 4922 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 06:55:19.926566487 +0000 UTC m=+164.846962069 (durationBeforeRetry 500ms). 
Nov 28 06:55:19 crc kubenswrapper[4922]: I1128 06:55:19.426784 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-sckww\" (UID: \"dd20079d-34c4-4ea6-920f-420a1d6bb863\") " pod="openshift-image-registry/image-registry-697d97f7c8-sckww"
Nov 28 06:55:19 crc kubenswrapper[4922]: I1128 06:55:19.459204 4922 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-7wtcp" podStartSLOduration=145.45916645 podStartE2EDuration="2m25.45916645s" podCreationTimestamp="2025-11-28 06:52:54 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 06:55:19.445840266 +0000 UTC m=+164.366235858" watchObservedRunningTime="2025-11-28 06:55:19.45916645 +0000 UTC m=+164.379562032"
Nov 28 06:55:19 crc kubenswrapper[4922]: I1128 06:55:19.529521 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 28 06:55:19 crc kubenswrapper[4922]: E1128 06:55:19.531178 4922 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 06:55:20.031158944 +0000 UTC m=+164.951554526 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 28 06:55:19 crc kubenswrapper[4922]: I1128 06:55:19.545053 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-d9mhp"]
Nov 28 06:55:19 crc kubenswrapper[4922]: I1128 06:55:19.545089 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-7wtcp" event={"ID":"3f39970d-138c-4369-838c-af7074913d3b","Type":"ContainerStarted","Data":"33e74cdb058abb3c981500615c0dc5f68ecc622e25530e0c42c8ec72c518df8b"}
Nov 28 06:55:19 crc kubenswrapper[4922]: I1128 06:55:19.546535 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-f9d7485db-h297g" event={"ID":"7de83d0f-3269-4343-b2a9-398b8d4af4fc","Type":"ContainerStarted","Data":"500c7612909a34326f6a36b7737a0dd0b0b321c52d175bc83973037b802e9332"}
Nov 28 06:55:19 crc kubenswrapper[4922]: I1128 06:55:19.548709 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-54gpd" event={"ID":"e2e57fd7-85d0-4872-8737-c6423b16b702","Type":"ContainerStarted","Data":"276aee23a9f7dbacd5ef0dfc589a1940f692207e55e87d6962249ff0bf7b477e"}
Nov 28 06:55:19 crc kubenswrapper[4922]: I1128 06:55:19.583309 4922 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-54gpd" podStartSLOduration=145.583273794 podStartE2EDuration="2m25.583273794s" podCreationTimestamp="2025-11-28 06:52:54 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 06:55:19.580601439 +0000 UTC m=+164.500997021" watchObservedRunningTime="2025-11-28 06:55:19.583273794 +0000 UTC m=+164.503669376"
Nov 28 06:55:19 crc kubenswrapper[4922]: I1128 06:55:19.588535 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/network-metrics-daemon-9kfr9" event={"ID":"709beb43-ed88-4a0a-b384-0c463e469964","Type":"ContainerStarted","Data":"13f80e61c05486c5b2624307e976f8ced3d34492fc097cf53352bc2234f3c7e7"}
Nov 28 06:55:19 crc kubenswrapper[4922]: I1128 06:55:19.643338 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-sckww\" (UID: \"dd20079d-34c4-4ea6-920f-420a1d6bb863\") " pod="openshift-image-registry/image-registry-697d97f7c8-sckww"
Nov 28 06:55:19 crc kubenswrapper[4922]: E1128 06:55:19.648424 4922 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 06:55:20.148407577 +0000 UTC m=+165.068803159 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-sckww" (UID: "dd20079d-34c4-4ea6-920f-420a1d6bb863") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 28 06:55:19 crc kubenswrapper[4922]: I1128 06:55:19.667043 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/dns-default-62v62" event={"ID":"03dc5df6-dfb3-4839-9c45-71894d1b547b","Type":"ContainerStarted","Data":"d96e96044dd600bad9fef7a1bb0eac6263dbf11ad601f0269a2c48fbc43633f8"}
Nov 28 06:55:19 crc kubenswrapper[4922]: I1128 06:55:19.667100 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/dns-default-62v62" event={"ID":"03dc5df6-dfb3-4839-9c45-71894d1b547b","Type":"ContainerStarted","Data":"82d1282f9b34966f3e5ff6c14241e58dfd4be21924b2846da0fe1be6f0e1c789"}
Nov 28 06:55:19 crc kubenswrapper[4922]: I1128 06:55:19.667128 4922 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-dns/dns-default-62v62"
Nov 28 06:55:19 crc kubenswrapper[4922]: I1128 06:55:19.692391 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-r4frz" event={"ID":"1090992f-7c26-40f9-95c6-eaf05f31fda2","Type":"ContainerStarted","Data":"b31698e01e1b6f66ddac68d1e2c220695b50c4739a29155e8cb8f7179441b404"}
Nov 28 06:55:19 crc kubenswrapper[4922]: I1128 06:55:19.720817 4922 patch_prober.go:28] interesting pod/router-default-5444994796-xbhv9 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Nov 28 06:55:19 crc kubenswrapper[4922]: [-]has-synced failed: reason withheld
Nov 28 06:55:19 crc kubenswrapper[4922]: [+]process-running ok
Nov 28 06:55:19 crc kubenswrapper[4922]: healthz check failed
Nov 28 06:55:19 crc kubenswrapper[4922]: I1128 06:55:19.721439 4922 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-xbhv9" podUID="87cf54e1-7498-430e-a517-6658bd9ce547" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Nov 28 06:55:19 crc kubenswrapper[4922]: I1128 06:55:19.722191 4922 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-r4frz"
Nov 28 06:55:19 crc kubenswrapper[4922]: I1128 06:55:19.723033 4922 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-dns/dns-default-62v62" podStartSLOduration=9.723018196 podStartE2EDuration="9.723018196s" podCreationTimestamp="2025-11-28 06:55:10 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 06:55:19.71033276 +0000 UTC m=+164.630728342" watchObservedRunningTime="2025-11-28 06:55:19.723018196 +0000 UTC m=+164.643413778"
Nov 28 06:55:19 crc kubenswrapper[4922]: I1128 06:55:19.729546 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns-operator/dns-operator-744455d44c-s8ws4" event={"ID":"1f79995c-e40b-4f4a-9001-0054f046a944","Type":"ContainerStarted","Data":"9b159cd22149aa0f554746fdfeed932721121a92e05fb1902173c057617d1239"}
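
The router's startup probe keeps failing with HTTP 500 because its health endpoint aggregates named checks, and two of them ([-]backend-http, [-]has-synced) are still failing; the bracketed lines patch_prober quotes are that aggregated output, one "[+]"/"[-]" line per check. A sketch of a handler producing output in that shape, with assumed check logic; this is illustrative of the format, not the openshift-ingress router's implementation (and Go map iteration order is random, unlike the real ordered output):

    package main

    import (
    	"fmt"
    	"net/http"
    )

    // healthz prints one line per named check and returns HTTP 500
    // while any check fails, mirroring the probe body quoted above.
    func healthz(checks map[string]func() error) http.HandlerFunc {
    	return func(w http.ResponseWriter, r *http.Request) {
    		body, failed := "", false
    		for name, check := range checks {
    			if err := check(); err != nil {
    				failed = true
    				body += fmt.Sprintf("[-]%s failed: reason withheld\n", name)
    			} else {
    				body += fmt.Sprintf("[+]%s ok\n", name)
    			}
    		}
    		if failed {
    			w.WriteHeader(http.StatusInternalServerError) // probe logs "statuscode: 500"
    			body += "healthz check failed\n"
    		}
    		fmt.Fprint(w, body)
    	}
    }

    func main() {
    	http.Handle("/healthz", healthz(map[string]func() error{
    		"process-running": func() error { return nil },
    		"backend-http":    func() error { return fmt.Errorf("not ready") },
    	}))
    	_ = http.ListenAndServe(":8080", nil)
    }
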
Nov 28 06:55:19 crc kubenswrapper[4922]: I1128 06:55:19.745782 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 28 06:55:19 crc kubenswrapper[4922]: E1128 06:55:19.746811 4922 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 06:55:20.246794371 +0000 UTC m=+165.167189953 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 28 06:55:19 crc kubenswrapper[4922]: I1128 06:55:19.753143 4922 generic.go:334] "Generic (PLEG): container finished" podID="b82398ae-40c9-40dc-8775-65f999dac1a8" containerID="cb9a8a751161d86a14b75aaa003db1ee05eb074bb91cccf7c959719524886dbd" exitCode=0
Nov 28 06:55:19 crc kubenswrapper[4922]: I1128 06:55:19.753641 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-lppzs" event={"ID":"b82398ae-40c9-40dc-8775-65f999dac1a8","Type":"ContainerDied","Data":"cb9a8a751161d86a14b75aaa003db1ee05eb074bb91cccf7c959719524886dbd"}
Nov 28 06:55:19 crc kubenswrapper[4922]: I1128 06:55:19.753725 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-lppzs" event={"ID":"b82398ae-40c9-40dc-8775-65f999dac1a8","Type":"ContainerStarted","Data":"dfc73622d6241d40d6f03ac756d8c62d47f09fab94e72fb742028afafd600817"}
Nov 28 06:55:19 crc kubenswrapper[4922]: I1128 06:55:19.765819 4922 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/marketplace-operator-79b997595-2t2xx"
Nov 28 06:55:19 crc kubenswrapper[4922]: I1128 06:55:19.803708 4922 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-config-operator/openshift-config-operator-7777fb866f-4gt9m"
Nov 28 06:55:19 crc kubenswrapper[4922]: I1128 06:55:19.863353 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-sckww\" (UID: \"dd20079d-34c4-4ea6-920f-420a1d6bb863\") " pod="openshift-image-registry/image-registry-697d97f7c8-sckww"
Nov 28 06:55:19 crc kubenswrapper[4922]: E1128 06:55:19.863852 4922 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 06:55:20.363831668 +0000 UTC m=+165.284227250 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-sckww" (UID: "dd20079d-34c4-4ea6-920f-420a1d6bb863") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 28 06:55:19 crc kubenswrapper[4922]: I1128 06:55:19.886770 4922 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-dns-operator/dns-operator-744455d44c-s8ws4" podStartSLOduration=145.886740889 podStartE2EDuration="2m25.886740889s" podCreationTimestamp="2025-11-28 06:52:54 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 06:55:19.882306236 +0000 UTC m=+164.802701818" watchObservedRunningTime="2025-11-28 06:55:19.886740889 +0000 UTC m=+164.807136471"
Nov 28 06:55:19 crc kubenswrapper[4922]: I1128 06:55:19.887094 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-lftkx"]
Nov 28 06:55:19 crc kubenswrapper[4922]: I1128 06:55:19.946201 4922 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-qgkkf"]
Nov 28 06:55:19 crc kubenswrapper[4922]: I1128 06:55:19.955770 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-qgkkf"
Nov 28 06:55:19 crc kubenswrapper[4922]: I1128 06:55:19.962201 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-operators-dockercfg-ct8rh"
Nov 28 06:55:19 crc kubenswrapper[4922]: I1128 06:55:19.967153 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 28 06:55:19 crc kubenswrapper[4922]: E1128 06:55:19.969344 4922 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 06:55:20.469318171 +0000 UTC m=+165.389713923 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 28 06:55:19 crc kubenswrapper[4922]: I1128 06:55:19.969649 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-qgkkf"]
Nov 28 06:55:20 crc kubenswrapper[4922]: I1128 06:55:20.069475 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7h294\" (UniqueName: \"kubernetes.io/projected/915bf055-c217-4565-a245-8901b61def3e-kube-api-access-7h294\") pod \"redhat-operators-qgkkf\" (UID: \"915bf055-c217-4565-a245-8901b61def3e\") " pod="openshift-marketplace/redhat-operators-qgkkf"
Nov 28 06:55:20 crc kubenswrapper[4922]: I1128 06:55:20.069879 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/915bf055-c217-4565-a245-8901b61def3e-catalog-content\") pod \"redhat-operators-qgkkf\" (UID: \"915bf055-c217-4565-a245-8901b61def3e\") " pod="openshift-marketplace/redhat-operators-qgkkf"
Nov 28 06:55:20 crc kubenswrapper[4922]: I1128 06:55:20.069938 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/915bf055-c217-4565-a245-8901b61def3e-utilities\") pod \"redhat-operators-qgkkf\" (UID: \"915bf055-c217-4565-a245-8901b61def3e\") " pod="openshift-marketplace/redhat-operators-qgkkf"
Nov 28 06:55:20 crc kubenswrapper[4922]: I1128 06:55:20.069973 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-sckww\" (UID: \"dd20079d-34c4-4ea6-920f-420a1d6bb863\") " pod="openshift-image-registry/image-registry-697d97f7c8-sckww"
Nov 28 06:55:20 crc kubenswrapper[4922]: E1128 06:55:20.070374 4922 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 06:55:20.57035932 +0000 UTC m=+165.490754902 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-sckww" (UID: "dd20079d-34c4-4ea6-920f-420a1d6bb863") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 28 06:55:20 crc kubenswrapper[4922]: I1128 06:55:20.171593 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 28 06:55:20 crc kubenswrapper[4922]: E1128 06:55:20.171779 4922 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 06:55:20.671744858 +0000 UTC m=+165.592140440 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 28 06:55:20 crc kubenswrapper[4922]: I1128 06:55:20.172170 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7h294\" (UniqueName: \"kubernetes.io/projected/915bf055-c217-4565-a245-8901b61def3e-kube-api-access-7h294\") pod \"redhat-operators-qgkkf\" (UID: \"915bf055-c217-4565-a245-8901b61def3e\") " pod="openshift-marketplace/redhat-operators-qgkkf"
Nov 28 06:55:20 crc kubenswrapper[4922]: I1128 06:55:20.172214 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/915bf055-c217-4565-a245-8901b61def3e-catalog-content\") pod \"redhat-operators-qgkkf\" (UID: \"915bf055-c217-4565-a245-8901b61def3e\") " pod="openshift-marketplace/redhat-operators-qgkkf"
Nov 28 06:55:20 crc kubenswrapper[4922]: I1128 06:55:20.172299 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/915bf055-c217-4565-a245-8901b61def3e-utilities\") pod \"redhat-operators-qgkkf\" (UID: \"915bf055-c217-4565-a245-8901b61def3e\") " pod="openshift-marketplace/redhat-operators-qgkkf"
Nov 28 06:55:20 crc kubenswrapper[4922]: I1128 06:55:20.172348 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-sckww\" (UID: \"dd20079d-34c4-4ea6-920f-420a1d6bb863\") " pod="openshift-image-registry/image-registry-697d97f7c8-sckww"
Nov 28 06:55:20 crc kubenswrapper[4922]: E1128 06:55:20.172811 4922 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 06:55:20.672791697 +0000 UTC m=+165.593187289 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-sckww" (UID: "dd20079d-34c4-4ea6-920f-420a1d6bb863") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 28 06:55:20 crc kubenswrapper[4922]: I1128 06:55:20.172897 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/915bf055-c217-4565-a245-8901b61def3e-utilities\") pod \"redhat-operators-qgkkf\" (UID: \"915bf055-c217-4565-a245-8901b61def3e\") " pod="openshift-marketplace/redhat-operators-qgkkf"
Nov 28 06:55:20 crc kubenswrapper[4922]: I1128 06:55:20.173107 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/915bf055-c217-4565-a245-8901b61def3e-catalog-content\") pod \"redhat-operators-qgkkf\" (UID: \"915bf055-c217-4565-a245-8901b61def3e\") " pod="openshift-marketplace/redhat-operators-qgkkf"
Nov 28 06:55:20 crc kubenswrapper[4922]: I1128 06:55:20.195981 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7h294\" (UniqueName: \"kubernetes.io/projected/915bf055-c217-4565-a245-8901b61def3e-kube-api-access-7h294\") pod \"redhat-operators-qgkkf\" (UID: \"915bf055-c217-4565-a245-8901b61def3e\") " pod="openshift-marketplace/redhat-operators-qgkkf"
Nov 28 06:55:20 crc kubenswrapper[4922]: I1128 06:55:20.209665 4922 plugin_watcher.go:194] "Adding socket path or updating timestamp to desired state cache" path="/var/lib/kubelet/plugins_registry/kubevirt.io.hostpath-provisioner-reg.sock"
Nov 28 06:55:20 crc kubenswrapper[4922]: I1128 06:55:20.274519 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 28 06:55:20 crc kubenswrapper[4922]: E1128 06:55:20.275248 4922 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 06:55:20.775213445 +0000 UTC m=+165.695609027 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 28 06:55:20 crc kubenswrapper[4922]: I1128 06:55:20.325169 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-qgkkf"
Nov 28 06:55:20 crc kubenswrapper[4922]: I1128 06:55:20.348294 4922 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-wr979"]
Nov 28 06:55:20 crc kubenswrapper[4922]: I1128 06:55:20.349787 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-wr979"
Nov 28 06:55:20 crc kubenswrapper[4922]: I1128 06:55:20.355912 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-wr979"]
Nov 28 06:55:20 crc kubenswrapper[4922]: I1128 06:55:20.378043 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-sckww\" (UID: \"dd20079d-34c4-4ea6-920f-420a1d6bb863\") " pod="openshift-image-registry/image-registry-697d97f7c8-sckww"
Nov 28 06:55:20 crc kubenswrapper[4922]: E1128 06:55:20.378567 4922 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 06:55:20.878545958 +0000 UTC m=+165.798941540 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-sckww" (UID: "dd20079d-34c4-4ea6-920f-420a1d6bb863") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 28 06:55:20 crc kubenswrapper[4922]: I1128 06:55:20.478781 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 28 06:55:20 crc kubenswrapper[4922]: I1128 06:55:20.479078 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/069013ed-12e9-4b49-a869-b6956f06ec15-catalog-content\") pod \"redhat-operators-wr979\" (UID: \"069013ed-12e9-4b49-a869-b6956f06ec15\") " pod="openshift-marketplace/redhat-operators-wr979"
Nov 28 06:55:20 crc kubenswrapper[4922]: I1128 06:55:20.479097 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/069013ed-12e9-4b49-a869-b6956f06ec15-utilities\") pod \"redhat-operators-wr979\" (UID: \"069013ed-12e9-4b49-a869-b6956f06ec15\") " pod="openshift-marketplace/redhat-operators-wr979"
Nov 28 06:55:20 crc kubenswrapper[4922]: I1128 06:55:20.479141 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cwk9l\" (UniqueName: \"kubernetes.io/projected/069013ed-12e9-4b49-a869-b6956f06ec15-kube-api-access-cwk9l\") pod \"redhat-operators-wr979\" (UID: \"069013ed-12e9-4b49-a869-b6956f06ec15\") " pod="openshift-marketplace/redhat-operators-wr979"
Nov 28 06:55:20 crc kubenswrapper[4922]: E1128 06:55:20.479309 4922 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 06:55:20.979291698 +0000 UTC m=+165.899687280 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 28 06:55:20 crc kubenswrapper[4922]: I1128 06:55:20.580750 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-sckww\" (UID: \"dd20079d-34c4-4ea6-920f-420a1d6bb863\") " pod="openshift-image-registry/image-registry-697d97f7c8-sckww"
Nov 28 06:55:20 crc kubenswrapper[4922]: I1128 06:55:20.581142 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/069013ed-12e9-4b49-a869-b6956f06ec15-catalog-content\") pod \"redhat-operators-wr979\" (UID: \"069013ed-12e9-4b49-a869-b6956f06ec15\") " pod="openshift-marketplace/redhat-operators-wr979"
Nov 28 06:55:20 crc kubenswrapper[4922]: I1128 06:55:20.581162 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/069013ed-12e9-4b49-a869-b6956f06ec15-utilities\") pod \"redhat-operators-wr979\" (UID: \"069013ed-12e9-4b49-a869-b6956f06ec15\") " pod="openshift-marketplace/redhat-operators-wr979"
Nov 28 06:55:20 crc kubenswrapper[4922]: E1128 06:55:20.581580 4922 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 06:55:21.081559481 +0000 UTC m=+166.001955063 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-sckww" (UID: "dd20079d-34c4-4ea6-920f-420a1d6bb863") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 28 06:55:20 crc kubenswrapper[4922]: I1128 06:55:20.582523 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cwk9l\" (UniqueName: \"kubernetes.io/projected/069013ed-12e9-4b49-a869-b6956f06ec15-kube-api-access-cwk9l\") pod \"redhat-operators-wr979\" (UID: \"069013ed-12e9-4b49-a869-b6956f06ec15\") " pod="openshift-marketplace/redhat-operators-wr979"
Nov 28 06:55:20 crc kubenswrapper[4922]: I1128 06:55:20.582538 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/069013ed-12e9-4b49-a869-b6956f06ec15-catalog-content\") pod \"redhat-operators-wr979\" (UID: \"069013ed-12e9-4b49-a869-b6956f06ec15\") " pod="openshift-marketplace/redhat-operators-wr979"
Nov 28 06:55:20 crc kubenswrapper[4922]: I1128 06:55:20.582846 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/069013ed-12e9-4b49-a869-b6956f06ec15-utilities\") pod \"redhat-operators-wr979\" (UID: \"069013ed-12e9-4b49-a869-b6956f06ec15\") " pod="openshift-marketplace/redhat-operators-wr979"
Nov 28 06:55:20 crc kubenswrapper[4922]: I1128 06:55:20.604675 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cwk9l\" (UniqueName: \"kubernetes.io/projected/069013ed-12e9-4b49-a869-b6956f06ec15-kube-api-access-cwk9l\") pod \"redhat-operators-wr979\" (UID: \"069013ed-12e9-4b49-a869-b6956f06ec15\") " pod="openshift-marketplace/redhat-operators-wr979"
Nov 28 06:55:20 crc kubenswrapper[4922]: I1128 06:55:20.612270 4922 reconciler.go:161] "OperationExecutor.RegisterPlugin started" plugin={"SocketPath":"/var/lib/kubelet/plugins_registry/kubevirt.io.hostpath-provisioner-reg.sock","Timestamp":"2025-11-28T06:55:20.209693701Z","Handler":null,"Name":""}
Nov 28 06:55:20 crc kubenswrapper[4922]: I1128 06:55:20.617251 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-qgkkf"]
Nov 28 06:55:20 crc kubenswrapper[4922]: I1128 06:55:20.617581 4922 csi_plugin.go:100] kubernetes.io/csi: Trying to validate a new CSI Driver with name: kubevirt.io.hostpath-provisioner endpoint: /var/lib/kubelet/plugins/csi-hostpath/csi.sock versions: 1.0.0
Nov 28 06:55:20 crc kubenswrapper[4922]: I1128 06:55:20.617611 4922 csi_plugin.go:113] kubernetes.io/csi: Register new plugin with name: kubevirt.io.hostpath-provisioner at endpoint: /var/lib/kubelet/plugins/csi-hostpath/csi.sock
Nov 28 06:55:20 crc kubenswrapper[4922]: W1128 06:55:20.649753 4922 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod915bf055_c217_4565_a245_8901b61def3e.slice/crio-e665c05be32adc1767950893da756e642519d3474d3f7fbbe0904ed099f93d97 WatchSource:0}: Error finding container e665c05be32adc1767950893da756e642519d3474d3f7fbbe0904ed099f93d97: Status 404 returned error can't find the container with id e665c05be32adc1767950893da756e642519d3474d3f7fbbe0904ed099f93d97
Nov 28 06:55:20 crc kubenswrapper[4922]: I1128 06:55:20.674843 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-wr979"
Nov 28 06:55:20 crc kubenswrapper[4922]: I1128 06:55:20.683588 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 28 06:55:20 crc kubenswrapper[4922]: I1128 06:55:20.710346 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (OuterVolumeSpecName: "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8". PluginName "kubernetes.io/csi", VolumeGidValue ""
Nov 28 06:55:20 crc kubenswrapper[4922]: I1128 06:55:20.711429 4922 patch_prober.go:28] interesting pod/router-default-5444994796-xbhv9 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Nov 28 06:55:20 crc kubenswrapper[4922]: [-]has-synced failed: reason withheld
Nov 28 06:55:20 crc kubenswrapper[4922]: [+]process-running ok
Nov 28 06:55:20 crc kubenswrapper[4922]: healthz check failed
Nov 28 06:55:20 crc kubenswrapper[4922]: I1128 06:55:20.711481 4922 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-xbhv9" podUID="87cf54e1-7498-430e-a517-6658bd9ce547" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Nov 28 06:55:20 crc kubenswrapper[4922]: I1128 06:55:20.779965 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver/apiserver-76f77b778f-ht2sn" event={"ID":"468ca9dc-6af3-47ae-8c7c-b9338ceae695","Type":"ContainerStarted","Data":"0782c50fc03159a6820308384e3ebbaf804f6f80c9076c7107c25c8a069f80af"}
Nov 28 06:55:20 crc kubenswrapper[4922]: I1128 06:55:20.783582 4922 generic.go:334] "Generic (PLEG): container finished" podID="cca6c3a6-ee5e-4d4c-8a39-68b087664c86" containerID="38a4d46d7f6531265e06c0f449254d2a6ca6d9dfe9697109abbeced1164e7ecf" exitCode=0
Nov 28 06:55:20 crc kubenswrapper[4922]: I1128 06:55:20.783645 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-lftkx" event={"ID":"cca6c3a6-ee5e-4d4c-8a39-68b087664c86","Type":"ContainerDied","Data":"38a4d46d7f6531265e06c0f449254d2a6ca6d9dfe9697109abbeced1164e7ecf"}
Nov 28 06:55:20 crc kubenswrapper[4922]: I1128 06:55:20.783684 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-lftkx" event={"ID":"cca6c3a6-ee5e-4d4c-8a39-68b087664c86","Type":"ContainerStarted","Data":"9d41d00d4aac916db205d1d9e9fd2ab2bd85baf4d7f531131860f407d2d7faf5"}
Nov 28 06:55:20 crc kubenswrapper[4922]: I1128 06:55:20.785146 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-sckww\" (UID: \"dd20079d-34c4-4ea6-920f-420a1d6bb863\") " pod="openshift-image-registry/image-registry-697d97f7c8-sckww"
Nov 28 06:55:20 crc kubenswrapper[4922]: I1128 06:55:20.792742 4922 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice...
Nov 28 06:55:20 crc kubenswrapper[4922]: I1128 06:55:20.792782 4922 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-sckww\" (UID: \"dd20079d-34c4-4ea6-920f-420a1d6bb863\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983/globalmount\"" pod="openshift-image-registry/image-registry-697d97f7c8-sckww"
Nov 28 06:55:20 crc kubenswrapper[4922]: I1128 06:55:20.795623 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/network-metrics-daemon-9kfr9" event={"ID":"709beb43-ed88-4a0a-b384-0c463e469964","Type":"ContainerStarted","Data":"fa14bc0bd6da65edb1e1b10d64971dbaf9dbab4348a4b754906e678e8d9a8586"}
Nov 28 06:55:20 crc kubenswrapper[4922]: I1128 06:55:20.795684 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/network-metrics-daemon-9kfr9" event={"ID":"709beb43-ed88-4a0a-b384-0c463e469964","Type":"ContainerStarted","Data":"edc777267050e8b521d5108bd84372f3bd11c4261eaeadd5c54e08c5d41d64ce"}
Nov 28 06:55:20 crc kubenswrapper[4922]: I1128 06:55:20.796362 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-qgkkf" event={"ID":"915bf055-c217-4565-a245-8901b61def3e","Type":"ContainerStarted","Data":"e665c05be32adc1767950893da756e642519d3474d3f7fbbe0904ed099f93d97"}
Nov 28 06:55:20 crc kubenswrapper[4922]: I1128 06:55:20.809876 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-djvcb" event={"ID":"04c1fcf2-5298-423f-ab9e-56043ced3e2f","Type":"ContainerStarted","Data":"9b1e9f3fb22a4aae476169e4933399dfa9a749d94f3fd20475922306c3ef9b1c"}
Nov 28 06:55:20 crc kubenswrapper[4922]: I1128 06:55:20.810199 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-djvcb" event={"ID":"04c1fcf2-5298-423f-ab9e-56043ced3e2f","Type":"ContainerStarted","Data":"299d297013c1bb0ada07e53abb85f3ccdae14b6b466e0936694b60abf5ffdc8e"}
Nov 28 06:55:20 crc kubenswrapper[4922]: I1128 06:55:20.814735 4922 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-apiserver/apiserver-76f77b778f-ht2sn" podStartSLOduration=146.814712979 podStartE2EDuration="2m26.814712979s" podCreationTimestamp="2025-11-28 06:52:54 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 06:55:20.802392723 +0000 UTC m=+165.722788315" watchObservedRunningTime="2025-11-28 06:55:20.814712979 +0000 UTC m=+165.735108561"
Nov 28 06:55:20 crc kubenswrapper[4922]: I1128 06:55:20.827138 4922 generic.go:334] "Generic (PLEG): container finished" podID="5254d7c5-8faa-4ede-a82a-210426648d02" containerID="2b5d43a376f6a82f5408ac3196abad5931d58a91aa5f9bbdfcb33436a7966902" exitCode=0
Nov 28 06:55:20 crc kubenswrapper[4922]: I1128 06:55:20.827208 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-d9mhp" event={"ID":"5254d7c5-8faa-4ede-a82a-210426648d02","Type":"ContainerDied","Data":"2b5d43a376f6a82f5408ac3196abad5931d58a91aa5f9bbdfcb33436a7966902"}
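
With the driver registered and validated (csi_plugin.go, versions: 1.0.0), the long-failing MountDevice finally runs and succeeds immediately: the hostpath provisioner does not advertise the STAGE_UNSTAGE_VOLUME node capability, so the kubelet skips the staging step and proceeds to SetUp. A sketch of that capability query against a CSI node service over gRPC, using the public CSI protobuf API; the socket path comes from the log, everything else is illustrative rather than kubelet's csi_attacher code:

    package main

    import (
    	"context"
    	"log"

    	csi "github.com/container-storage-interface/spec/lib/go/csi"
    	"google.golang.org/grpc"
    	"google.golang.org/grpc/credentials/insecure"
    )

    // Ask a CSI node plugin whether it supports STAGE_UNSTAGE_VOLUME.
    // When it does not (as here), the caller can skip NodeStageVolume,
    // which is what "STAGE_UNSTAGE_VOLUME capability not set. Skipping
    // MountDevice..." records above.
    func main() {
    	conn, err := grpc.Dial("unix:///var/lib/kubelet/plugins/csi-hostpath/csi.sock",
    		grpc.WithTransportCredentials(insecure.NewCredentials()))
    	if err != nil {
    		log.Fatal(err)
    	}
    	defer conn.Close()

    	resp, err := csi.NewNodeClient(conn).NodeGetCapabilities(
    		context.Background(), &csi.NodeGetCapabilitiesRequest{})
    	if err != nil {
    		log.Fatal(err)
    	}
    	stage := false
    	for _, c := range resp.GetCapabilities() {
    		if c.GetRpc().GetType() == csi.NodeServiceCapability_RPC_STAGE_UNSTAGE_VOLUME {
    			stage = true
    		}
    	}
    	log.Printf("STAGE_UNSTAGE_VOLUME supported: %v (if false, skip NodeStageVolume)", stage)
    }
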
event={"ID":"5254d7c5-8faa-4ede-a82a-210426648d02","Type":"ContainerDied","Data":"2b5d43a376f6a82f5408ac3196abad5931d58a91aa5f9bbdfcb33436a7966902"} Nov 28 06:55:20 crc kubenswrapper[4922]: I1128 06:55:20.827253 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-d9mhp" event={"ID":"5254d7c5-8faa-4ede-a82a-210426648d02","Type":"ContainerStarted","Data":"d653cc0c4757606d6c20b92dfcea5b844f77cde5814e443596ccc4c95323fd49"} Nov 28 06:55:20 crc kubenswrapper[4922]: I1128 06:55:20.830074 4922 generic.go:334] "Generic (PLEG): container finished" podID="d2e80403-da8b-49c0-9007-aa0c6c1be47a" containerID="6f2e930817c447b4c179490d2cf72b43d8a69650d7057e2b014c4b693a67799a" exitCode=0 Nov 28 06:55:20 crc kubenswrapper[4922]: I1128 06:55:20.830625 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-ncbvn" event={"ID":"d2e80403-da8b-49c0-9007-aa0c6c1be47a","Type":"ContainerDied","Data":"6f2e930817c447b4c179490d2cf72b43d8a69650d7057e2b014c4b693a67799a"} Nov 28 06:55:20 crc kubenswrapper[4922]: I1128 06:55:20.834067 4922 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-multus/network-metrics-daemon-9kfr9" podStartSLOduration=146.83404745 podStartE2EDuration="2m26.83404745s" podCreationTimestamp="2025-11-28 06:52:54 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 06:55:20.831715045 +0000 UTC m=+165.752110637" watchObservedRunningTime="2025-11-28 06:55:20.83404745 +0000 UTC m=+165.754443032" Nov 28 06:55:20 crc kubenswrapper[4922]: I1128 06:55:20.867171 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-sckww\" (UID: \"dd20079d-34c4-4ea6-920f-420a1d6bb863\") " pod="openshift-image-registry/image-registry-697d97f7c8-sckww" Nov 28 06:55:21 crc kubenswrapper[4922]: I1128 06:55:21.043522 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-wr979"] Nov 28 06:55:21 crc kubenswrapper[4922]: W1128 06:55:21.073403 4922 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod069013ed_12e9_4b49_a869_b6956f06ec15.slice/crio-f21c8e70b12eaa4d571ae501dba9aebf7a24687f9db9f06e6fbac47e804ff135 WatchSource:0}: Error finding container f21c8e70b12eaa4d571ae501dba9aebf7a24687f9db9f06e6fbac47e804ff135: Status 404 returned error can't find the container with id f21c8e70b12eaa4d571ae501dba9aebf7a24687f9db9f06e6fbac47e804ff135 Nov 28 06:55:21 crc kubenswrapper[4922]: I1128 06:55:21.132280 4922 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/image-registry-697d97f7c8-sckww" Nov 28 06:55:21 crc kubenswrapper[4922]: I1128 06:55:21.412823 4922 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8f668bae-612b-4b75-9490-919e737c6a3b" path="/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes" Nov 28 06:55:21 crc kubenswrapper[4922]: I1128 06:55:21.553139 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-sckww"] Nov 28 06:55:21 crc kubenswrapper[4922]: I1128 06:55:21.709806 4922 patch_prober.go:28] interesting pod/router-default-5444994796-xbhv9 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 28 06:55:21 crc kubenswrapper[4922]: [-]has-synced failed: reason withheld Nov 28 06:55:21 crc kubenswrapper[4922]: [+]process-running ok Nov 28 06:55:21 crc kubenswrapper[4922]: healthz check failed Nov 28 06:55:21 crc kubenswrapper[4922]: I1128 06:55:21.709879 4922 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-xbhv9" podUID="87cf54e1-7498-430e-a517-6658bd9ce547" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 28 06:55:21 crc kubenswrapper[4922]: I1128 06:55:21.870291 4922 generic.go:334] "Generic (PLEG): container finished" podID="765b0f79-4316-4aab-90fa-a2aaa84380f0" containerID="0cb5619676c403739d55e20f2d790f7119e146f00eb6cb6ab6e58f20f205afb0" exitCode=0 Nov 28 06:55:21 crc kubenswrapper[4922]: I1128 06:55:21.870325 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29405205-gln9g" event={"ID":"765b0f79-4316-4aab-90fa-a2aaa84380f0","Type":"ContainerDied","Data":"0cb5619676c403739d55e20f2d790f7119e146f00eb6cb6ab6e58f20f205afb0"} Nov 28 06:55:21 crc kubenswrapper[4922]: I1128 06:55:21.879454 4922 generic.go:334] "Generic (PLEG): container finished" podID="915bf055-c217-4565-a245-8901b61def3e" containerID="6c13fe9d4161a143dd9a2d740a66b64d1ca55c775f390eda39e3ec90b8d49db5" exitCode=0 Nov 28 06:55:21 crc kubenswrapper[4922]: I1128 06:55:21.879530 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-qgkkf" event={"ID":"915bf055-c217-4565-a245-8901b61def3e","Type":"ContainerDied","Data":"6c13fe9d4161a143dd9a2d740a66b64d1ca55c775f390eda39e3ec90b8d49db5"} Nov 28 06:55:21 crc kubenswrapper[4922]: I1128 06:55:21.885163 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-djvcb" event={"ID":"04c1fcf2-5298-423f-ab9e-56043ced3e2f","Type":"ContainerStarted","Data":"64926c06dfd63ae129f1248e052916fe230cb350922f6c3c2a3acb363eae53c6"} Nov 28 06:55:21 crc kubenswrapper[4922]: I1128 06:55:21.905979 4922 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="hostpath-provisioner/csi-hostpathplugin-djvcb" podStartSLOduration=11.905964158 podStartE2EDuration="11.905964158s" podCreationTimestamp="2025-11-28 06:55:10 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 06:55:21.90494151 +0000 UTC m=+166.825337092" watchObservedRunningTime="2025-11-28 06:55:21.905964158 +0000 UTC m=+166.826359740" Nov 28 06:55:21 crc kubenswrapper[4922]: I1128 06:55:21.920195 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-image-registry/image-registry-697d97f7c8-sckww" event={"ID":"dd20079d-34c4-4ea6-920f-420a1d6bb863","Type":"ContainerStarted","Data":"91b1a67cc569943354c6ac89ad71588609e5aef1403b1d93aee31aca338fe5df"} Nov 28 06:55:21 crc kubenswrapper[4922]: I1128 06:55:21.925032 4922 generic.go:334] "Generic (PLEG): container finished" podID="069013ed-12e9-4b49-a869-b6956f06ec15" containerID="5f7ba48771951a377d888de6709c7da8e9c761bf125f52b744f9da0f2265302c" exitCode=0 Nov 28 06:55:21 crc kubenswrapper[4922]: I1128 06:55:21.925184 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-wr979" event={"ID":"069013ed-12e9-4b49-a869-b6956f06ec15","Type":"ContainerDied","Data":"5f7ba48771951a377d888de6709c7da8e9c761bf125f52b744f9da0f2265302c"} Nov 28 06:55:21 crc kubenswrapper[4922]: I1128 06:55:21.925669 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-wr979" event={"ID":"069013ed-12e9-4b49-a869-b6956f06ec15","Type":"ContainerStarted","Data":"f21c8e70b12eaa4d571ae501dba9aebf7a24687f9db9f06e6fbac47e804ff135"} Nov 28 06:55:22 crc kubenswrapper[4922]: I1128 06:55:22.198690 4922 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console/downloads-7954f5f757-2xxgj" Nov 28 06:55:22 crc kubenswrapper[4922]: I1128 06:55:22.475210 4922 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-7gdxt" Nov 28 06:55:22 crc kubenswrapper[4922]: I1128 06:55:22.706169 4922 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ingress/router-default-5444994796-xbhv9" Nov 28 06:55:22 crc kubenswrapper[4922]: I1128 06:55:22.709581 4922 patch_prober.go:28] interesting pod/router-default-5444994796-xbhv9 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 28 06:55:22 crc kubenswrapper[4922]: [-]has-synced failed: reason withheld Nov 28 06:55:22 crc kubenswrapper[4922]: [+]process-running ok Nov 28 06:55:22 crc kubenswrapper[4922]: healthz check failed Nov 28 06:55:22 crc kubenswrapper[4922]: I1128 06:55:22.709690 4922 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-xbhv9" podUID="87cf54e1-7498-430e-a517-6658bd9ce547" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 28 06:55:22 crc kubenswrapper[4922]: I1128 06:55:22.899202 4922 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-apiserver/apiserver-76f77b778f-ht2sn" Nov 28 06:55:22 crc kubenswrapper[4922]: I1128 06:55:22.899289 4922 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-apiserver/apiserver-76f77b778f-ht2sn" Nov 28 06:55:22 crc kubenswrapper[4922]: I1128 06:55:22.905538 4922 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-apiserver/apiserver-76f77b778f-ht2sn" Nov 28 06:55:22 crc kubenswrapper[4922]: I1128 06:55:22.949300 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-697d97f7c8-sckww" event={"ID":"dd20079d-34c4-4ea6-920f-420a1d6bb863","Type":"ContainerStarted","Data":"0306f5f1cf36667e60609f05cb41f1eed0c4d0aa5a4fee9cdeab898377c19b9c"} Nov 28 06:55:22 crc kubenswrapper[4922]: I1128 06:55:22.949631 4922 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" 
pod="openshift-image-registry/image-registry-697d97f7c8-sckww" Nov 28 06:55:22 crc kubenswrapper[4922]: I1128 06:55:22.953087 4922 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-apiserver/apiserver-76f77b778f-ht2sn" Nov 28 06:55:22 crc kubenswrapper[4922]: I1128 06:55:22.979162 4922 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-image-registry/image-registry-697d97f7c8-sckww" podStartSLOduration=148.979135842 podStartE2EDuration="2m28.979135842s" podCreationTimestamp="2025-11-28 06:52:54 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 06:55:22.975162611 +0000 UTC m=+167.895558203" watchObservedRunningTime="2025-11-28 06:55:22.979135842 +0000 UTC m=+167.899531444" Nov 28 06:55:23 crc kubenswrapper[4922]: I1128 06:55:23.289034 4922 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29405205-gln9g" Nov 28 06:55:23 crc kubenswrapper[4922]: I1128 06:55:23.338528 4922 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console/console-f9d7485db-h297g" Nov 28 06:55:23 crc kubenswrapper[4922]: I1128 06:55:23.338568 4922 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-console/console-f9d7485db-h297g" Nov 28 06:55:23 crc kubenswrapper[4922]: I1128 06:55:23.342593 4922 patch_prober.go:28] interesting pod/console-f9d7485db-h297g container/console namespace/openshift-console: Startup probe status=failure output="Get \"https://10.217.0.20:8443/health\": dial tcp 10.217.0.20:8443: connect: connection refused" start-of-body= Nov 28 06:55:23 crc kubenswrapper[4922]: I1128 06:55:23.342653 4922 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-console/console-f9d7485db-h297g" podUID="7de83d0f-3269-4343-b2a9-398b8d4af4fc" containerName="console" probeResult="failure" output="Get \"https://10.217.0.20:8443/health\": dial tcp 10.217.0.20:8443: connect: connection refused" Nov 28 06:55:23 crc kubenswrapper[4922]: I1128 06:55:23.349816 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/765b0f79-4316-4aab-90fa-a2aaa84380f0-config-volume\") pod \"765b0f79-4316-4aab-90fa-a2aaa84380f0\" (UID: \"765b0f79-4316-4aab-90fa-a2aaa84380f0\") " Nov 28 06:55:23 crc kubenswrapper[4922]: I1128 06:55:23.349900 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/765b0f79-4316-4aab-90fa-a2aaa84380f0-secret-volume\") pod \"765b0f79-4316-4aab-90fa-a2aaa84380f0\" (UID: \"765b0f79-4316-4aab-90fa-a2aaa84380f0\") " Nov 28 06:55:23 crc kubenswrapper[4922]: I1128 06:55:23.349956 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dwh96\" (UniqueName: \"kubernetes.io/projected/765b0f79-4316-4aab-90fa-a2aaa84380f0-kube-api-access-dwh96\") pod \"765b0f79-4316-4aab-90fa-a2aaa84380f0\" (UID: \"765b0f79-4316-4aab-90fa-a2aaa84380f0\") " Nov 28 06:55:23 crc kubenswrapper[4922]: I1128 06:55:23.351638 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/765b0f79-4316-4aab-90fa-a2aaa84380f0-config-volume" (OuterVolumeSpecName: "config-volume") pod "765b0f79-4316-4aab-90fa-a2aaa84380f0" (UID: "765b0f79-4316-4aab-90fa-a2aaa84380f0"). 
InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 06:55:23 crc kubenswrapper[4922]: I1128 06:55:23.358573 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/765b0f79-4316-4aab-90fa-a2aaa84380f0-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "765b0f79-4316-4aab-90fa-a2aaa84380f0" (UID: "765b0f79-4316-4aab-90fa-a2aaa84380f0"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 06:55:23 crc kubenswrapper[4922]: I1128 06:55:23.360761 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/765b0f79-4316-4aab-90fa-a2aaa84380f0-kube-api-access-dwh96" (OuterVolumeSpecName: "kube-api-access-dwh96") pod "765b0f79-4316-4aab-90fa-a2aaa84380f0" (UID: "765b0f79-4316-4aab-90fa-a2aaa84380f0"). InnerVolumeSpecName "kube-api-access-dwh96". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 06:55:23 crc kubenswrapper[4922]: I1128 06:55:23.452655 4922 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/765b0f79-4316-4aab-90fa-a2aaa84380f0-config-volume\") on node \"crc\" DevicePath \"\"" Nov 28 06:55:23 crc kubenswrapper[4922]: I1128 06:55:23.452944 4922 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/765b0f79-4316-4aab-90fa-a2aaa84380f0-secret-volume\") on node \"crc\" DevicePath \"\"" Nov 28 06:55:23 crc kubenswrapper[4922]: I1128 06:55:23.452955 4922 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dwh96\" (UniqueName: \"kubernetes.io/projected/765b0f79-4316-4aab-90fa-a2aaa84380f0-kube-api-access-dwh96\") on node \"crc\" DevicePath \"\"" Nov 28 06:55:23 crc kubenswrapper[4922]: I1128 06:55:23.554098 4922 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-controller-manager/revision-pruner-9-crc"] Nov 28 06:55:23 crc kubenswrapper[4922]: E1128 06:55:23.554310 4922 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="765b0f79-4316-4aab-90fa-a2aaa84380f0" containerName="collect-profiles" Nov 28 06:55:23 crc kubenswrapper[4922]: I1128 06:55:23.554323 4922 state_mem.go:107] "Deleted CPUSet assignment" podUID="765b0f79-4316-4aab-90fa-a2aaa84380f0" containerName="collect-profiles" Nov 28 06:55:23 crc kubenswrapper[4922]: I1128 06:55:23.554433 4922 memory_manager.go:354] "RemoveStaleState removing state" podUID="765b0f79-4316-4aab-90fa-a2aaa84380f0" containerName="collect-profiles" Nov 28 06:55:23 crc kubenswrapper[4922]: I1128 06:55:23.559991 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-controller-manager/revision-pruner-9-crc"] Nov 28 06:55:23 crc kubenswrapper[4922]: I1128 06:55:23.560139 4922 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-9-crc" Nov 28 06:55:23 crc kubenswrapper[4922]: I1128 06:55:23.563958 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager"/"installer-sa-dockercfg-kjl2n" Nov 28 06:55:23 crc kubenswrapper[4922]: I1128 06:55:23.564355 4922 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager"/"kube-root-ca.crt" Nov 28 06:55:23 crc kubenswrapper[4922]: I1128 06:55:23.656331 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/a0f5ec8e-0fd7-4b47-ba62-fc7e00d92420-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"a0f5ec8e-0fd7-4b47-ba62-fc7e00d92420\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc" Nov 28 06:55:23 crc kubenswrapper[4922]: I1128 06:55:23.656401 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/a0f5ec8e-0fd7-4b47-ba62-fc7e00d92420-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"a0f5ec8e-0fd7-4b47-ba62-fc7e00d92420\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc" Nov 28 06:55:23 crc kubenswrapper[4922]: I1128 06:55:23.709286 4922 patch_prober.go:28] interesting pod/router-default-5444994796-xbhv9 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 28 06:55:23 crc kubenswrapper[4922]: [-]has-synced failed: reason withheld Nov 28 06:55:23 crc kubenswrapper[4922]: [+]process-running ok Nov 28 06:55:23 crc kubenswrapper[4922]: healthz check failed Nov 28 06:55:23 crc kubenswrapper[4922]: I1128 06:55:23.709387 4922 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-xbhv9" podUID="87cf54e1-7498-430e-a517-6658bd9ce547" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 28 06:55:23 crc kubenswrapper[4922]: I1128 06:55:23.757996 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/a0f5ec8e-0fd7-4b47-ba62-fc7e00d92420-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"a0f5ec8e-0fd7-4b47-ba62-fc7e00d92420\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc" Nov 28 06:55:23 crc kubenswrapper[4922]: I1128 06:55:23.758132 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/a0f5ec8e-0fd7-4b47-ba62-fc7e00d92420-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"a0f5ec8e-0fd7-4b47-ba62-fc7e00d92420\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc" Nov 28 06:55:23 crc kubenswrapper[4922]: I1128 06:55:23.758194 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/a0f5ec8e-0fd7-4b47-ba62-fc7e00d92420-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"a0f5ec8e-0fd7-4b47-ba62-fc7e00d92420\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc" Nov 28 06:55:23 crc kubenswrapper[4922]: I1128 06:55:23.811119 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: 
\"kubernetes.io/projected/a0f5ec8e-0fd7-4b47-ba62-fc7e00d92420-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"a0f5ec8e-0fd7-4b47-ba62-fc7e00d92420\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc" Nov 28 06:55:23 crc kubenswrapper[4922]: I1128 06:55:23.885682 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-9-crc" Nov 28 06:55:23 crc kubenswrapper[4922]: I1128 06:55:23.971568 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29405205-gln9g" event={"ID":"765b0f79-4316-4aab-90fa-a2aaa84380f0","Type":"ContainerDied","Data":"d0bab58658aaa82fa618f04f10d10548e4dda2c9b36b6093df3e91dca8077482"} Nov 28 06:55:23 crc kubenswrapper[4922]: I1128 06:55:23.971612 4922 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="d0bab58658aaa82fa618f04f10d10548e4dda2c9b36b6093df3e91dca8077482" Nov 28 06:55:23 crc kubenswrapper[4922]: I1128 06:55:23.971606 4922 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29405205-gln9g" Nov 28 06:55:24 crc kubenswrapper[4922]: I1128 06:55:24.346619 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-controller-manager/revision-pruner-9-crc"] Nov 28 06:55:24 crc kubenswrapper[4922]: W1128 06:55:24.353938 4922 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-poda0f5ec8e_0fd7_4b47_ba62_fc7e00d92420.slice/crio-a33f461518b843bc171c49ff6e080581de8d003c208700295255cbc07625913e WatchSource:0}: Error finding container a33f461518b843bc171c49ff6e080581de8d003c208700295255cbc07625913e: Status 404 returned error can't find the container with id a33f461518b843bc171c49ff6e080581de8d003c208700295255cbc07625913e Nov 28 06:55:24 crc kubenswrapper[4922]: I1128 06:55:24.709995 4922 patch_prober.go:28] interesting pod/router-default-5444994796-xbhv9 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 28 06:55:24 crc kubenswrapper[4922]: [-]has-synced failed: reason withheld Nov 28 06:55:24 crc kubenswrapper[4922]: [+]process-running ok Nov 28 06:55:24 crc kubenswrapper[4922]: healthz check failed Nov 28 06:55:24 crc kubenswrapper[4922]: I1128 06:55:24.710064 4922 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-xbhv9" podUID="87cf54e1-7498-430e-a517-6658bd9ce547" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 28 06:55:24 crc kubenswrapper[4922]: I1128 06:55:24.975586 4922 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/revision-pruner-8-crc"] Nov 28 06:55:24 crc kubenswrapper[4922]: I1128 06:55:24.976372 4922 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-8-crc" Nov 28 06:55:24 crc kubenswrapper[4922]: I1128 06:55:24.978205 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/revision-pruner-8-crc"] Nov 28 06:55:24 crc kubenswrapper[4922]: I1128 06:55:24.981111 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver"/"installer-sa-dockercfg-5pr6n" Nov 28 06:55:24 crc kubenswrapper[4922]: I1128 06:55:24.981283 4922 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver"/"kube-root-ca.crt" Nov 28 06:55:25 crc kubenswrapper[4922]: I1128 06:55:25.038046 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/revision-pruner-9-crc" event={"ID":"a0f5ec8e-0fd7-4b47-ba62-fc7e00d92420","Type":"ContainerStarted","Data":"a33f461518b843bc171c49ff6e080581de8d003c208700295255cbc07625913e"} Nov 28 06:55:25 crc kubenswrapper[4922]: I1128 06:55:25.182542 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/94fc9125-ad02-4332-83b8-fe82cd247ec5-kubelet-dir\") pod \"revision-pruner-8-crc\" (UID: \"94fc9125-ad02-4332-83b8-fe82cd247ec5\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Nov 28 06:55:25 crc kubenswrapper[4922]: I1128 06:55:25.182610 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/94fc9125-ad02-4332-83b8-fe82cd247ec5-kube-api-access\") pod \"revision-pruner-8-crc\" (UID: \"94fc9125-ad02-4332-83b8-fe82cd247ec5\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Nov 28 06:55:25 crc kubenswrapper[4922]: I1128 06:55:25.284261 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/94fc9125-ad02-4332-83b8-fe82cd247ec5-kubelet-dir\") pod \"revision-pruner-8-crc\" (UID: \"94fc9125-ad02-4332-83b8-fe82cd247ec5\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Nov 28 06:55:25 crc kubenswrapper[4922]: I1128 06:55:25.284329 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/94fc9125-ad02-4332-83b8-fe82cd247ec5-kube-api-access\") pod \"revision-pruner-8-crc\" (UID: \"94fc9125-ad02-4332-83b8-fe82cd247ec5\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Nov 28 06:55:25 crc kubenswrapper[4922]: I1128 06:55:25.284421 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/94fc9125-ad02-4332-83b8-fe82cd247ec5-kubelet-dir\") pod \"revision-pruner-8-crc\" (UID: \"94fc9125-ad02-4332-83b8-fe82cd247ec5\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Nov 28 06:55:25 crc kubenswrapper[4922]: I1128 06:55:25.317639 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/94fc9125-ad02-4332-83b8-fe82cd247ec5-kube-api-access\") pod \"revision-pruner-8-crc\" (UID: \"94fc9125-ad02-4332-83b8-fe82cd247ec5\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Nov 28 06:55:25 crc kubenswrapper[4922]: I1128 06:55:25.591421 4922 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-8-crc" Nov 28 06:55:25 crc kubenswrapper[4922]: I1128 06:55:25.709904 4922 patch_prober.go:28] interesting pod/router-default-5444994796-xbhv9 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 28 06:55:25 crc kubenswrapper[4922]: [-]has-synced failed: reason withheld Nov 28 06:55:25 crc kubenswrapper[4922]: [+]process-running ok Nov 28 06:55:25 crc kubenswrapper[4922]: healthz check failed Nov 28 06:55:25 crc kubenswrapper[4922]: I1128 06:55:25.709974 4922 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-xbhv9" podUID="87cf54e1-7498-430e-a517-6658bd9ce547" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 28 06:55:25 crc kubenswrapper[4922]: I1128 06:55:25.861352 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/revision-pruner-8-crc"] Nov 28 06:55:25 crc kubenswrapper[4922]: W1128 06:55:25.893990 4922 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-pod94fc9125_ad02_4332_83b8_fe82cd247ec5.slice/crio-cea5d0a6841df33be496b590c01a0d6299d549e5a52415d4cdaab52c1c50a6da WatchSource:0}: Error finding container cea5d0a6841df33be496b590c01a0d6299d549e5a52415d4cdaab52c1c50a6da: Status 404 returned error can't find the container with id cea5d0a6841df33be496b590c01a0d6299d549e5a52415d4cdaab52c1c50a6da Nov 28 06:55:26 crc kubenswrapper[4922]: I1128 06:55:26.058469 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-8-crc" event={"ID":"94fc9125-ad02-4332-83b8-fe82cd247ec5","Type":"ContainerStarted","Data":"cea5d0a6841df33be496b590c01a0d6299d549e5a52415d4cdaab52c1c50a6da"} Nov 28 06:55:26 crc kubenswrapper[4922]: I1128 06:55:26.063844 4922 generic.go:334] "Generic (PLEG): container finished" podID="a0f5ec8e-0fd7-4b47-ba62-fc7e00d92420" containerID="1cec87aa1b0c42a1b7aa6926495a6af6283c84242876a01bff67e4eeda94090f" exitCode=0 Nov 28 06:55:26 crc kubenswrapper[4922]: I1128 06:55:26.063879 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/revision-pruner-9-crc" event={"ID":"a0f5ec8e-0fd7-4b47-ba62-fc7e00d92420","Type":"ContainerDied","Data":"1cec87aa1b0c42a1b7aa6926495a6af6283c84242876a01bff67e4eeda94090f"} Nov 28 06:55:26 crc kubenswrapper[4922]: I1128 06:55:26.709111 4922 patch_prober.go:28] interesting pod/router-default-5444994796-xbhv9 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 28 06:55:26 crc kubenswrapper[4922]: [-]has-synced failed: reason withheld Nov 28 06:55:26 crc kubenswrapper[4922]: [+]process-running ok Nov 28 06:55:26 crc kubenswrapper[4922]: healthz check failed Nov 28 06:55:26 crc kubenswrapper[4922]: I1128 06:55:26.709175 4922 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-xbhv9" podUID="87cf54e1-7498-430e-a517-6658bd9ce547" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 28 06:55:27 crc kubenswrapper[4922]: I1128 06:55:27.311926 4922 patch_prober.go:28] interesting pod/machine-config-daemon-h8wk6 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe 
status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 06:55:27 crc kubenswrapper[4922]: I1128 06:55:27.312268 4922 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-h8wk6" podUID="0498340a-5e95-42bf-a0a6-8ac89a6b8858" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 06:55:27 crc kubenswrapper[4922]: I1128 06:55:27.708557 4922 patch_prober.go:28] interesting pod/router-default-5444994796-xbhv9 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 28 06:55:27 crc kubenswrapper[4922]: [-]has-synced failed: reason withheld Nov 28 06:55:27 crc kubenswrapper[4922]: [+]process-running ok Nov 28 06:55:27 crc kubenswrapper[4922]: healthz check failed Nov 28 06:55:27 crc kubenswrapper[4922]: I1128 06:55:27.708629 4922 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-xbhv9" podUID="87cf54e1-7498-430e-a517-6658bd9ce547" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 28 06:55:28 crc kubenswrapper[4922]: I1128 06:55:28.441660 4922 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-dns/dns-default-62v62" Nov 28 06:55:28 crc kubenswrapper[4922]: I1128 06:55:28.708012 4922 patch_prober.go:28] interesting pod/router-default-5444994796-xbhv9 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 28 06:55:28 crc kubenswrapper[4922]: [-]has-synced failed: reason withheld Nov 28 06:55:28 crc kubenswrapper[4922]: [+]process-running ok Nov 28 06:55:28 crc kubenswrapper[4922]: healthz check failed Nov 28 06:55:28 crc kubenswrapper[4922]: I1128 06:55:28.708330 4922 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-xbhv9" podUID="87cf54e1-7498-430e-a517-6658bd9ce547" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 28 06:55:29 crc kubenswrapper[4922]: I1128 06:55:29.744191 4922 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-ingress/router-default-5444994796-xbhv9" Nov 28 06:55:29 crc kubenswrapper[4922]: I1128 06:55:29.747851 4922 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ingress/router-default-5444994796-xbhv9" Nov 28 06:55:34 crc kubenswrapper[4922]: I1128 06:55:34.172501 4922 patch_prober.go:28] interesting pod/console-f9d7485db-h297g container/console namespace/openshift-console: Startup probe status=failure output="Get \"https://10.217.0.20:8443/health\": dial tcp 10.217.0.20:8443: connect: connection refused" start-of-body= Nov 28 06:55:34 crc kubenswrapper[4922]: I1128 06:55:34.172835 4922 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-console/console-f9d7485db-h297g" podUID="7de83d0f-3269-4343-b2a9-398b8d4af4fc" containerName="console" probeResult="failure" output="Get \"https://10.217.0.20:8443/health\": dial tcp 10.217.0.20:8443: connect: connection refused" Nov 28 06:55:34 crc kubenswrapper[4922]: I1128 06:55:34.686378 4922 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-9-crc" Nov 28 06:55:34 crc kubenswrapper[4922]: I1128 06:55:34.880018 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/a0f5ec8e-0fd7-4b47-ba62-fc7e00d92420-kube-api-access\") pod \"a0f5ec8e-0fd7-4b47-ba62-fc7e00d92420\" (UID: \"a0f5ec8e-0fd7-4b47-ba62-fc7e00d92420\") " Nov 28 06:55:34 crc kubenswrapper[4922]: I1128 06:55:34.880153 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/a0f5ec8e-0fd7-4b47-ba62-fc7e00d92420-kubelet-dir\") pod \"a0f5ec8e-0fd7-4b47-ba62-fc7e00d92420\" (UID: \"a0f5ec8e-0fd7-4b47-ba62-fc7e00d92420\") " Nov 28 06:55:34 crc kubenswrapper[4922]: I1128 06:55:34.880245 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/a0f5ec8e-0fd7-4b47-ba62-fc7e00d92420-kubelet-dir" (OuterVolumeSpecName: "kubelet-dir") pod "a0f5ec8e-0fd7-4b47-ba62-fc7e00d92420" (UID: "a0f5ec8e-0fd7-4b47-ba62-fc7e00d92420"). InnerVolumeSpecName "kubelet-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 28 06:55:34 crc kubenswrapper[4922]: I1128 06:55:34.880385 4922 reconciler_common.go:293] "Volume detached for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/a0f5ec8e-0fd7-4b47-ba62-fc7e00d92420-kubelet-dir\") on node \"crc\" DevicePath \"\"" Nov 28 06:55:34 crc kubenswrapper[4922]: I1128 06:55:34.885567 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a0f5ec8e-0fd7-4b47-ba62-fc7e00d92420-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "a0f5ec8e-0fd7-4b47-ba62-fc7e00d92420" (UID: "a0f5ec8e-0fd7-4b47-ba62-fc7e00d92420"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 06:55:34 crc kubenswrapper[4922]: I1128 06:55:34.981530 4922 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/a0f5ec8e-0fd7-4b47-ba62-fc7e00d92420-kube-api-access\") on node \"crc\" DevicePath \"\"" Nov 28 06:55:35 crc kubenswrapper[4922]: I1128 06:55:35.192239 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/revision-pruner-9-crc" event={"ID":"a0f5ec8e-0fd7-4b47-ba62-fc7e00d92420","Type":"ContainerDied","Data":"a33f461518b843bc171c49ff6e080581de8d003c208700295255cbc07625913e"} Nov 28 06:55:35 crc kubenswrapper[4922]: I1128 06:55:35.192280 4922 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="a33f461518b843bc171c49ff6e080581de8d003c208700295255cbc07625913e" Nov 28 06:55:35 crc kubenswrapper[4922]: I1128 06:55:35.192360 4922 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-9-crc" Nov 28 06:55:41 crc kubenswrapper[4922]: I1128 06:55:41.139330 4922 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-image-registry/image-registry-697d97f7c8-sckww" Nov 28 06:55:43 crc kubenswrapper[4922]: I1128 06:55:43.294028 4922 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 06:55:43 crc kubenswrapper[4922]: I1128 06:55:43.385033 4922 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-console/console-f9d7485db-h297g" Nov 28 06:55:43 crc kubenswrapper[4922]: I1128 06:55:43.393792 4922 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console/console-f9d7485db-h297g" Nov 28 06:55:52 crc kubenswrapper[4922]: I1128 06:55:52.629195 4922 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-h7pf2" Nov 28 06:55:54 crc kubenswrapper[4922]: E1128 06:55:54.452993 4922 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/certified-operator-index:v4.18" Nov 28 06:55:54 crc kubenswrapper[4922]: E1128 06:55:54.453345 4922 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/certified-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-xc26q,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod certified-operators-lppzs_openshift-marketplace(b82398ae-40c9-40dc-8775-65f999dac1a8): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Nov 28 06:55:54 crc kubenswrapper[4922]: E1128 06:55:54.455499 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from 
manifest list: copying config: context canceled\"" pod="openshift-marketplace/certified-operators-lppzs" podUID="b82398ae-40c9-40dc-8775-65f999dac1a8" Nov 28 06:55:55 crc kubenswrapper[4922]: E1128 06:55:55.850710 4922 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/certified-operator-index:v4.18" Nov 28 06:55:55 crc kubenswrapper[4922]: E1128 06:55:55.850896 4922 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/certified-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-tvhbg,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod certified-operators-ncbvn_openshift-marketplace(d2e80403-da8b-49c0-9007-aa0c6c1be47a): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Nov 28 06:55:55 crc kubenswrapper[4922]: E1128 06:55:55.852104 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/certified-operators-ncbvn" podUID="d2e80403-da8b-49c0-9007-aa0c6c1be47a" Nov 28 06:55:56 crc kubenswrapper[4922]: E1128 06:55:56.007820 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"\"" pod="openshift-marketplace/certified-operators-lppzs" podUID="b82398ae-40c9-40dc-8775-65f999dac1a8" Nov 28 06:55:56 crc kubenswrapper[4922]: E1128 06:55:56.318035 4922 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/community-operator-index:v4.18" Nov 28 06:55:56 crc kubenswrapper[4922]: E1128 06:55:56.318500 4922 
kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/community-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-7w28p,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod community-operators-fww6r_openshift-marketplace(23771385-c219-4713-9c79-d4802b2f13a7): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Nov 28 06:55:56 crc kubenswrapper[4922]: E1128 06:55:56.319935 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/community-operators-fww6r" podUID="23771385-c219-4713-9c79-d4802b2f13a7" Nov 28 06:55:57 crc kubenswrapper[4922]: I1128 06:55:57.311501 4922 patch_prober.go:28] interesting pod/machine-config-daemon-h8wk6 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 06:55:57 crc kubenswrapper[4922]: I1128 06:55:57.311599 4922 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-h8wk6" podUID="0498340a-5e95-42bf-a0a6-8ac89a6b8858" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 06:55:57 crc kubenswrapper[4922]: I1128 06:55:57.311681 4922 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-h8wk6" Nov 28 06:55:57 crc kubenswrapper[4922]: I1128 06:55:57.312442 4922 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"b903fc60349ec7d5004f23ec933dce13d646a2c343819495564005dfb4813314"} 
pod="openshift-machine-config-operator/machine-config-daemon-h8wk6" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 28 06:55:57 crc kubenswrapper[4922]: I1128 06:55:57.312564 4922 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-h8wk6" podUID="0498340a-5e95-42bf-a0a6-8ac89a6b8858" containerName="machine-config-daemon" containerID="cri-o://b903fc60349ec7d5004f23ec933dce13d646a2c343819495564005dfb4813314" gracePeriod=600 Nov 28 06:55:57 crc kubenswrapper[4922]: E1128 06:55:57.387698 4922 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod0498340a_5e95_42bf_a0a6_8ac89a6b8858.slice/crio-b903fc60349ec7d5004f23ec933dce13d646a2c343819495564005dfb4813314.scope\": RecentStats: unable to find data in memory cache]" Nov 28 06:55:58 crc kubenswrapper[4922]: I1128 06:55:58.354810 4922 generic.go:334] "Generic (PLEG): container finished" podID="0498340a-5e95-42bf-a0a6-8ac89a6b8858" containerID="b903fc60349ec7d5004f23ec933dce13d646a2c343819495564005dfb4813314" exitCode=0 Nov 28 06:55:58 crc kubenswrapper[4922]: I1128 06:55:58.354892 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-h8wk6" event={"ID":"0498340a-5e95-42bf-a0a6-8ac89a6b8858","Type":"ContainerDied","Data":"b903fc60349ec7d5004f23ec933dce13d646a2c343819495564005dfb4813314"} Nov 28 06:55:59 crc kubenswrapper[4922]: E1128 06:55:59.148691 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"\"" pod="openshift-marketplace/community-operators-fww6r" podUID="23771385-c219-4713-9c79-d4802b2f13a7" Nov 28 06:55:59 crc kubenswrapper[4922]: E1128 06:55:59.148736 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"\"" pod="openshift-marketplace/certified-operators-ncbvn" podUID="d2e80403-da8b-49c0-9007-aa0c6c1be47a" Nov 28 06:55:59 crc kubenswrapper[4922]: E1128 06:55:59.213467 4922 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/redhat-operator-index:v4.18" Nov 28 06:55:59 crc kubenswrapper[4922]: E1128 06:55:59.213972 4922 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/redhat-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache 
--cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-cwk9l,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod redhat-operators-wr979_openshift-marketplace(069013ed-12e9-4b49-a869-b6956f06ec15): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Nov 28 06:55:59 crc kubenswrapper[4922]: E1128 06:55:59.215499 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/redhat-operators-wr979" podUID="069013ed-12e9-4b49-a869-b6956f06ec15" Nov 28 06:56:00 crc kubenswrapper[4922]: E1128 06:56:00.135844 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"\"" pod="openshift-marketplace/redhat-operators-wr979" podUID="069013ed-12e9-4b49-a869-b6956f06ec15" Nov 28 06:56:00 crc kubenswrapper[4922]: I1128 06:56:00.170516 4922 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/revision-pruner-9-crc"] Nov 28 06:56:00 crc kubenswrapper[4922]: E1128 06:56:00.170787 4922 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a0f5ec8e-0fd7-4b47-ba62-fc7e00d92420" containerName="pruner" Nov 28 06:56:00 crc kubenswrapper[4922]: I1128 06:56:00.170802 4922 state_mem.go:107] "Deleted CPUSet assignment" podUID="a0f5ec8e-0fd7-4b47-ba62-fc7e00d92420" containerName="pruner" Nov 28 06:56:00 crc kubenswrapper[4922]: I1128 06:56:00.170941 4922 memory_manager.go:354] "RemoveStaleState removing state" podUID="a0f5ec8e-0fd7-4b47-ba62-fc7e00d92420" containerName="pruner" Nov 28 06:56:00 crc kubenswrapper[4922]: I1128 06:56:00.171370 4922 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-9-crc" Nov 28 06:56:00 crc kubenswrapper[4922]: I1128 06:56:00.176547 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/revision-pruner-9-crc"] Nov 28 06:56:00 crc kubenswrapper[4922]: E1128 06:56:00.228870 4922 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/redhat-marketplace-index:v4.18" Nov 28 06:56:00 crc kubenswrapper[4922]: E1128 06:56:00.229011 4922 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/redhat-marketplace-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-b9phc,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod redhat-marketplace-lftkx_openshift-marketplace(cca6c3a6-ee5e-4d4c-8a39-68b087664c86): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Nov 28 06:56:00 crc kubenswrapper[4922]: E1128 06:56:00.230318 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/redhat-marketplace-lftkx" podUID="cca6c3a6-ee5e-4d4c-8a39-68b087664c86" Nov 28 06:56:00 crc kubenswrapper[4922]: E1128 06:56:00.232417 4922 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/redhat-marketplace-index:v4.18" Nov 28 06:56:00 crc kubenswrapper[4922]: E1128 06:56:00.232697 4922 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/redhat-marketplace-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache 
--cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-l27pt,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod redhat-marketplace-d9mhp_openshift-marketplace(5254d7c5-8faa-4ede-a82a-210426648d02): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Nov 28 06:56:00 crc kubenswrapper[4922]: E1128 06:56:00.234391 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/redhat-marketplace-d9mhp" podUID="5254d7c5-8faa-4ede-a82a-210426648d02" Nov 28 06:56:00 crc kubenswrapper[4922]: E1128 06:56:00.287572 4922 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/redhat-operator-index:v4.18" Nov 28 06:56:00 crc kubenswrapper[4922]: E1128 06:56:00.287727 4922 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/redhat-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache 
--cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-7h294,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod redhat-operators-qgkkf_openshift-marketplace(915bf055-c217-4565-a245-8901b61def3e): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Nov 28 06:56:00 crc kubenswrapper[4922]: E1128 06:56:00.288866 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/redhat-operators-qgkkf" podUID="915bf055-c217-4565-a245-8901b61def3e" Nov 28 06:56:00 crc kubenswrapper[4922]: E1128 06:56:00.294544 4922 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/community-operator-index:v4.18" Nov 28 06:56:00 crc kubenswrapper[4922]: E1128 06:56:00.295013 4922 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/community-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache 
--cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-sbj55,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod community-operators-vzlv9_openshift-marketplace(09d238cf-e3a6-461a-bdf8-f598c91385a3): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Nov 28 06:56:00 crc kubenswrapper[4922]: E1128 06:56:00.296236 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/community-operators-vzlv9" podUID="09d238cf-e3a6-461a-bdf8-f598c91385a3" Nov 28 06:56:00 crc kubenswrapper[4922]: I1128 06:56:00.358744 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/11f07dc5-9896-407d-ae34-69690126be1e-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"11f07dc5-9896-407d-ae34-69690126be1e\") " pod="openshift-kube-apiserver/revision-pruner-9-crc" Nov 28 06:56:00 crc kubenswrapper[4922]: I1128 06:56:00.358815 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/11f07dc5-9896-407d-ae34-69690126be1e-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"11f07dc5-9896-407d-ae34-69690126be1e\") " pod="openshift-kube-apiserver/revision-pruner-9-crc" Nov 28 06:56:00 crc kubenswrapper[4922]: I1128 06:56:00.375375 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-8-crc" event={"ID":"94fc9125-ad02-4332-83b8-fe82cd247ec5","Type":"ContainerStarted","Data":"10da1964ceb79f4c35af11be7cb9c1313395b001a134aa9ad6d609913d838f64"} Nov 28 06:56:00 crc kubenswrapper[4922]: I1128 06:56:00.377634 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-h8wk6" event={"ID":"0498340a-5e95-42bf-a0a6-8ac89a6b8858","Type":"ContainerStarted","Data":"faa2e15e685a62da719acf12fe75a2f590f6b07faa5cd9c8e0a536878c38e595"} Nov 28 06:56:00 crc kubenswrapper[4922]: E1128 06:56:00.379040 4922 pod_workers.go:1301] "Error syncing 
pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"\"" pod="openshift-marketplace/redhat-marketplace-lftkx" podUID="cca6c3a6-ee5e-4d4c-8a39-68b087664c86" Nov 28 06:56:00 crc kubenswrapper[4922]: E1128 06:56:00.379480 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"\"" pod="openshift-marketplace/community-operators-vzlv9" podUID="09d238cf-e3a6-461a-bdf8-f598c91385a3" Nov 28 06:56:00 crc kubenswrapper[4922]: E1128 06:56:00.379490 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"\"" pod="openshift-marketplace/redhat-marketplace-d9mhp" podUID="5254d7c5-8faa-4ede-a82a-210426648d02" Nov 28 06:56:00 crc kubenswrapper[4922]: E1128 06:56:00.380245 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"\"" pod="openshift-marketplace/redhat-operators-qgkkf" podUID="915bf055-c217-4565-a245-8901b61def3e" Nov 28 06:56:00 crc kubenswrapper[4922]: I1128 06:56:00.393831 4922 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver/revision-pruner-8-crc" podStartSLOduration=36.393785884 podStartE2EDuration="36.393785884s" podCreationTimestamp="2025-11-28 06:55:24 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 06:56:00.388022843 +0000 UTC m=+205.308418425" watchObservedRunningTime="2025-11-28 06:56:00.393785884 +0000 UTC m=+205.314181476" Nov 28 06:56:00 crc kubenswrapper[4922]: I1128 06:56:00.459956 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/11f07dc5-9896-407d-ae34-69690126be1e-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"11f07dc5-9896-407d-ae34-69690126be1e\") " pod="openshift-kube-apiserver/revision-pruner-9-crc" Nov 28 06:56:00 crc kubenswrapper[4922]: I1128 06:56:00.460025 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/11f07dc5-9896-407d-ae34-69690126be1e-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"11f07dc5-9896-407d-ae34-69690126be1e\") " pod="openshift-kube-apiserver/revision-pruner-9-crc" Nov 28 06:56:00 crc kubenswrapper[4922]: I1128 06:56:00.460411 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/11f07dc5-9896-407d-ae34-69690126be1e-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"11f07dc5-9896-407d-ae34-69690126be1e\") " pod="openshift-kube-apiserver/revision-pruner-9-crc" Nov 28 06:56:00 crc kubenswrapper[4922]: I1128 06:56:00.476887 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/11f07dc5-9896-407d-ae34-69690126be1e-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"11f07dc5-9896-407d-ae34-69690126be1e\") " 
pod="openshift-kube-apiserver/revision-pruner-9-crc" Nov 28 06:56:00 crc kubenswrapper[4922]: I1128 06:56:00.532192 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-9-crc" Nov 28 06:56:00 crc kubenswrapper[4922]: I1128 06:56:00.937528 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/revision-pruner-9-crc"] Nov 28 06:56:00 crc kubenswrapper[4922]: W1128 06:56:00.946385 4922 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-pod11f07dc5_9896_407d_ae34_69690126be1e.slice/crio-2dd23f19b8be9326a2ead6092ddea245ab5d0da6868d487a7da95e665169e66f WatchSource:0}: Error finding container 2dd23f19b8be9326a2ead6092ddea245ab5d0da6868d487a7da95e665169e66f: Status 404 returned error can't find the container with id 2dd23f19b8be9326a2ead6092ddea245ab5d0da6868d487a7da95e665169e66f Nov 28 06:56:01 crc kubenswrapper[4922]: I1128 06:56:01.387876 4922 generic.go:334] "Generic (PLEG): container finished" podID="94fc9125-ad02-4332-83b8-fe82cd247ec5" containerID="10da1964ceb79f4c35af11be7cb9c1313395b001a134aa9ad6d609913d838f64" exitCode=0 Nov 28 06:56:01 crc kubenswrapper[4922]: I1128 06:56:01.387948 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-8-crc" event={"ID":"94fc9125-ad02-4332-83b8-fe82cd247ec5","Type":"ContainerDied","Data":"10da1964ceb79f4c35af11be7cb9c1313395b001a134aa9ad6d609913d838f64"} Nov 28 06:56:01 crc kubenswrapper[4922]: I1128 06:56:01.392618 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-9-crc" event={"ID":"11f07dc5-9896-407d-ae34-69690126be1e","Type":"ContainerStarted","Data":"2dd23f19b8be9326a2ead6092ddea245ab5d0da6868d487a7da95e665169e66f"} Nov 28 06:56:02 crc kubenswrapper[4922]: I1128 06:56:02.402270 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-9-crc" event={"ID":"11f07dc5-9896-407d-ae34-69690126be1e","Type":"ContainerStarted","Data":"a36bde07c73789741366813164281633e4647a7dcaf265b18135611b28bc97c9"} Nov 28 06:56:02 crc kubenswrapper[4922]: I1128 06:56:02.438211 4922 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver/revision-pruner-9-crc" podStartSLOduration=2.438124771 podStartE2EDuration="2.438124771s" podCreationTimestamp="2025-11-28 06:56:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 06:56:02.427856484 +0000 UTC m=+207.348252106" watchObservedRunningTime="2025-11-28 06:56:02.438124771 +0000 UTC m=+207.358520393" Nov 28 06:56:02 crc kubenswrapper[4922]: I1128 06:56:02.835612 4922 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-8-crc" Nov 28 06:56:03 crc kubenswrapper[4922]: I1128 06:56:03.005266 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/94fc9125-ad02-4332-83b8-fe82cd247ec5-kube-api-access\") pod \"94fc9125-ad02-4332-83b8-fe82cd247ec5\" (UID: \"94fc9125-ad02-4332-83b8-fe82cd247ec5\") " Nov 28 06:56:03 crc kubenswrapper[4922]: I1128 06:56:03.005370 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/94fc9125-ad02-4332-83b8-fe82cd247ec5-kubelet-dir\") pod \"94fc9125-ad02-4332-83b8-fe82cd247ec5\" (UID: \"94fc9125-ad02-4332-83b8-fe82cd247ec5\") " Nov 28 06:56:03 crc kubenswrapper[4922]: I1128 06:56:03.005435 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/94fc9125-ad02-4332-83b8-fe82cd247ec5-kubelet-dir" (OuterVolumeSpecName: "kubelet-dir") pod "94fc9125-ad02-4332-83b8-fe82cd247ec5" (UID: "94fc9125-ad02-4332-83b8-fe82cd247ec5"). InnerVolumeSpecName "kubelet-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 28 06:56:03 crc kubenswrapper[4922]: I1128 06:56:03.005833 4922 reconciler_common.go:293] "Volume detached for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/94fc9125-ad02-4332-83b8-fe82cd247ec5-kubelet-dir\") on node \"crc\" DevicePath \"\"" Nov 28 06:56:03 crc kubenswrapper[4922]: I1128 06:56:03.015613 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/94fc9125-ad02-4332-83b8-fe82cd247ec5-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "94fc9125-ad02-4332-83b8-fe82cd247ec5" (UID: "94fc9125-ad02-4332-83b8-fe82cd247ec5"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 06:56:03 crc kubenswrapper[4922]: I1128 06:56:03.108139 4922 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/94fc9125-ad02-4332-83b8-fe82cd247ec5-kube-api-access\") on node \"crc\" DevicePath \"\"" Nov 28 06:56:03 crc kubenswrapper[4922]: I1128 06:56:03.423435 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-8-crc" event={"ID":"94fc9125-ad02-4332-83b8-fe82cd247ec5","Type":"ContainerDied","Data":"cea5d0a6841df33be496b590c01a0d6299d549e5a52415d4cdaab52c1c50a6da"} Nov 28 06:56:03 crc kubenswrapper[4922]: I1128 06:56:03.423502 4922 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="cea5d0a6841df33be496b590c01a0d6299d549e5a52415d4cdaab52c1c50a6da" Nov 28 06:56:03 crc kubenswrapper[4922]: I1128 06:56:03.423597 4922 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-8-crc" Nov 28 06:56:03 crc kubenswrapper[4922]: I1128 06:56:03.428543 4922 generic.go:334] "Generic (PLEG): container finished" podID="11f07dc5-9896-407d-ae34-69690126be1e" containerID="a36bde07c73789741366813164281633e4647a7dcaf265b18135611b28bc97c9" exitCode=0 Nov 28 06:56:03 crc kubenswrapper[4922]: I1128 06:56:03.428619 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-9-crc" event={"ID":"11f07dc5-9896-407d-ae34-69690126be1e","Type":"ContainerDied","Data":"a36bde07c73789741366813164281633e4647a7dcaf265b18135611b28bc97c9"} Nov 28 06:56:04 crc kubenswrapper[4922]: I1128 06:56:04.739381 4922 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-9-crc" Nov 28 06:56:04 crc kubenswrapper[4922]: I1128 06:56:04.933171 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/11f07dc5-9896-407d-ae34-69690126be1e-kube-api-access\") pod \"11f07dc5-9896-407d-ae34-69690126be1e\" (UID: \"11f07dc5-9896-407d-ae34-69690126be1e\") " Nov 28 06:56:04 crc kubenswrapper[4922]: I1128 06:56:04.933306 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/11f07dc5-9896-407d-ae34-69690126be1e-kubelet-dir\") pod \"11f07dc5-9896-407d-ae34-69690126be1e\" (UID: \"11f07dc5-9896-407d-ae34-69690126be1e\") " Nov 28 06:56:04 crc kubenswrapper[4922]: I1128 06:56:04.933702 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/11f07dc5-9896-407d-ae34-69690126be1e-kubelet-dir" (OuterVolumeSpecName: "kubelet-dir") pod "11f07dc5-9896-407d-ae34-69690126be1e" (UID: "11f07dc5-9896-407d-ae34-69690126be1e"). InnerVolumeSpecName "kubelet-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 28 06:56:04 crc kubenswrapper[4922]: I1128 06:56:04.940399 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/11f07dc5-9896-407d-ae34-69690126be1e-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "11f07dc5-9896-407d-ae34-69690126be1e" (UID: "11f07dc5-9896-407d-ae34-69690126be1e"). InnerVolumeSpecName "kube-api-access". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 06:56:05 crc kubenswrapper[4922]: I1128 06:56:05.035066 4922 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/11f07dc5-9896-407d-ae34-69690126be1e-kube-api-access\") on node \"crc\" DevicePath \"\"" Nov 28 06:56:05 crc kubenswrapper[4922]: I1128 06:56:05.035150 4922 reconciler_common.go:293] "Volume detached for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/11f07dc5-9896-407d-ae34-69690126be1e-kubelet-dir\") on node \"crc\" DevicePath \"\"" Nov 28 06:56:05 crc kubenswrapper[4922]: I1128 06:56:05.441418 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-9-crc" event={"ID":"11f07dc5-9896-407d-ae34-69690126be1e","Type":"ContainerDied","Data":"2dd23f19b8be9326a2ead6092ddea245ab5d0da6868d487a7da95e665169e66f"} Nov 28 06:56:05 crc kubenswrapper[4922]: I1128 06:56:05.441745 4922 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="2dd23f19b8be9326a2ead6092ddea245ab5d0da6868d487a7da95e665169e66f" Nov 28 06:56:05 crc kubenswrapper[4922]: I1128 06:56:05.441491 4922 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-9-crc" Nov 28 06:56:07 crc kubenswrapper[4922]: I1128 06:56:07.766191 4922 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/installer-9-crc"] Nov 28 06:56:07 crc kubenswrapper[4922]: E1128 06:56:07.766890 4922 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="94fc9125-ad02-4332-83b8-fe82cd247ec5" containerName="pruner" Nov 28 06:56:07 crc kubenswrapper[4922]: I1128 06:56:07.766912 4922 state_mem.go:107] "Deleted CPUSet assignment" podUID="94fc9125-ad02-4332-83b8-fe82cd247ec5" containerName="pruner" Nov 28 06:56:07 crc kubenswrapper[4922]: E1128 06:56:07.766939 4922 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="11f07dc5-9896-407d-ae34-69690126be1e" containerName="pruner" Nov 28 06:56:07 crc kubenswrapper[4922]: I1128 06:56:07.766952 4922 state_mem.go:107] "Deleted CPUSet assignment" podUID="11f07dc5-9896-407d-ae34-69690126be1e" containerName="pruner" Nov 28 06:56:07 crc kubenswrapper[4922]: I1128 06:56:07.767119 4922 memory_manager.go:354] "RemoveStaleState removing state" podUID="94fc9125-ad02-4332-83b8-fe82cd247ec5" containerName="pruner" Nov 28 06:56:07 crc kubenswrapper[4922]: I1128 06:56:07.767141 4922 memory_manager.go:354] "RemoveStaleState removing state" podUID="11f07dc5-9896-407d-ae34-69690126be1e" containerName="pruner" Nov 28 06:56:07 crc kubenswrapper[4922]: I1128 06:56:07.767779 4922 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/installer-9-crc" Nov 28 06:56:07 crc kubenswrapper[4922]: I1128 06:56:07.770389 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver"/"installer-sa-dockercfg-5pr6n" Nov 28 06:56:07 crc kubenswrapper[4922]: I1128 06:56:07.770775 4922 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver"/"kube-root-ca.crt" Nov 28 06:56:07 crc kubenswrapper[4922]: I1128 06:56:07.775161 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/d5da4dd4-e0f3-46c8-88cd-e84251d03b1b-kubelet-dir\") pod \"installer-9-crc\" (UID: \"d5da4dd4-e0f3-46c8-88cd-e84251d03b1b\") " pod="openshift-kube-apiserver/installer-9-crc" Nov 28 06:56:07 crc kubenswrapper[4922]: I1128 06:56:07.775325 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/d5da4dd4-e0f3-46c8-88cd-e84251d03b1b-var-lock\") pod \"installer-9-crc\" (UID: \"d5da4dd4-e0f3-46c8-88cd-e84251d03b1b\") " pod="openshift-kube-apiserver/installer-9-crc" Nov 28 06:56:07 crc kubenswrapper[4922]: I1128 06:56:07.775493 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/d5da4dd4-e0f3-46c8-88cd-e84251d03b1b-kube-api-access\") pod \"installer-9-crc\" (UID: \"d5da4dd4-e0f3-46c8-88cd-e84251d03b1b\") " pod="openshift-kube-apiserver/installer-9-crc" Nov 28 06:56:07 crc kubenswrapper[4922]: I1128 06:56:07.790788 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/installer-9-crc"] Nov 28 06:56:07 crc kubenswrapper[4922]: I1128 06:56:07.876121 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/d5da4dd4-e0f3-46c8-88cd-e84251d03b1b-var-lock\") pod \"installer-9-crc\" (UID: \"d5da4dd4-e0f3-46c8-88cd-e84251d03b1b\") " pod="openshift-kube-apiserver/installer-9-crc" Nov 28 06:56:07 crc kubenswrapper[4922]: I1128 06:56:07.876187 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/d5da4dd4-e0f3-46c8-88cd-e84251d03b1b-kube-api-access\") pod \"installer-9-crc\" (UID: \"d5da4dd4-e0f3-46c8-88cd-e84251d03b1b\") " pod="openshift-kube-apiserver/installer-9-crc" Nov 28 06:56:07 crc kubenswrapper[4922]: I1128 06:56:07.876250 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/d5da4dd4-e0f3-46c8-88cd-e84251d03b1b-kubelet-dir\") pod \"installer-9-crc\" (UID: \"d5da4dd4-e0f3-46c8-88cd-e84251d03b1b\") " pod="openshift-kube-apiserver/installer-9-crc" Nov 28 06:56:07 crc kubenswrapper[4922]: I1128 06:56:07.876291 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/d5da4dd4-e0f3-46c8-88cd-e84251d03b1b-var-lock\") pod \"installer-9-crc\" (UID: \"d5da4dd4-e0f3-46c8-88cd-e84251d03b1b\") " pod="openshift-kube-apiserver/installer-9-crc" Nov 28 06:56:07 crc kubenswrapper[4922]: I1128 06:56:07.876352 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/d5da4dd4-e0f3-46c8-88cd-e84251d03b1b-kubelet-dir\") pod \"installer-9-crc\" (UID: 
\"d5da4dd4-e0f3-46c8-88cd-e84251d03b1b\") " pod="openshift-kube-apiserver/installer-9-crc" Nov 28 06:56:07 crc kubenswrapper[4922]: I1128 06:56:07.895114 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/d5da4dd4-e0f3-46c8-88cd-e84251d03b1b-kube-api-access\") pod \"installer-9-crc\" (UID: \"d5da4dd4-e0f3-46c8-88cd-e84251d03b1b\") " pod="openshift-kube-apiserver/installer-9-crc" Nov 28 06:56:08 crc kubenswrapper[4922]: I1128 06:56:08.085868 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/installer-9-crc" Nov 28 06:56:08 crc kubenswrapper[4922]: I1128 06:56:08.469991 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/installer-9-crc"] Nov 28 06:56:08 crc kubenswrapper[4922]: W1128 06:56:08.484903 4922 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-podd5da4dd4_e0f3_46c8_88cd_e84251d03b1b.slice/crio-0dee4e0269ecd4aad99a667867ca281c0b6901311353b2a480df5978152151c7 WatchSource:0}: Error finding container 0dee4e0269ecd4aad99a667867ca281c0b6901311353b2a480df5978152151c7: Status 404 returned error can't find the container with id 0dee4e0269ecd4aad99a667867ca281c0b6901311353b2a480df5978152151c7 Nov 28 06:56:09 crc kubenswrapper[4922]: I1128 06:56:09.462019 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/installer-9-crc" event={"ID":"d5da4dd4-e0f3-46c8-88cd-e84251d03b1b","Type":"ContainerStarted","Data":"3505eecb5a39b4e42725ac3f99be8d95d0b88e7682d14356be34ab8409dd3ea0"} Nov 28 06:56:09 crc kubenswrapper[4922]: I1128 06:56:09.463271 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/installer-9-crc" event={"ID":"d5da4dd4-e0f3-46c8-88cd-e84251d03b1b","Type":"ContainerStarted","Data":"0dee4e0269ecd4aad99a667867ca281c0b6901311353b2a480df5978152151c7"} Nov 28 06:56:09 crc kubenswrapper[4922]: I1128 06:56:09.483020 4922 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver/installer-9-crc" podStartSLOduration=2.483001651 podStartE2EDuration="2.483001651s" podCreationTimestamp="2025-11-28 06:56:07 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 06:56:09.48047529 +0000 UTC m=+214.400870872" watchObservedRunningTime="2025-11-28 06:56:09.483001651 +0000 UTC m=+214.403397233" Nov 28 06:56:11 crc kubenswrapper[4922]: I1128 06:56:11.475339 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-lppzs" event={"ID":"b82398ae-40c9-40dc-8775-65f999dac1a8","Type":"ContainerStarted","Data":"50c3b7c8717c8d22f8af7ba9b96ea7f955e258adddc5dd2da0e18634a84e6cfd"} Nov 28 06:56:12 crc kubenswrapper[4922]: I1128 06:56:12.482583 4922 generic.go:334] "Generic (PLEG): container finished" podID="b82398ae-40c9-40dc-8775-65f999dac1a8" containerID="50c3b7c8717c8d22f8af7ba9b96ea7f955e258adddc5dd2da0e18634a84e6cfd" exitCode=0 Nov 28 06:56:12 crc kubenswrapper[4922]: I1128 06:56:12.482625 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-lppzs" event={"ID":"b82398ae-40c9-40dc-8775-65f999dac1a8","Type":"ContainerDied","Data":"50c3b7c8717c8d22f8af7ba9b96ea7f955e258adddc5dd2da0e18634a84e6cfd"} Nov 28 06:56:13 crc kubenswrapper[4922]: I1128 06:56:13.489016 4922 generic.go:334] "Generic (PLEG): container finished" 
podID="d2e80403-da8b-49c0-9007-aa0c6c1be47a" containerID="13522133c2f5744c223802d0ef3cc34fafc4f7996f56cb7751118635a742297f" exitCode=0 Nov 28 06:56:13 crc kubenswrapper[4922]: I1128 06:56:13.489289 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-ncbvn" event={"ID":"d2e80403-da8b-49c0-9007-aa0c6c1be47a","Type":"ContainerDied","Data":"13522133c2f5744c223802d0ef3cc34fafc4f7996f56cb7751118635a742297f"} Nov 28 06:56:13 crc kubenswrapper[4922]: I1128 06:56:13.495212 4922 generic.go:334] "Generic (PLEG): container finished" podID="23771385-c219-4713-9c79-d4802b2f13a7" containerID="52982adf6070d76ced68c8514a1f287f3792a9e2855ae675d6fef971f75a616d" exitCode=0 Nov 28 06:56:13 crc kubenswrapper[4922]: I1128 06:56:13.495278 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-fww6r" event={"ID":"23771385-c219-4713-9c79-d4802b2f13a7","Type":"ContainerDied","Data":"52982adf6070d76ced68c8514a1f287f3792a9e2855ae675d6fef971f75a616d"} Nov 28 06:56:13 crc kubenswrapper[4922]: I1128 06:56:13.504008 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-lppzs" event={"ID":"b82398ae-40c9-40dc-8775-65f999dac1a8","Type":"ContainerStarted","Data":"00cce55595643414e3d382a026e612b8ad4b7f70d61f97fdc1fcffcebb8a5dec"} Nov 28 06:56:13 crc kubenswrapper[4922]: I1128 06:56:13.506007 4922 generic.go:334] "Generic (PLEG): container finished" podID="cca6c3a6-ee5e-4d4c-8a39-68b087664c86" containerID="e0e8450d713017a03c081349939b6791cc41eb9baabeaca5c77933fa1c15f768" exitCode=0 Nov 28 06:56:13 crc kubenswrapper[4922]: I1128 06:56:13.506048 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-lftkx" event={"ID":"cca6c3a6-ee5e-4d4c-8a39-68b087664c86","Type":"ContainerDied","Data":"e0e8450d713017a03c081349939b6791cc41eb9baabeaca5c77933fa1c15f768"} Nov 28 06:56:13 crc kubenswrapper[4922]: I1128 06:56:13.527944 4922 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-lppzs" podStartSLOduration=4.328049388 podStartE2EDuration="57.527920802s" podCreationTimestamp="2025-11-28 06:55:16 +0000 UTC" firstStartedPulling="2025-11-28 06:55:19.757393418 +0000 UTC m=+164.677789000" lastFinishedPulling="2025-11-28 06:56:12.957264832 +0000 UTC m=+217.877660414" observedRunningTime="2025-11-28 06:56:13.524691802 +0000 UTC m=+218.445087394" watchObservedRunningTime="2025-11-28 06:56:13.527920802 +0000 UTC m=+218.448316424" Nov 28 06:56:14 crc kubenswrapper[4922]: I1128 06:56:14.512203 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-wr979" event={"ID":"069013ed-12e9-4b49-a869-b6956f06ec15","Type":"ContainerStarted","Data":"fe8657d372a70ff8e2dc81120e613e85f34a34d44ffd339912c1b89cb5d1ae3e"} Nov 28 06:56:14 crc kubenswrapper[4922]: I1128 06:56:14.514683 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-lftkx" event={"ID":"cca6c3a6-ee5e-4d4c-8a39-68b087664c86","Type":"ContainerStarted","Data":"7b96965cf8bcecb7bb947647968f3fa821e5dbe3ab28b0d544afeb71f95d70fc"} Nov 28 06:56:14 crc kubenswrapper[4922]: I1128 06:56:14.517151 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-ncbvn" event={"ID":"d2e80403-da8b-49c0-9007-aa0c6c1be47a","Type":"ContainerStarted","Data":"5da366e264d5f169510e2c25b1c10ebaa7b947dcdfa831cd9274c13a4744c2e5"} Nov 28 06:56:14 crc 
kubenswrapper[4922]: I1128 06:56:14.519289 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-fww6r" event={"ID":"23771385-c219-4713-9c79-d4802b2f13a7","Type":"ContainerStarted","Data":"c5a8d7b0fb31fc9d18d2bdb730d3bfe14caba46ed5ff09c391a38b891b198c5c"} Nov 28 06:56:14 crc kubenswrapper[4922]: I1128 06:56:14.549623 4922 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-fww6r" podStartSLOduration=3.886083371 podStartE2EDuration="58.549605757s" podCreationTimestamp="2025-11-28 06:55:16 +0000 UTC" firstStartedPulling="2025-11-28 06:55:19.261003572 +0000 UTC m=+164.181399144" lastFinishedPulling="2025-11-28 06:56:13.924525938 +0000 UTC m=+218.844921530" observedRunningTime="2025-11-28 06:56:14.549156855 +0000 UTC m=+219.469552447" watchObservedRunningTime="2025-11-28 06:56:14.549605757 +0000 UTC m=+219.470001339" Nov 28 06:56:14 crc kubenswrapper[4922]: I1128 06:56:14.576398 4922 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-ncbvn" podStartSLOduration=3.742391513 podStartE2EDuration="58.576383025s" podCreationTimestamp="2025-11-28 06:55:16 +0000 UTC" firstStartedPulling="2025-11-28 06:55:19.261778053 +0000 UTC m=+164.182173635" lastFinishedPulling="2025-11-28 06:56:14.095769565 +0000 UTC m=+219.016165147" observedRunningTime="2025-11-28 06:56:14.575274143 +0000 UTC m=+219.495669715" watchObservedRunningTime="2025-11-28 06:56:14.576383025 +0000 UTC m=+219.496778607" Nov 28 06:56:14 crc kubenswrapper[4922]: I1128 06:56:14.609337 4922 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-lftkx" podStartSLOduration=3.485231585 podStartE2EDuration="56.609321013s" podCreationTimestamp="2025-11-28 06:55:18 +0000 UTC" firstStartedPulling="2025-11-28 06:55:20.80975862 +0000 UTC m=+165.730154202" lastFinishedPulling="2025-11-28 06:56:13.933848038 +0000 UTC m=+218.854243630" observedRunningTime="2025-11-28 06:56:14.606824884 +0000 UTC m=+219.527220466" watchObservedRunningTime="2025-11-28 06:56:14.609321013 +0000 UTC m=+219.529716585" Nov 28 06:56:15 crc kubenswrapper[4922]: I1128 06:56:15.526803 4922 generic.go:334] "Generic (PLEG): container finished" podID="069013ed-12e9-4b49-a869-b6956f06ec15" containerID="fe8657d372a70ff8e2dc81120e613e85f34a34d44ffd339912c1b89cb5d1ae3e" exitCode=0 Nov 28 06:56:15 crc kubenswrapper[4922]: I1128 06:56:15.526868 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-wr979" event={"ID":"069013ed-12e9-4b49-a869-b6956f06ec15","Type":"ContainerDied","Data":"fe8657d372a70ff8e2dc81120e613e85f34a34d44ffd339912c1b89cb5d1ae3e"} Nov 28 06:56:15 crc kubenswrapper[4922]: I1128 06:56:15.529737 4922 generic.go:334] "Generic (PLEG): container finished" podID="09d238cf-e3a6-461a-bdf8-f598c91385a3" containerID="3c7aaeedd2bf52d4c343f349b12f83cf9348f317076fab783b3a4dd56a35049c" exitCode=0 Nov 28 06:56:15 crc kubenswrapper[4922]: I1128 06:56:15.529782 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-vzlv9" event={"ID":"09d238cf-e3a6-461a-bdf8-f598c91385a3","Type":"ContainerDied","Data":"3c7aaeedd2bf52d4c343f349b12f83cf9348f317076fab783b3a4dd56a35049c"} Nov 28 06:56:15 crc kubenswrapper[4922]: I1128 06:56:15.531851 4922 generic.go:334] "Generic (PLEG): container finished" podID="5254d7c5-8faa-4ede-a82a-210426648d02" 
containerID="ed8805f962e8cef4ddc0262ccc6ac79c68354dd8329936e3339d81de3c85a564" exitCode=0 Nov 28 06:56:15 crc kubenswrapper[4922]: I1128 06:56:15.531877 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-d9mhp" event={"ID":"5254d7c5-8faa-4ede-a82a-210426648d02","Type":"ContainerDied","Data":"ed8805f962e8cef4ddc0262ccc6ac79c68354dd8329936e3339d81de3c85a564"} Nov 28 06:56:16 crc kubenswrapper[4922]: I1128 06:56:16.539576 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-d9mhp" event={"ID":"5254d7c5-8faa-4ede-a82a-210426648d02","Type":"ContainerStarted","Data":"3de61e4dac8f0f6cf1a48b5ffcbba88aea90eeb2a0dc6d232ffbaf12e47a81ec"} Nov 28 06:56:16 crc kubenswrapper[4922]: I1128 06:56:16.541465 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-wr979" event={"ID":"069013ed-12e9-4b49-a869-b6956f06ec15","Type":"ContainerStarted","Data":"aa0441119ee09e07d09d23a767135367d861f6681ca710a281aefe77ee580676"} Nov 28 06:56:16 crc kubenswrapper[4922]: I1128 06:56:16.543173 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-vzlv9" event={"ID":"09d238cf-e3a6-461a-bdf8-f598c91385a3","Type":"ContainerStarted","Data":"8883cf0c581252a1e7b15d72f175f57e7c8e45453324612d28a4fa3facaa3f3d"} Nov 28 06:56:16 crc kubenswrapper[4922]: I1128 06:56:16.545395 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-qgkkf" event={"ID":"915bf055-c217-4565-a245-8901b61def3e","Type":"ContainerStarted","Data":"83ef5c97f9f6c6985007ae73e692902438365d7e53e0021ac45cf17a83ab430b"} Nov 28 06:56:16 crc kubenswrapper[4922]: I1128 06:56:16.554542 4922 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-lppzs" Nov 28 06:56:16 crc kubenswrapper[4922]: I1128 06:56:16.554582 4922 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-lppzs" Nov 28 06:56:16 crc kubenswrapper[4922]: I1128 06:56:16.558689 4922 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-d9mhp" podStartSLOduration=3.31259363 podStartE2EDuration="58.558665609s" podCreationTimestamp="2025-11-28 06:55:18 +0000 UTC" firstStartedPulling="2025-11-28 06:55:20.828656379 +0000 UTC m=+165.749051961" lastFinishedPulling="2025-11-28 06:56:16.074728368 +0000 UTC m=+220.995123940" observedRunningTime="2025-11-28 06:56:16.557886028 +0000 UTC m=+221.478281620" watchObservedRunningTime="2025-11-28 06:56:16.558665609 +0000 UTC m=+221.479061191" Nov 28 06:56:16 crc kubenswrapper[4922]: I1128 06:56:16.573148 4922 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-vzlv9" podStartSLOduration=3.935537351 podStartE2EDuration="1m0.573127913s" podCreationTimestamp="2025-11-28 06:55:16 +0000 UTC" firstStartedPulling="2025-11-28 06:55:19.357782572 +0000 UTC m=+164.278178154" lastFinishedPulling="2025-11-28 06:56:15.995373134 +0000 UTC m=+220.915768716" observedRunningTime="2025-11-28 06:56:16.572377742 +0000 UTC m=+221.492773324" watchObservedRunningTime="2025-11-28 06:56:16.573127913 +0000 UTC m=+221.493523495" Nov 28 06:56:16 crc kubenswrapper[4922]: I1128 06:56:16.782210 4922 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-lppzs" Nov 28 06:56:16 crc 
kubenswrapper[4922]: I1128 06:56:16.800958 4922 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-wr979" podStartSLOduration=2.531720281 podStartE2EDuration="56.800939779s" podCreationTimestamp="2025-11-28 06:55:20 +0000 UTC" firstStartedPulling="2025-11-28 06:55:21.942339007 +0000 UTC m=+166.862734579" lastFinishedPulling="2025-11-28 06:56:16.211558495 +0000 UTC m=+221.131954077" observedRunningTime="2025-11-28 06:56:16.61173073 +0000 UTC m=+221.532126332" watchObservedRunningTime="2025-11-28 06:56:16.800939779 +0000 UTC m=+221.721335361" Nov 28 06:56:16 crc kubenswrapper[4922]: I1128 06:56:16.978705 4922 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-ncbvn" Nov 28 06:56:16 crc kubenswrapper[4922]: I1128 06:56:16.978767 4922 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-ncbvn" Nov 28 06:56:17 crc kubenswrapper[4922]: I1128 06:56:17.022699 4922 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-ncbvn" Nov 28 06:56:17 crc kubenswrapper[4922]: I1128 06:56:17.115763 4922 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-fww6r" Nov 28 06:56:17 crc kubenswrapper[4922]: I1128 06:56:17.115808 4922 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-fww6r" Nov 28 06:56:17 crc kubenswrapper[4922]: I1128 06:56:17.157144 4922 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-fww6r" Nov 28 06:56:17 crc kubenswrapper[4922]: I1128 06:56:17.337416 4922 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-vzlv9" Nov 28 06:56:17 crc kubenswrapper[4922]: I1128 06:56:17.337481 4922 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-vzlv9" Nov 28 06:56:17 crc kubenswrapper[4922]: I1128 06:56:17.558961 4922 generic.go:334] "Generic (PLEG): container finished" podID="915bf055-c217-4565-a245-8901b61def3e" containerID="83ef5c97f9f6c6985007ae73e692902438365d7e53e0021ac45cf17a83ab430b" exitCode=0 Nov 28 06:56:17 crc kubenswrapper[4922]: I1128 06:56:17.559030 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-qgkkf" event={"ID":"915bf055-c217-4565-a245-8901b61def3e","Type":"ContainerDied","Data":"83ef5c97f9f6c6985007ae73e692902438365d7e53e0021ac45cf17a83ab430b"} Nov 28 06:56:18 crc kubenswrapper[4922]: I1128 06:56:18.372083 4922 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/community-operators-vzlv9" podUID="09d238cf-e3a6-461a-bdf8-f598c91385a3" containerName="registry-server" probeResult="failure" output=< Nov 28 06:56:18 crc kubenswrapper[4922]: timeout: failed to connect service ":50051" within 1s Nov 28 06:56:18 crc kubenswrapper[4922]: > Nov 28 06:56:18 crc kubenswrapper[4922]: I1128 06:56:18.742517 4922 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-d9mhp" Nov 28 06:56:18 crc kubenswrapper[4922]: I1128 06:56:18.742770 4922 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-d9mhp" Nov 28 06:56:18 crc kubenswrapper[4922]: I1128 
06:56:18.790579 4922 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-d9mhp" Nov 28 06:56:19 crc kubenswrapper[4922]: I1128 06:56:19.086353 4922 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-lftkx" Nov 28 06:56:19 crc kubenswrapper[4922]: I1128 06:56:19.086551 4922 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-lftkx" Nov 28 06:56:19 crc kubenswrapper[4922]: I1128 06:56:19.124115 4922 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-lftkx" Nov 28 06:56:19 crc kubenswrapper[4922]: I1128 06:56:19.615441 4922 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-lftkx" Nov 28 06:56:20 crc kubenswrapper[4922]: I1128 06:56:20.675832 4922 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-wr979" Nov 28 06:56:20 crc kubenswrapper[4922]: I1128 06:56:20.675901 4922 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-wr979" Nov 28 06:56:21 crc kubenswrapper[4922]: I1128 06:56:21.497802 4922 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-bqwm2"] Nov 28 06:56:21 crc kubenswrapper[4922]: I1128 06:56:21.723490 4922 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-wr979" podUID="069013ed-12e9-4b49-a869-b6956f06ec15" containerName="registry-server" probeResult="failure" output=< Nov 28 06:56:21 crc kubenswrapper[4922]: timeout: failed to connect service ":50051" within 1s Nov 28 06:56:21 crc kubenswrapper[4922]: > Nov 28 06:56:21 crc kubenswrapper[4922]: I1128 06:56:21.838141 4922 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-lftkx"] Nov 28 06:56:22 crc kubenswrapper[4922]: I1128 06:56:22.591788 4922 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-lftkx" podUID="cca6c3a6-ee5e-4d4c-8a39-68b087664c86" containerName="registry-server" containerID="cri-o://7b96965cf8bcecb7bb947647968f3fa821e5dbe3ab28b0d544afeb71f95d70fc" gracePeriod=2 Nov 28 06:56:23 crc kubenswrapper[4922]: I1128 06:56:23.603962 4922 generic.go:334] "Generic (PLEG): container finished" podID="cca6c3a6-ee5e-4d4c-8a39-68b087664c86" containerID="7b96965cf8bcecb7bb947647968f3fa821e5dbe3ab28b0d544afeb71f95d70fc" exitCode=0 Nov 28 06:56:23 crc kubenswrapper[4922]: I1128 06:56:23.604113 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-lftkx" event={"ID":"cca6c3a6-ee5e-4d4c-8a39-68b087664c86","Type":"ContainerDied","Data":"7b96965cf8bcecb7bb947647968f3fa821e5dbe3ab28b0d544afeb71f95d70fc"} Nov 28 06:56:23 crc kubenswrapper[4922]: I1128 06:56:23.609004 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-qgkkf" event={"ID":"915bf055-c217-4565-a245-8901b61def3e","Type":"ContainerStarted","Data":"18de1d9bac96c96f8fd0b2c9fd217290396b81e7ba0451ee7b749ea61bf21c36"} Nov 28 06:56:25 crc kubenswrapper[4922]: I1128 06:56:25.134894 4922 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-lftkx" Nov 28 06:56:25 crc kubenswrapper[4922]: I1128 06:56:25.160533 4922 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-qgkkf" podStartSLOduration=6.047902895 podStartE2EDuration="1m6.160503569s" podCreationTimestamp="2025-11-28 06:55:19 +0000 UTC" firstStartedPulling="2025-11-28 06:55:21.881757961 +0000 UTC m=+166.802153543" lastFinishedPulling="2025-11-28 06:56:21.994358635 +0000 UTC m=+226.914754217" observedRunningTime="2025-11-28 06:56:23.643671389 +0000 UTC m=+228.564066981" watchObservedRunningTime="2025-11-28 06:56:25.160503569 +0000 UTC m=+230.080899211" Nov 28 06:56:25 crc kubenswrapper[4922]: I1128 06:56:25.223821 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/cca6c3a6-ee5e-4d4c-8a39-68b087664c86-utilities\") pod \"cca6c3a6-ee5e-4d4c-8a39-68b087664c86\" (UID: \"cca6c3a6-ee5e-4d4c-8a39-68b087664c86\") " Nov 28 06:56:25 crc kubenswrapper[4922]: I1128 06:56:25.224130 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/cca6c3a6-ee5e-4d4c-8a39-68b087664c86-catalog-content\") pod \"cca6c3a6-ee5e-4d4c-8a39-68b087664c86\" (UID: \"cca6c3a6-ee5e-4d4c-8a39-68b087664c86\") " Nov 28 06:56:25 crc kubenswrapper[4922]: I1128 06:56:25.224210 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-b9phc\" (UniqueName: \"kubernetes.io/projected/cca6c3a6-ee5e-4d4c-8a39-68b087664c86-kube-api-access-b9phc\") pod \"cca6c3a6-ee5e-4d4c-8a39-68b087664c86\" (UID: \"cca6c3a6-ee5e-4d4c-8a39-68b087664c86\") " Nov 28 06:56:25 crc kubenswrapper[4922]: I1128 06:56:25.224619 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/cca6c3a6-ee5e-4d4c-8a39-68b087664c86-utilities" (OuterVolumeSpecName: "utilities") pod "cca6c3a6-ee5e-4d4c-8a39-68b087664c86" (UID: "cca6c3a6-ee5e-4d4c-8a39-68b087664c86"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 06:56:25 crc kubenswrapper[4922]: I1128 06:56:25.234023 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cca6c3a6-ee5e-4d4c-8a39-68b087664c86-kube-api-access-b9phc" (OuterVolumeSpecName: "kube-api-access-b9phc") pod "cca6c3a6-ee5e-4d4c-8a39-68b087664c86" (UID: "cca6c3a6-ee5e-4d4c-8a39-68b087664c86"). InnerVolumeSpecName "kube-api-access-b9phc". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 06:56:25 crc kubenswrapper[4922]: I1128 06:56:25.326016 4922 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-b9phc\" (UniqueName: \"kubernetes.io/projected/cca6c3a6-ee5e-4d4c-8a39-68b087664c86-kube-api-access-b9phc\") on node \"crc\" DevicePath \"\"" Nov 28 06:56:25 crc kubenswrapper[4922]: I1128 06:56:25.326055 4922 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/cca6c3a6-ee5e-4d4c-8a39-68b087664c86-utilities\") on node \"crc\" DevicePath \"\"" Nov 28 06:56:25 crc kubenswrapper[4922]: I1128 06:56:25.594353 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/cca6c3a6-ee5e-4d4c-8a39-68b087664c86-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "cca6c3a6-ee5e-4d4c-8a39-68b087664c86" (UID: "cca6c3a6-ee5e-4d4c-8a39-68b087664c86"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 06:56:25 crc kubenswrapper[4922]: I1128 06:56:25.622869 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-lftkx" event={"ID":"cca6c3a6-ee5e-4d4c-8a39-68b087664c86","Type":"ContainerDied","Data":"9d41d00d4aac916db205d1d9e9fd2ab2bd85baf4d7f531131860f407d2d7faf5"} Nov 28 06:56:25 crc kubenswrapper[4922]: I1128 06:56:25.622942 4922 scope.go:117] "RemoveContainer" containerID="7b96965cf8bcecb7bb947647968f3fa821e5dbe3ab28b0d544afeb71f95d70fc" Nov 28 06:56:25 crc kubenswrapper[4922]: I1128 06:56:25.623118 4922 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-lftkx" Nov 28 06:56:25 crc kubenswrapper[4922]: I1128 06:56:25.630193 4922 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/cca6c3a6-ee5e-4d4c-8a39-68b087664c86-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 28 06:56:25 crc kubenswrapper[4922]: I1128 06:56:25.649241 4922 scope.go:117] "RemoveContainer" containerID="e0e8450d713017a03c081349939b6791cc41eb9baabeaca5c77933fa1c15f768" Nov 28 06:56:25 crc kubenswrapper[4922]: I1128 06:56:25.657992 4922 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-lftkx"] Nov 28 06:56:25 crc kubenswrapper[4922]: I1128 06:56:25.664457 4922 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-lftkx"] Nov 28 06:56:25 crc kubenswrapper[4922]: I1128 06:56:25.683478 4922 scope.go:117] "RemoveContainer" containerID="38a4d46d7f6531265e06c0f449254d2a6ca6d9dfe9697109abbeced1164e7ecf" Nov 28 06:56:26 crc kubenswrapper[4922]: I1128 06:56:26.619616 4922 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-lppzs" Nov 28 06:56:27 crc kubenswrapper[4922]: I1128 06:56:27.031018 4922 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-ncbvn" Nov 28 06:56:27 crc kubenswrapper[4922]: I1128 06:56:27.161701 4922 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-fww6r" Nov 28 06:56:27 crc kubenswrapper[4922]: I1128 06:56:27.389467 4922 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-vzlv9" Nov 28 06:56:27 crc kubenswrapper[4922]: I1128 06:56:27.420801 4922 
kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="cca6c3a6-ee5e-4d4c-8a39-68b087664c86" path="/var/lib/kubelet/pods/cca6c3a6-ee5e-4d4c-8a39-68b087664c86/volumes" Nov 28 06:56:27 crc kubenswrapper[4922]: I1128 06:56:27.449727 4922 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-vzlv9" Nov 28 06:56:28 crc kubenswrapper[4922]: I1128 06:56:28.814697 4922 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-d9mhp" Nov 28 06:56:28 crc kubenswrapper[4922]: I1128 06:56:28.837989 4922 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-ncbvn"] Nov 28 06:56:28 crc kubenswrapper[4922]: I1128 06:56:28.838379 4922 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-ncbvn" podUID="d2e80403-da8b-49c0-9007-aa0c6c1be47a" containerName="registry-server" containerID="cri-o://5da366e264d5f169510e2c25b1c10ebaa7b947dcdfa831cd9274c13a4744c2e5" gracePeriod=2 Nov 28 06:56:29 crc kubenswrapper[4922]: I1128 06:56:29.435120 4922 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-vzlv9"] Nov 28 06:56:29 crc kubenswrapper[4922]: I1128 06:56:29.435937 4922 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-vzlv9" podUID="09d238cf-e3a6-461a-bdf8-f598c91385a3" containerName="registry-server" containerID="cri-o://8883cf0c581252a1e7b15d72f175f57e7c8e45453324612d28a4fa3facaa3f3d" gracePeriod=2 Nov 28 06:56:29 crc kubenswrapper[4922]: I1128 06:56:29.647382 4922 generic.go:334] "Generic (PLEG): container finished" podID="d2e80403-da8b-49c0-9007-aa0c6c1be47a" containerID="5da366e264d5f169510e2c25b1c10ebaa7b947dcdfa831cd9274c13a4744c2e5" exitCode=0 Nov 28 06:56:29 crc kubenswrapper[4922]: I1128 06:56:29.647425 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-ncbvn" event={"ID":"d2e80403-da8b-49c0-9007-aa0c6c1be47a","Type":"ContainerDied","Data":"5da366e264d5f169510e2c25b1c10ebaa7b947dcdfa831cd9274c13a4744c2e5"} Nov 28 06:56:30 crc kubenswrapper[4922]: I1128 06:56:30.099272 4922 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-ncbvn" Nov 28 06:56:30 crc kubenswrapper[4922]: I1128 06:56:30.227669 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d2e80403-da8b-49c0-9007-aa0c6c1be47a-utilities\") pod \"d2e80403-da8b-49c0-9007-aa0c6c1be47a\" (UID: \"d2e80403-da8b-49c0-9007-aa0c6c1be47a\") " Nov 28 06:56:30 crc kubenswrapper[4922]: I1128 06:56:30.227800 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tvhbg\" (UniqueName: \"kubernetes.io/projected/d2e80403-da8b-49c0-9007-aa0c6c1be47a-kube-api-access-tvhbg\") pod \"d2e80403-da8b-49c0-9007-aa0c6c1be47a\" (UID: \"d2e80403-da8b-49c0-9007-aa0c6c1be47a\") " Nov 28 06:56:30 crc kubenswrapper[4922]: I1128 06:56:30.227877 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d2e80403-da8b-49c0-9007-aa0c6c1be47a-catalog-content\") pod \"d2e80403-da8b-49c0-9007-aa0c6c1be47a\" (UID: \"d2e80403-da8b-49c0-9007-aa0c6c1be47a\") " Nov 28 06:56:30 crc kubenswrapper[4922]: I1128 06:56:30.229256 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d2e80403-da8b-49c0-9007-aa0c6c1be47a-utilities" (OuterVolumeSpecName: "utilities") pod "d2e80403-da8b-49c0-9007-aa0c6c1be47a" (UID: "d2e80403-da8b-49c0-9007-aa0c6c1be47a"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 06:56:30 crc kubenswrapper[4922]: I1128 06:56:30.237384 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d2e80403-da8b-49c0-9007-aa0c6c1be47a-kube-api-access-tvhbg" (OuterVolumeSpecName: "kube-api-access-tvhbg") pod "d2e80403-da8b-49c0-9007-aa0c6c1be47a" (UID: "d2e80403-da8b-49c0-9007-aa0c6c1be47a"). InnerVolumeSpecName "kube-api-access-tvhbg". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 06:56:30 crc kubenswrapper[4922]: I1128 06:56:30.275799 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d2e80403-da8b-49c0-9007-aa0c6c1be47a-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "d2e80403-da8b-49c0-9007-aa0c6c1be47a" (UID: "d2e80403-da8b-49c0-9007-aa0c6c1be47a"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 06:56:30 crc kubenswrapper[4922]: I1128 06:56:30.325098 4922 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-vzlv9" Nov 28 06:56:30 crc kubenswrapper[4922]: I1128 06:56:30.325397 4922 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-qgkkf" Nov 28 06:56:30 crc kubenswrapper[4922]: I1128 06:56:30.325430 4922 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-qgkkf" Nov 28 06:56:30 crc kubenswrapper[4922]: I1128 06:56:30.328799 4922 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d2e80403-da8b-49c0-9007-aa0c6c1be47a-utilities\") on node \"crc\" DevicePath \"\"" Nov 28 06:56:30 crc kubenswrapper[4922]: I1128 06:56:30.328829 4922 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tvhbg\" (UniqueName: \"kubernetes.io/projected/d2e80403-da8b-49c0-9007-aa0c6c1be47a-kube-api-access-tvhbg\") on node \"crc\" DevicePath \"\"" Nov 28 06:56:30 crc kubenswrapper[4922]: I1128 06:56:30.328843 4922 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d2e80403-da8b-49c0-9007-aa0c6c1be47a-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 28 06:56:30 crc kubenswrapper[4922]: I1128 06:56:30.430251 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/09d238cf-e3a6-461a-bdf8-f598c91385a3-catalog-content\") pod \"09d238cf-e3a6-461a-bdf8-f598c91385a3\" (UID: \"09d238cf-e3a6-461a-bdf8-f598c91385a3\") " Nov 28 06:56:30 crc kubenswrapper[4922]: I1128 06:56:30.430363 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/09d238cf-e3a6-461a-bdf8-f598c91385a3-utilities\") pod \"09d238cf-e3a6-461a-bdf8-f598c91385a3\" (UID: \"09d238cf-e3a6-461a-bdf8-f598c91385a3\") " Nov 28 06:56:30 crc kubenswrapper[4922]: I1128 06:56:30.430430 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-sbj55\" (UniqueName: \"kubernetes.io/projected/09d238cf-e3a6-461a-bdf8-f598c91385a3-kube-api-access-sbj55\") pod \"09d238cf-e3a6-461a-bdf8-f598c91385a3\" (UID: \"09d238cf-e3a6-461a-bdf8-f598c91385a3\") " Nov 28 06:56:30 crc kubenswrapper[4922]: I1128 06:56:30.431791 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/09d238cf-e3a6-461a-bdf8-f598c91385a3-utilities" (OuterVolumeSpecName: "utilities") pod "09d238cf-e3a6-461a-bdf8-f598c91385a3" (UID: "09d238cf-e3a6-461a-bdf8-f598c91385a3"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 06:56:30 crc kubenswrapper[4922]: I1128 06:56:30.434210 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/09d238cf-e3a6-461a-bdf8-f598c91385a3-kube-api-access-sbj55" (OuterVolumeSpecName: "kube-api-access-sbj55") pod "09d238cf-e3a6-461a-bdf8-f598c91385a3" (UID: "09d238cf-e3a6-461a-bdf8-f598c91385a3"). InnerVolumeSpecName "kube-api-access-sbj55". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 06:56:30 crc kubenswrapper[4922]: I1128 06:56:30.486673 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/09d238cf-e3a6-461a-bdf8-f598c91385a3-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "09d238cf-e3a6-461a-bdf8-f598c91385a3" (UID: "09d238cf-e3a6-461a-bdf8-f598c91385a3"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 06:56:30 crc kubenswrapper[4922]: I1128 06:56:30.532514 4922 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/09d238cf-e3a6-461a-bdf8-f598c91385a3-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 28 06:56:30 crc kubenswrapper[4922]: I1128 06:56:30.533258 4922 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/09d238cf-e3a6-461a-bdf8-f598c91385a3-utilities\") on node \"crc\" DevicePath \"\"" Nov 28 06:56:30 crc kubenswrapper[4922]: I1128 06:56:30.533268 4922 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-sbj55\" (UniqueName: \"kubernetes.io/projected/09d238cf-e3a6-461a-bdf8-f598c91385a3-kube-api-access-sbj55\") on node \"crc\" DevicePath \"\"" Nov 28 06:56:30 crc kubenswrapper[4922]: I1128 06:56:30.656121 4922 generic.go:334] "Generic (PLEG): container finished" podID="09d238cf-e3a6-461a-bdf8-f598c91385a3" containerID="8883cf0c581252a1e7b15d72f175f57e7c8e45453324612d28a4fa3facaa3f3d" exitCode=0 Nov 28 06:56:30 crc kubenswrapper[4922]: I1128 06:56:30.656206 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-vzlv9" event={"ID":"09d238cf-e3a6-461a-bdf8-f598c91385a3","Type":"ContainerDied","Data":"8883cf0c581252a1e7b15d72f175f57e7c8e45453324612d28a4fa3facaa3f3d"} Nov 28 06:56:30 crc kubenswrapper[4922]: I1128 06:56:30.656248 4922 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-vzlv9" Nov 28 06:56:30 crc kubenswrapper[4922]: I1128 06:56:30.656270 4922 scope.go:117] "RemoveContainer" containerID="8883cf0c581252a1e7b15d72f175f57e7c8e45453324612d28a4fa3facaa3f3d" Nov 28 06:56:30 crc kubenswrapper[4922]: I1128 06:56:30.656257 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-vzlv9" event={"ID":"09d238cf-e3a6-461a-bdf8-f598c91385a3","Type":"ContainerDied","Data":"f3c2e4faafd7483fc2a212d4cbf0a20923c750b335b8cdc56a960bcf1ace663f"} Nov 28 06:56:30 crc kubenswrapper[4922]: I1128 06:56:30.661202 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-ncbvn" event={"ID":"d2e80403-da8b-49c0-9007-aa0c6c1be47a","Type":"ContainerDied","Data":"f65d46290a1857b2ab7a69ece4e57ea2537c61a2bc5def9fa0cceb51caf59333"} Nov 28 06:56:30 crc kubenswrapper[4922]: I1128 06:56:30.661374 4922 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-ncbvn" Nov 28 06:56:30 crc kubenswrapper[4922]: I1128 06:56:30.680688 4922 scope.go:117] "RemoveContainer" containerID="3c7aaeedd2bf52d4c343f349b12f83cf9348f317076fab783b3a4dd56a35049c" Nov 28 06:56:30 crc kubenswrapper[4922]: I1128 06:56:30.710772 4922 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-vzlv9"] Nov 28 06:56:30 crc kubenswrapper[4922]: I1128 06:56:30.722124 4922 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-vzlv9"] Nov 28 06:56:30 crc kubenswrapper[4922]: I1128 06:56:30.726789 4922 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-wr979" Nov 28 06:56:30 crc kubenswrapper[4922]: I1128 06:56:30.726903 4922 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-ncbvn"] Nov 28 06:56:30 crc kubenswrapper[4922]: I1128 06:56:30.734042 4922 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-ncbvn"] Nov 28 06:56:30 crc kubenswrapper[4922]: I1128 06:56:30.735599 4922 scope.go:117] "RemoveContainer" containerID="e98e9fd357015efc6e5600ce20cf3a46c9af87e122277d7611017961c92923a3" Nov 28 06:56:30 crc kubenswrapper[4922]: I1128 06:56:30.755396 4922 scope.go:117] "RemoveContainer" containerID="8883cf0c581252a1e7b15d72f175f57e7c8e45453324612d28a4fa3facaa3f3d" Nov 28 06:56:30 crc kubenswrapper[4922]: E1128 06:56:30.757164 4922 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8883cf0c581252a1e7b15d72f175f57e7c8e45453324612d28a4fa3facaa3f3d\": container with ID starting with 8883cf0c581252a1e7b15d72f175f57e7c8e45453324612d28a4fa3facaa3f3d not found: ID does not exist" containerID="8883cf0c581252a1e7b15d72f175f57e7c8e45453324612d28a4fa3facaa3f3d" Nov 28 06:56:30 crc kubenswrapper[4922]: I1128 06:56:30.757307 4922 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8883cf0c581252a1e7b15d72f175f57e7c8e45453324612d28a4fa3facaa3f3d"} err="failed to get container status \"8883cf0c581252a1e7b15d72f175f57e7c8e45453324612d28a4fa3facaa3f3d\": rpc error: code = NotFound desc = could not find container \"8883cf0c581252a1e7b15d72f175f57e7c8e45453324612d28a4fa3facaa3f3d\": container with ID starting with 8883cf0c581252a1e7b15d72f175f57e7c8e45453324612d28a4fa3facaa3f3d not found: ID does not exist" Nov 28 06:56:30 crc kubenswrapper[4922]: I1128 06:56:30.757389 4922 scope.go:117] "RemoveContainer" containerID="3c7aaeedd2bf52d4c343f349b12f83cf9348f317076fab783b3a4dd56a35049c" Nov 28 06:56:30 crc kubenswrapper[4922]: E1128 06:56:30.757937 4922 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3c7aaeedd2bf52d4c343f349b12f83cf9348f317076fab783b3a4dd56a35049c\": container with ID starting with 3c7aaeedd2bf52d4c343f349b12f83cf9348f317076fab783b3a4dd56a35049c not found: ID does not exist" containerID="3c7aaeedd2bf52d4c343f349b12f83cf9348f317076fab783b3a4dd56a35049c" Nov 28 06:56:30 crc kubenswrapper[4922]: I1128 06:56:30.757995 4922 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3c7aaeedd2bf52d4c343f349b12f83cf9348f317076fab783b3a4dd56a35049c"} err="failed to get container status \"3c7aaeedd2bf52d4c343f349b12f83cf9348f317076fab783b3a4dd56a35049c\": rpc error: code = 
NotFound desc = could not find container \"3c7aaeedd2bf52d4c343f349b12f83cf9348f317076fab783b3a4dd56a35049c\": container with ID starting with 3c7aaeedd2bf52d4c343f349b12f83cf9348f317076fab783b3a4dd56a35049c not found: ID does not exist" Nov 28 06:56:30 crc kubenswrapper[4922]: I1128 06:56:30.758035 4922 scope.go:117] "RemoveContainer" containerID="e98e9fd357015efc6e5600ce20cf3a46c9af87e122277d7611017961c92923a3" Nov 28 06:56:30 crc kubenswrapper[4922]: E1128 06:56:30.758722 4922 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e98e9fd357015efc6e5600ce20cf3a46c9af87e122277d7611017961c92923a3\": container with ID starting with e98e9fd357015efc6e5600ce20cf3a46c9af87e122277d7611017961c92923a3 not found: ID does not exist" containerID="e98e9fd357015efc6e5600ce20cf3a46c9af87e122277d7611017961c92923a3" Nov 28 06:56:30 crc kubenswrapper[4922]: I1128 06:56:30.758757 4922 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e98e9fd357015efc6e5600ce20cf3a46c9af87e122277d7611017961c92923a3"} err="failed to get container status \"e98e9fd357015efc6e5600ce20cf3a46c9af87e122277d7611017961c92923a3\": rpc error: code = NotFound desc = could not find container \"e98e9fd357015efc6e5600ce20cf3a46c9af87e122277d7611017961c92923a3\": container with ID starting with e98e9fd357015efc6e5600ce20cf3a46c9af87e122277d7611017961c92923a3 not found: ID does not exist" Nov 28 06:56:30 crc kubenswrapper[4922]: I1128 06:56:30.758786 4922 scope.go:117] "RemoveContainer" containerID="5da366e264d5f169510e2c25b1c10ebaa7b947dcdfa831cd9274c13a4744c2e5" Nov 28 06:56:30 crc kubenswrapper[4922]: I1128 06:56:30.789886 4922 scope.go:117] "RemoveContainer" containerID="13522133c2f5744c223802d0ef3cc34fafc4f7996f56cb7751118635a742297f" Nov 28 06:56:30 crc kubenswrapper[4922]: I1128 06:56:30.799420 4922 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-wr979" Nov 28 06:56:30 crc kubenswrapper[4922]: I1128 06:56:30.814373 4922 scope.go:117] "RemoveContainer" containerID="6f2e930817c447b4c179490d2cf72b43d8a69650d7057e2b014c4b693a67799a" Nov 28 06:56:31 crc kubenswrapper[4922]: I1128 06:56:31.376528 4922 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-qgkkf" podUID="915bf055-c217-4565-a245-8901b61def3e" containerName="registry-server" probeResult="failure" output=< Nov 28 06:56:31 crc kubenswrapper[4922]: timeout: failed to connect service ":50051" within 1s Nov 28 06:56:31 crc kubenswrapper[4922]: > Nov 28 06:56:31 crc kubenswrapper[4922]: I1128 06:56:31.410523 4922 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="09d238cf-e3a6-461a-bdf8-f598c91385a3" path="/var/lib/kubelet/pods/09d238cf-e3a6-461a-bdf8-f598c91385a3/volumes" Nov 28 06:56:31 crc kubenswrapper[4922]: I1128 06:56:31.412143 4922 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d2e80403-da8b-49c0-9007-aa0c6c1be47a" path="/var/lib/kubelet/pods/d2e80403-da8b-49c0-9007-aa0c6c1be47a/volumes" Nov 28 06:56:33 crc kubenswrapper[4922]: I1128 06:56:33.835080 4922 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-wr979"] Nov 28 06:56:33 crc kubenswrapper[4922]: I1128 06:56:33.835666 4922 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-wr979" podUID="069013ed-12e9-4b49-a869-b6956f06ec15" containerName="registry-server" 
containerID="cri-o://aa0441119ee09e07d09d23a767135367d861f6681ca710a281aefe77ee580676" gracePeriod=2 Nov 28 06:56:35 crc kubenswrapper[4922]: I1128 06:56:35.693969 4922 generic.go:334] "Generic (PLEG): container finished" podID="069013ed-12e9-4b49-a869-b6956f06ec15" containerID="aa0441119ee09e07d09d23a767135367d861f6681ca710a281aefe77ee580676" exitCode=0 Nov 28 06:56:35 crc kubenswrapper[4922]: I1128 06:56:35.694028 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-wr979" event={"ID":"069013ed-12e9-4b49-a869-b6956f06ec15","Type":"ContainerDied","Data":"aa0441119ee09e07d09d23a767135367d861f6681ca710a281aefe77ee580676"} Nov 28 06:56:36 crc kubenswrapper[4922]: I1128 06:56:36.936407 4922 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-wr979" Nov 28 06:56:37 crc kubenswrapper[4922]: I1128 06:56:37.040060 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cwk9l\" (UniqueName: \"kubernetes.io/projected/069013ed-12e9-4b49-a869-b6956f06ec15-kube-api-access-cwk9l\") pod \"069013ed-12e9-4b49-a869-b6956f06ec15\" (UID: \"069013ed-12e9-4b49-a869-b6956f06ec15\") " Nov 28 06:56:37 crc kubenswrapper[4922]: I1128 06:56:37.040117 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/069013ed-12e9-4b49-a869-b6956f06ec15-utilities\") pod \"069013ed-12e9-4b49-a869-b6956f06ec15\" (UID: \"069013ed-12e9-4b49-a869-b6956f06ec15\") " Nov 28 06:56:37 crc kubenswrapper[4922]: I1128 06:56:37.040151 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/069013ed-12e9-4b49-a869-b6956f06ec15-catalog-content\") pod \"069013ed-12e9-4b49-a869-b6956f06ec15\" (UID: \"069013ed-12e9-4b49-a869-b6956f06ec15\") " Nov 28 06:56:37 crc kubenswrapper[4922]: I1128 06:56:37.044724 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/069013ed-12e9-4b49-a869-b6956f06ec15-utilities" (OuterVolumeSpecName: "utilities") pod "069013ed-12e9-4b49-a869-b6956f06ec15" (UID: "069013ed-12e9-4b49-a869-b6956f06ec15"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 06:56:37 crc kubenswrapper[4922]: I1128 06:56:37.049628 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/069013ed-12e9-4b49-a869-b6956f06ec15-kube-api-access-cwk9l" (OuterVolumeSpecName: "kube-api-access-cwk9l") pod "069013ed-12e9-4b49-a869-b6956f06ec15" (UID: "069013ed-12e9-4b49-a869-b6956f06ec15"). InnerVolumeSpecName "kube-api-access-cwk9l". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 06:56:37 crc kubenswrapper[4922]: I1128 06:56:37.143053 4922 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cwk9l\" (UniqueName: \"kubernetes.io/projected/069013ed-12e9-4b49-a869-b6956f06ec15-kube-api-access-cwk9l\") on node \"crc\" DevicePath \"\"" Nov 28 06:56:37 crc kubenswrapper[4922]: I1128 06:56:37.143091 4922 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/069013ed-12e9-4b49-a869-b6956f06ec15-utilities\") on node \"crc\" DevicePath \"\"" Nov 28 06:56:37 crc kubenswrapper[4922]: I1128 06:56:37.147565 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/069013ed-12e9-4b49-a869-b6956f06ec15-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "069013ed-12e9-4b49-a869-b6956f06ec15" (UID: "069013ed-12e9-4b49-a869-b6956f06ec15"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 06:56:37 crc kubenswrapper[4922]: I1128 06:56:37.244682 4922 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/069013ed-12e9-4b49-a869-b6956f06ec15-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 28 06:56:37 crc kubenswrapper[4922]: I1128 06:56:37.731937 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-wr979" event={"ID":"069013ed-12e9-4b49-a869-b6956f06ec15","Type":"ContainerDied","Data":"f21c8e70b12eaa4d571ae501dba9aebf7a24687f9db9f06e6fbac47e804ff135"} Nov 28 06:56:37 crc kubenswrapper[4922]: I1128 06:56:37.732031 4922 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-wr979" Nov 28 06:56:37 crc kubenswrapper[4922]: I1128 06:56:37.732041 4922 scope.go:117] "RemoveContainer" containerID="aa0441119ee09e07d09d23a767135367d861f6681ca710a281aefe77ee580676" Nov 28 06:56:37 crc kubenswrapper[4922]: I1128 06:56:37.758955 4922 scope.go:117] "RemoveContainer" containerID="fe8657d372a70ff8e2dc81120e613e85f34a34d44ffd339912c1b89cb5d1ae3e" Nov 28 06:56:37 crc kubenswrapper[4922]: I1128 06:56:37.760199 4922 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-wr979"] Nov 28 06:56:37 crc kubenswrapper[4922]: I1128 06:56:37.767901 4922 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-wr979"] Nov 28 06:56:37 crc kubenswrapper[4922]: I1128 06:56:37.785761 4922 scope.go:117] "RemoveContainer" containerID="5f7ba48771951a377d888de6709c7da8e9c761bf125f52b744f9da0f2265302c" Nov 28 06:56:39 crc kubenswrapper[4922]: I1128 06:56:39.413621 4922 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="069013ed-12e9-4b49-a869-b6956f06ec15" path="/var/lib/kubelet/pods/069013ed-12e9-4b49-a869-b6956f06ec15/volumes" Nov 28 06:56:40 crc kubenswrapper[4922]: I1128 06:56:40.388708 4922 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-qgkkf" Nov 28 06:56:40 crc kubenswrapper[4922]: I1128 06:56:40.457395 4922 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-qgkkf" Nov 28 06:56:46 crc kubenswrapper[4922]: I1128 06:56:46.522397 4922 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-authentication/oauth-openshift-558db77b4-bqwm2" 
podUID="d952449e-c825-43d3-a591-0be473db6a53" containerName="oauth-openshift" containerID="cri-o://b1ef3378b6d83028bdda421db6b41b59b763fc32d7b520b0bf701c78a42fccae" gracePeriod=15 Nov 28 06:56:46 crc kubenswrapper[4922]: I1128 06:56:46.567902 4922 kubelet.go:2421] "SyncLoop ADD" source="file" pods=["openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"] Nov 28 06:56:46 crc kubenswrapper[4922]: E1128 06:56:46.568214 4922 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="09d238cf-e3a6-461a-bdf8-f598c91385a3" containerName="extract-utilities" Nov 28 06:56:46 crc kubenswrapper[4922]: I1128 06:56:46.568269 4922 state_mem.go:107] "Deleted CPUSet assignment" podUID="09d238cf-e3a6-461a-bdf8-f598c91385a3" containerName="extract-utilities" Nov 28 06:56:46 crc kubenswrapper[4922]: E1128 06:56:46.568290 4922 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="09d238cf-e3a6-461a-bdf8-f598c91385a3" containerName="extract-content" Nov 28 06:56:46 crc kubenswrapper[4922]: I1128 06:56:46.568303 4922 state_mem.go:107] "Deleted CPUSet assignment" podUID="09d238cf-e3a6-461a-bdf8-f598c91385a3" containerName="extract-content" Nov 28 06:56:46 crc kubenswrapper[4922]: E1128 06:56:46.568325 4922 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="069013ed-12e9-4b49-a869-b6956f06ec15" containerName="extract-content" Nov 28 06:56:46 crc kubenswrapper[4922]: I1128 06:56:46.568338 4922 state_mem.go:107] "Deleted CPUSet assignment" podUID="069013ed-12e9-4b49-a869-b6956f06ec15" containerName="extract-content" Nov 28 06:56:46 crc kubenswrapper[4922]: E1128 06:56:46.568355 4922 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="069013ed-12e9-4b49-a869-b6956f06ec15" containerName="registry-server" Nov 28 06:56:46 crc kubenswrapper[4922]: I1128 06:56:46.568367 4922 state_mem.go:107] "Deleted CPUSet assignment" podUID="069013ed-12e9-4b49-a869-b6956f06ec15" containerName="registry-server" Nov 28 06:56:46 crc kubenswrapper[4922]: E1128 06:56:46.568383 4922 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d2e80403-da8b-49c0-9007-aa0c6c1be47a" containerName="extract-content" Nov 28 06:56:46 crc kubenswrapper[4922]: I1128 06:56:46.568394 4922 state_mem.go:107] "Deleted CPUSet assignment" podUID="d2e80403-da8b-49c0-9007-aa0c6c1be47a" containerName="extract-content" Nov 28 06:56:46 crc kubenswrapper[4922]: E1128 06:56:46.568414 4922 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cca6c3a6-ee5e-4d4c-8a39-68b087664c86" containerName="extract-content" Nov 28 06:56:46 crc kubenswrapper[4922]: I1128 06:56:46.568426 4922 state_mem.go:107] "Deleted CPUSet assignment" podUID="cca6c3a6-ee5e-4d4c-8a39-68b087664c86" containerName="extract-content" Nov 28 06:56:46 crc kubenswrapper[4922]: E1128 06:56:46.568443 4922 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="09d238cf-e3a6-461a-bdf8-f598c91385a3" containerName="registry-server" Nov 28 06:56:46 crc kubenswrapper[4922]: I1128 06:56:46.568455 4922 state_mem.go:107] "Deleted CPUSet assignment" podUID="09d238cf-e3a6-461a-bdf8-f598c91385a3" containerName="registry-server" Nov 28 06:56:46 crc kubenswrapper[4922]: E1128 06:56:46.568477 4922 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cca6c3a6-ee5e-4d4c-8a39-68b087664c86" containerName="extract-utilities" Nov 28 06:56:46 crc kubenswrapper[4922]: I1128 06:56:46.568490 4922 state_mem.go:107] "Deleted CPUSet assignment" podUID="cca6c3a6-ee5e-4d4c-8a39-68b087664c86" containerName="extract-utilities" Nov 28 
06:56:46 crc kubenswrapper[4922]: E1128 06:56:46.568507 4922 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cca6c3a6-ee5e-4d4c-8a39-68b087664c86" containerName="registry-server" Nov 28 06:56:46 crc kubenswrapper[4922]: I1128 06:56:46.568520 4922 state_mem.go:107] "Deleted CPUSet assignment" podUID="cca6c3a6-ee5e-4d4c-8a39-68b087664c86" containerName="registry-server" Nov 28 06:56:46 crc kubenswrapper[4922]: E1128 06:56:46.568537 4922 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="069013ed-12e9-4b49-a869-b6956f06ec15" containerName="extract-utilities" Nov 28 06:56:46 crc kubenswrapper[4922]: I1128 06:56:46.568548 4922 state_mem.go:107] "Deleted CPUSet assignment" podUID="069013ed-12e9-4b49-a869-b6956f06ec15" containerName="extract-utilities" Nov 28 06:56:46 crc kubenswrapper[4922]: E1128 06:56:46.568570 4922 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d2e80403-da8b-49c0-9007-aa0c6c1be47a" containerName="registry-server" Nov 28 06:56:46 crc kubenswrapper[4922]: I1128 06:56:46.568583 4922 state_mem.go:107] "Deleted CPUSet assignment" podUID="d2e80403-da8b-49c0-9007-aa0c6c1be47a" containerName="registry-server" Nov 28 06:56:46 crc kubenswrapper[4922]: E1128 06:56:46.568603 4922 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d2e80403-da8b-49c0-9007-aa0c6c1be47a" containerName="extract-utilities" Nov 28 06:56:46 crc kubenswrapper[4922]: I1128 06:56:46.568615 4922 state_mem.go:107] "Deleted CPUSet assignment" podUID="d2e80403-da8b-49c0-9007-aa0c6c1be47a" containerName="extract-utilities" Nov 28 06:56:46 crc kubenswrapper[4922]: I1128 06:56:46.568771 4922 memory_manager.go:354] "RemoveStaleState removing state" podUID="cca6c3a6-ee5e-4d4c-8a39-68b087664c86" containerName="registry-server" Nov 28 06:56:46 crc kubenswrapper[4922]: I1128 06:56:46.568797 4922 memory_manager.go:354] "RemoveStaleState removing state" podUID="069013ed-12e9-4b49-a869-b6956f06ec15" containerName="registry-server" Nov 28 06:56:46 crc kubenswrapper[4922]: I1128 06:56:46.568814 4922 memory_manager.go:354] "RemoveStaleState removing state" podUID="d2e80403-da8b-49c0-9007-aa0c6c1be47a" containerName="registry-server" Nov 28 06:56:46 crc kubenswrapper[4922]: I1128 06:56:46.568843 4922 memory_manager.go:354] "RemoveStaleState removing state" podUID="09d238cf-e3a6-461a-bdf8-f598c91385a3" containerName="registry-server" Nov 28 06:56:46 crc kubenswrapper[4922]: I1128 06:56:46.569459 4922 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 28 06:56:46 crc kubenswrapper[4922]: I1128 06:56:46.618574 4922 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"] Nov 28 06:56:46 crc kubenswrapper[4922]: I1128 06:56:46.639802 4922 kubelet.go:2431] "SyncLoop REMOVE" source="file" pods=["openshift-kube-apiserver/kube-apiserver-crc"] Nov 28 06:56:46 crc kubenswrapper[4922]: I1128 06:56:46.640405 4922 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" containerID="cri-o://d8727c0c5c76c4071561c95b14554063c01a2d7d826bfef19624a346f7079096" gracePeriod=15 Nov 28 06:56:46 crc kubenswrapper[4922]: I1128 06:56:46.640434 4922 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" containerID="cri-o://ff005273cedb4a3be19dafd07b2572733446d00c1aa3d89dec6dad79a9b37cb7" gracePeriod=15 Nov 28 06:56:46 crc kubenswrapper[4922]: I1128 06:56:46.640465 4922 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-regeneration-controller" containerID="cri-o://c63926690deadbe1a0b9bd4228c1be8c9d959ae52dc13540e1d7da0ef6ce2b44" gracePeriod=15 Nov 28 06:56:46 crc kubenswrapper[4922]: I1128 06:56:46.640580 4922 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-syncer" containerID="cri-o://12e55656cf2a2cadbef853f458f347aabe75bd6b644a48a8c320144a0bbb77f8" gracePeriod=15 Nov 28 06:56:46 crc kubenswrapper[4922]: I1128 06:56:46.640464 4922 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-insecure-readyz" containerID="cri-o://cd17aa15325fbe3a40b81cea5bfec137ef02a8dd2d5f8393be4d206a8360ba89" gracePeriod=15 Nov 28 06:56:46 crc kubenswrapper[4922]: I1128 06:56:46.641452 4922 kubelet.go:2421] "SyncLoop ADD" source="file" pods=["openshift-kube-apiserver/kube-apiserver-crc"] Nov 28 06:56:46 crc kubenswrapper[4922]: E1128 06:56:46.641771 4922 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-regeneration-controller" Nov 28 06:56:46 crc kubenswrapper[4922]: I1128 06:56:46.641799 4922 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-regeneration-controller" Nov 28 06:56:46 crc kubenswrapper[4922]: E1128 06:56:46.641815 4922 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-insecure-readyz" Nov 28 06:56:46 crc kubenswrapper[4922]: I1128 06:56:46.641827 4922 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-insecure-readyz" Nov 28 06:56:46 crc kubenswrapper[4922]: E1128 06:56:46.641850 4922 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="setup" 
Nov 28 06:56:46 crc kubenswrapper[4922]: I1128 06:56:46.641862 4922 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="setup" Nov 28 06:56:46 crc kubenswrapper[4922]: E1128 06:56:46.641884 4922 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" Nov 28 06:56:46 crc kubenswrapper[4922]: I1128 06:56:46.641897 4922 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" Nov 28 06:56:46 crc kubenswrapper[4922]: E1128 06:56:46.641912 4922 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-syncer" Nov 28 06:56:46 crc kubenswrapper[4922]: I1128 06:56:46.641926 4922 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-syncer" Nov 28 06:56:46 crc kubenswrapper[4922]: E1128 06:56:46.641943 4922 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" Nov 28 06:56:46 crc kubenswrapper[4922]: I1128 06:56:46.641955 4922 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" Nov 28 06:56:46 crc kubenswrapper[4922]: E1128 06:56:46.641976 4922 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" Nov 28 06:56:46 crc kubenswrapper[4922]: I1128 06:56:46.641987 4922 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" Nov 28 06:56:46 crc kubenswrapper[4922]: I1128 06:56:46.642144 4922 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-syncer" Nov 28 06:56:46 crc kubenswrapper[4922]: I1128 06:56:46.642161 4922 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" Nov 28 06:56:46 crc kubenswrapper[4922]: I1128 06:56:46.642175 4922 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-regeneration-controller" Nov 28 06:56:46 crc kubenswrapper[4922]: I1128 06:56:46.642196 4922 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" Nov 28 06:56:46 crc kubenswrapper[4922]: I1128 06:56:46.642243 4922 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" Nov 28 06:56:46 crc kubenswrapper[4922]: I1128 06:56:46.642259 4922 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-insecure-readyz" Nov 28 06:56:46 crc kubenswrapper[4922]: I1128 06:56:46.717352 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"manifests\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 28 06:56:46 crc kubenswrapper[4922]: I1128 
06:56:46.717545 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 28 06:56:46 crc kubenswrapper[4922]: I1128 06:56:46.717622 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 28 06:56:46 crc kubenswrapper[4922]: I1128 06:56:46.717682 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 28 06:56:46 crc kubenswrapper[4922]: I1128 06:56:46.717758 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 28 06:56:46 crc kubenswrapper[4922]: I1128 06:56:46.820606 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"manifests\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 28 06:56:46 crc kubenswrapper[4922]: I1128 06:56:46.820659 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 28 06:56:46 crc kubenswrapper[4922]: I1128 06:56:46.820740 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 28 06:56:46 crc kubenswrapper[4922]: I1128 06:56:46.820772 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 28 06:56:46 crc kubenswrapper[4922]: I1128 06:56:46.820793 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-cert-dir\") pod \"kube-apiserver-crc\" (UID: 
\"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 28 06:56:46 crc kubenswrapper[4922]: I1128 06:56:46.820820 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 28 06:56:46 crc kubenswrapper[4922]: I1128 06:56:46.820852 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 28 06:56:46 crc kubenswrapper[4922]: I1128 06:56:46.820889 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 28 06:56:46 crc kubenswrapper[4922]: I1128 06:56:46.820995 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"manifests\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 28 06:56:46 crc kubenswrapper[4922]: I1128 06:56:46.821051 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 28 06:56:46 crc kubenswrapper[4922]: I1128 06:56:46.821079 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 28 06:56:46 crc kubenswrapper[4922]: I1128 06:56:46.821109 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 28 06:56:46 crc kubenswrapper[4922]: I1128 06:56:46.821137 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 28 06:56:46 crc kubenswrapper[4922]: I1128 06:56:46.821630 4922 generic.go:334] "Generic (PLEG): container finished" podID="d952449e-c825-43d3-a591-0be473db6a53" containerID="b1ef3378b6d83028bdda421db6b41b59b763fc32d7b520b0bf701c78a42fccae" exitCode=0 Nov 28 06:56:46 crc kubenswrapper[4922]: I1128 
06:56:46.821724 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-558db77b4-bqwm2" event={"ID":"d952449e-c825-43d3-a591-0be473db6a53","Type":"ContainerDied","Data":"b1ef3378b6d83028bdda421db6b41b59b763fc32d7b520b0bf701c78a42fccae"} Nov 28 06:56:46 crc kubenswrapper[4922]: I1128 06:56:46.835772 4922 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-check-endpoints/0.log" Nov 28 06:56:46 crc kubenswrapper[4922]: I1128 06:56:46.837311 4922 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-cert-syncer/0.log" Nov 28 06:56:46 crc kubenswrapper[4922]: I1128 06:56:46.837779 4922 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="ff005273cedb4a3be19dafd07b2572733446d00c1aa3d89dec6dad79a9b37cb7" exitCode=0 Nov 28 06:56:46 crc kubenswrapper[4922]: I1128 06:56:46.837800 4922 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="cd17aa15325fbe3a40b81cea5bfec137ef02a8dd2d5f8393be4d206a8360ba89" exitCode=0 Nov 28 06:56:46 crc kubenswrapper[4922]: I1128 06:56:46.837808 4922 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="c63926690deadbe1a0b9bd4228c1be8c9d959ae52dc13540e1d7da0ef6ce2b44" exitCode=0 Nov 28 06:56:46 crc kubenswrapper[4922]: I1128 06:56:46.837816 4922 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="12e55656cf2a2cadbef853f458f347aabe75bd6b644a48a8c320144a0bbb77f8" exitCode=2 Nov 28 06:56:46 crc kubenswrapper[4922]: I1128 06:56:46.837850 4922 scope.go:117] "RemoveContainer" containerID="43f9db3464d315f5b596f15327b9e7cfc2935bb8f18492bcf4aa1b2d8e2e8951" Nov 28 06:56:46 crc kubenswrapper[4922]: I1128 06:56:46.910988 4922 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 28 06:56:46 crc kubenswrapper[4922]: I1128 06:56:46.922310 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 28 06:56:46 crc kubenswrapper[4922]: I1128 06:56:46.922355 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 28 06:56:46 crc kubenswrapper[4922]: I1128 06:56:46.922414 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 28 06:56:46 crc kubenswrapper[4922]: I1128 06:56:46.922430 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 28 06:56:46 crc kubenswrapper[4922]: I1128 06:56:46.922458 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 28 06:56:46 crc kubenswrapper[4922]: I1128 06:56:46.922458 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 28 06:56:46 crc kubenswrapper[4922]: E1128 06:56:46.939098 4922 event.go:368] "Unable to write event (may retry after sleeping)" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/events\": dial tcp 38.102.83.143:6443: connect: connection refused" event="&Event{ObjectMeta:{kube-apiserver-startup-monitor-crc.187c1955032b019d openshift-kube-apiserver 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-kube-apiserver,Name:kube-apiserver-startup-monitor-crc,UID:f85e55b1a89d02b0cb034b1ea31ed45a,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{startup-monitor},},Reason:Pulled,Message:Container image \"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\" already present on machine,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2025-11-28 06:56:46.938456477 +0000 UTC m=+251.858852059,LastTimestamp:2025-11-28 06:56:46.938456477 +0000 UTC m=+251.858852059,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Nov 28 06:56:47 crc kubenswrapper[4922]: I1128 06:56:47.024612 4922 
util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-558db77b4-bqwm2" Nov 28 06:56:47 crc kubenswrapper[4922]: I1128 06:56:47.025859 4922 status_manager.go:851] "Failed to get status for pod" podUID="d952449e-c825-43d3-a591-0be473db6a53" pod="openshift-authentication/oauth-openshift-558db77b4-bqwm2" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-authentication/pods/oauth-openshift-558db77b4-bqwm2\": dial tcp 38.102.83.143:6443: connect: connection refused" Nov 28 06:56:47 crc kubenswrapper[4922]: I1128 06:56:47.026626 4922 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.143:6443: connect: connection refused" Nov 28 06:56:47 crc kubenswrapper[4922]: I1128 06:56:47.027112 4922 status_manager.go:851] "Failed to get status for pod" podUID="f4b27818a5e8e43d0dc095d08835c792" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.143:6443: connect: connection refused" Nov 28 06:56:47 crc kubenswrapper[4922]: I1128 06:56:47.124054 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/d952449e-c825-43d3-a591-0be473db6a53-v4-0-config-system-service-ca\") pod \"d952449e-c825-43d3-a591-0be473db6a53\" (UID: \"d952449e-c825-43d3-a591-0be473db6a53\") " Nov 28 06:56:47 crc kubenswrapper[4922]: I1128 06:56:47.124124 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/d952449e-c825-43d3-a591-0be473db6a53-v4-0-config-user-template-provider-selection\") pod \"d952449e-c825-43d3-a591-0be473db6a53\" (UID: \"d952449e-c825-43d3-a591-0be473db6a53\") " Nov 28 06:56:47 crc kubenswrapper[4922]: I1128 06:56:47.124156 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/d952449e-c825-43d3-a591-0be473db6a53-v4-0-config-user-idp-0-file-data\") pod \"d952449e-c825-43d3-a591-0be473db6a53\" (UID: \"d952449e-c825-43d3-a591-0be473db6a53\") " Nov 28 06:56:47 crc kubenswrapper[4922]: I1128 06:56:47.124185 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/d952449e-c825-43d3-a591-0be473db6a53-v4-0-config-system-cliconfig\") pod \"d952449e-c825-43d3-a591-0be473db6a53\" (UID: \"d952449e-c825-43d3-a591-0be473db6a53\") " Nov 28 06:56:47 crc kubenswrapper[4922]: I1128 06:56:47.124527 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/d952449e-c825-43d3-a591-0be473db6a53-v4-0-config-system-ocp-branding-template\") pod \"d952449e-c825-43d3-a591-0be473db6a53\" (UID: \"d952449e-c825-43d3-a591-0be473db6a53\") " Nov 28 06:56:47 crc kubenswrapper[4922]: I1128 06:56:47.125315 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume 
"kubernetes.io/configmap/d952449e-c825-43d3-a591-0be473db6a53-v4-0-config-system-cliconfig" (OuterVolumeSpecName: "v4-0-config-system-cliconfig") pod "d952449e-c825-43d3-a591-0be473db6a53" (UID: "d952449e-c825-43d3-a591-0be473db6a53"). InnerVolumeSpecName "v4-0-config-system-cliconfig". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 06:56:47 crc kubenswrapper[4922]: I1128 06:56:47.125433 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/d952449e-c825-43d3-a591-0be473db6a53-audit-policies\") pod \"d952449e-c825-43d3-a591-0be473db6a53\" (UID: \"d952449e-c825-43d3-a591-0be473db6a53\") " Nov 28 06:56:47 crc kubenswrapper[4922]: I1128 06:56:47.125436 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d952449e-c825-43d3-a591-0be473db6a53-v4-0-config-system-service-ca" (OuterVolumeSpecName: "v4-0-config-system-service-ca") pod "d952449e-c825-43d3-a591-0be473db6a53" (UID: "d952449e-c825-43d3-a591-0be473db6a53"). InnerVolumeSpecName "v4-0-config-system-service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 06:56:47 crc kubenswrapper[4922]: I1128 06:56:47.125461 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/d952449e-c825-43d3-a591-0be473db6a53-v4-0-config-system-trusted-ca-bundle\") pod \"d952449e-c825-43d3-a591-0be473db6a53\" (UID: \"d952449e-c825-43d3-a591-0be473db6a53\") " Nov 28 06:56:47 crc kubenswrapper[4922]: I1128 06:56:47.126129 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d952449e-c825-43d3-a591-0be473db6a53-v4-0-config-system-trusted-ca-bundle" (OuterVolumeSpecName: "v4-0-config-system-trusted-ca-bundle") pod "d952449e-c825-43d3-a591-0be473db6a53" (UID: "d952449e-c825-43d3-a591-0be473db6a53"). InnerVolumeSpecName "v4-0-config-system-trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 06:56:47 crc kubenswrapper[4922]: I1128 06:56:47.126214 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d952449e-c825-43d3-a591-0be473db6a53-audit-policies" (OuterVolumeSpecName: "audit-policies") pod "d952449e-c825-43d3-a591-0be473db6a53" (UID: "d952449e-c825-43d3-a591-0be473db6a53"). InnerVolumeSpecName "audit-policies". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 06:56:47 crc kubenswrapper[4922]: I1128 06:56:47.126302 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/d952449e-c825-43d3-a591-0be473db6a53-v4-0-config-user-template-error\") pod \"d952449e-c825-43d3-a591-0be473db6a53\" (UID: \"d952449e-c825-43d3-a591-0be473db6a53\") " Nov 28 06:56:47 crc kubenswrapper[4922]: I1128 06:56:47.126357 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/d952449e-c825-43d3-a591-0be473db6a53-audit-dir\") pod \"d952449e-c825-43d3-a591-0be473db6a53\" (UID: \"d952449e-c825-43d3-a591-0be473db6a53\") " Nov 28 06:56:47 crc kubenswrapper[4922]: I1128 06:56:47.126393 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/d952449e-c825-43d3-a591-0be473db6a53-v4-0-config-user-template-login\") pod \"d952449e-c825-43d3-a591-0be473db6a53\" (UID: \"d952449e-c825-43d3-a591-0be473db6a53\") " Nov 28 06:56:47 crc kubenswrapper[4922]: I1128 06:56:47.126432 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/d952449e-c825-43d3-a591-0be473db6a53-audit-dir" (OuterVolumeSpecName: "audit-dir") pod "d952449e-c825-43d3-a591-0be473db6a53" (UID: "d952449e-c825-43d3-a591-0be473db6a53"). InnerVolumeSpecName "audit-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 28 06:56:47 crc kubenswrapper[4922]: I1128 06:56:47.126923 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/d952449e-c825-43d3-a591-0be473db6a53-v4-0-config-system-session\") pod \"d952449e-c825-43d3-a591-0be473db6a53\" (UID: \"d952449e-c825-43d3-a591-0be473db6a53\") " Nov 28 06:56:47 crc kubenswrapper[4922]: I1128 06:56:47.127052 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/d952449e-c825-43d3-a591-0be473db6a53-v4-0-config-system-router-certs\") pod \"d952449e-c825-43d3-a591-0be473db6a53\" (UID: \"d952449e-c825-43d3-a591-0be473db6a53\") " Nov 28 06:56:47 crc kubenswrapper[4922]: I1128 06:56:47.127093 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/d952449e-c825-43d3-a591-0be473db6a53-v4-0-config-system-serving-cert\") pod \"d952449e-c825-43d3-a591-0be473db6a53\" (UID: \"d952449e-c825-43d3-a591-0be473db6a53\") " Nov 28 06:56:47 crc kubenswrapper[4922]: I1128 06:56:47.127123 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nklb9\" (UniqueName: \"kubernetes.io/projected/d952449e-c825-43d3-a591-0be473db6a53-kube-api-access-nklb9\") pod \"d952449e-c825-43d3-a591-0be473db6a53\" (UID: \"d952449e-c825-43d3-a591-0be473db6a53\") " Nov 28 06:56:47 crc kubenswrapper[4922]: I1128 06:56:47.128493 4922 reconciler_common.go:293] "Volume detached for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/d952449e-c825-43d3-a591-0be473db6a53-audit-policies\") on node \"crc\" DevicePath \"\"" Nov 28 06:56:47 crc kubenswrapper[4922]: I1128 06:56:47.128512 4922 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: 
\"kubernetes.io/configmap/d952449e-c825-43d3-a591-0be473db6a53-v4-0-config-system-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 06:56:47 crc kubenswrapper[4922]: I1128 06:56:47.128527 4922 reconciler_common.go:293] "Volume detached for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/d952449e-c825-43d3-a591-0be473db6a53-audit-dir\") on node \"crc\" DevicePath \"\"" Nov 28 06:56:47 crc kubenswrapper[4922]: I1128 06:56:47.128540 4922 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/d952449e-c825-43d3-a591-0be473db6a53-v4-0-config-system-service-ca\") on node \"crc\" DevicePath \"\"" Nov 28 06:56:47 crc kubenswrapper[4922]: I1128 06:56:47.128561 4922 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/d952449e-c825-43d3-a591-0be473db6a53-v4-0-config-system-cliconfig\") on node \"crc\" DevicePath \"\"" Nov 28 06:56:47 crc kubenswrapper[4922]: I1128 06:56:47.131978 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d952449e-c825-43d3-a591-0be473db6a53-v4-0-config-user-template-provider-selection" (OuterVolumeSpecName: "v4-0-config-user-template-provider-selection") pod "d952449e-c825-43d3-a591-0be473db6a53" (UID: "d952449e-c825-43d3-a591-0be473db6a53"). InnerVolumeSpecName "v4-0-config-user-template-provider-selection". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 06:56:47 crc kubenswrapper[4922]: I1128 06:56:47.132046 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d952449e-c825-43d3-a591-0be473db6a53-kube-api-access-nklb9" (OuterVolumeSpecName: "kube-api-access-nklb9") pod "d952449e-c825-43d3-a591-0be473db6a53" (UID: "d952449e-c825-43d3-a591-0be473db6a53"). InnerVolumeSpecName "kube-api-access-nklb9". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 06:56:47 crc kubenswrapper[4922]: I1128 06:56:47.132978 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d952449e-c825-43d3-a591-0be473db6a53-v4-0-config-system-router-certs" (OuterVolumeSpecName: "v4-0-config-system-router-certs") pod "d952449e-c825-43d3-a591-0be473db6a53" (UID: "d952449e-c825-43d3-a591-0be473db6a53"). InnerVolumeSpecName "v4-0-config-system-router-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 06:56:47 crc kubenswrapper[4922]: I1128 06:56:47.134737 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d952449e-c825-43d3-a591-0be473db6a53-v4-0-config-system-ocp-branding-template" (OuterVolumeSpecName: "v4-0-config-system-ocp-branding-template") pod "d952449e-c825-43d3-a591-0be473db6a53" (UID: "d952449e-c825-43d3-a591-0be473db6a53"). InnerVolumeSpecName "v4-0-config-system-ocp-branding-template". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 06:56:47 crc kubenswrapper[4922]: I1128 06:56:47.136648 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d952449e-c825-43d3-a591-0be473db6a53-v4-0-config-user-idp-0-file-data" (OuterVolumeSpecName: "v4-0-config-user-idp-0-file-data") pod "d952449e-c825-43d3-a591-0be473db6a53" (UID: "d952449e-c825-43d3-a591-0be473db6a53"). InnerVolumeSpecName "v4-0-config-user-idp-0-file-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 06:56:47 crc kubenswrapper[4922]: I1128 06:56:47.137319 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d952449e-c825-43d3-a591-0be473db6a53-v4-0-config-user-template-login" (OuterVolumeSpecName: "v4-0-config-user-template-login") pod "d952449e-c825-43d3-a591-0be473db6a53" (UID: "d952449e-c825-43d3-a591-0be473db6a53"). InnerVolumeSpecName "v4-0-config-user-template-login". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 06:56:47 crc kubenswrapper[4922]: I1128 06:56:47.137787 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d952449e-c825-43d3-a591-0be473db6a53-v4-0-config-system-serving-cert" (OuterVolumeSpecName: "v4-0-config-system-serving-cert") pod "d952449e-c825-43d3-a591-0be473db6a53" (UID: "d952449e-c825-43d3-a591-0be473db6a53"). InnerVolumeSpecName "v4-0-config-system-serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 06:56:47 crc kubenswrapper[4922]: I1128 06:56:47.138083 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d952449e-c825-43d3-a591-0be473db6a53-v4-0-config-user-template-error" (OuterVolumeSpecName: "v4-0-config-user-template-error") pod "d952449e-c825-43d3-a591-0be473db6a53" (UID: "d952449e-c825-43d3-a591-0be473db6a53"). InnerVolumeSpecName "v4-0-config-user-template-error". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 06:56:47 crc kubenswrapper[4922]: I1128 06:56:47.138315 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d952449e-c825-43d3-a591-0be473db6a53-v4-0-config-system-session" (OuterVolumeSpecName: "v4-0-config-system-session") pod "d952449e-c825-43d3-a591-0be473db6a53" (UID: "d952449e-c825-43d3-a591-0be473db6a53"). InnerVolumeSpecName "v4-0-config-system-session". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 06:56:47 crc kubenswrapper[4922]: I1128 06:56:47.229967 4922 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nklb9\" (UniqueName: \"kubernetes.io/projected/d952449e-c825-43d3-a591-0be473db6a53-kube-api-access-nklb9\") on node \"crc\" DevicePath \"\"" Nov 28 06:56:47 crc kubenswrapper[4922]: I1128 06:56:47.230007 4922 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/d952449e-c825-43d3-a591-0be473db6a53-v4-0-config-system-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 28 06:56:47 crc kubenswrapper[4922]: I1128 06:56:47.230022 4922 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/d952449e-c825-43d3-a591-0be473db6a53-v4-0-config-user-template-provider-selection\") on node \"crc\" DevicePath \"\"" Nov 28 06:56:47 crc kubenswrapper[4922]: I1128 06:56:47.230040 4922 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/d952449e-c825-43d3-a591-0be473db6a53-v4-0-config-user-idp-0-file-data\") on node \"crc\" DevicePath \"\"" Nov 28 06:56:47 crc kubenswrapper[4922]: I1128 06:56:47.230053 4922 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/d952449e-c825-43d3-a591-0be473db6a53-v4-0-config-system-ocp-branding-template\") on node \"crc\" DevicePath \"\"" Nov 28 06:56:47 crc kubenswrapper[4922]: I1128 06:56:47.230065 4922 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/d952449e-c825-43d3-a591-0be473db6a53-v4-0-config-user-template-error\") on node \"crc\" DevicePath \"\"" Nov 28 06:56:47 crc kubenswrapper[4922]: I1128 06:56:47.230077 4922 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/d952449e-c825-43d3-a591-0be473db6a53-v4-0-config-user-template-login\") on node \"crc\" DevicePath \"\"" Nov 28 06:56:47 crc kubenswrapper[4922]: I1128 06:56:47.230092 4922 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/d952449e-c825-43d3-a591-0be473db6a53-v4-0-config-system-session\") on node \"crc\" DevicePath \"\"" Nov 28 06:56:47 crc kubenswrapper[4922]: I1128 06:56:47.230103 4922 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/d952449e-c825-43d3-a591-0be473db6a53-v4-0-config-system-router-certs\") on node \"crc\" DevicePath \"\"" Nov 28 06:56:47 crc kubenswrapper[4922]: I1128 06:56:47.848034 4922 generic.go:334] "Generic (PLEG): container finished" podID="d5da4dd4-e0f3-46c8-88cd-e84251d03b1b" containerID="3505eecb5a39b4e42725ac3f99be8d95d0b88e7682d14356be34ab8409dd3ea0" exitCode=0 Nov 28 06:56:47 crc kubenswrapper[4922]: I1128 06:56:47.848144 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/installer-9-crc" event={"ID":"d5da4dd4-e0f3-46c8-88cd-e84251d03b1b","Type":"ContainerDied","Data":"3505eecb5a39b4e42725ac3f99be8d95d0b88e7682d14356be34ab8409dd3ea0"} Nov 28 06:56:47 crc kubenswrapper[4922]: I1128 06:56:47.849473 4922 status_manager.go:851] "Failed to get status for pod" podUID="d952449e-c825-43d3-a591-0be473db6a53" 
pod="openshift-authentication/oauth-openshift-558db77b4-bqwm2" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-authentication/pods/oauth-openshift-558db77b4-bqwm2\": dial tcp 38.102.83.143:6443: connect: connection refused" Nov 28 06:56:47 crc kubenswrapper[4922]: I1128 06:56:47.850049 4922 status_manager.go:851] "Failed to get status for pod" podUID="d5da4dd4-e0f3-46c8-88cd-e84251d03b1b" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.143:6443: connect: connection refused" Nov 28 06:56:47 crc kubenswrapper[4922]: I1128 06:56:47.850830 4922 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.143:6443: connect: connection refused" Nov 28 06:56:47 crc kubenswrapper[4922]: I1128 06:56:47.852498 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-558db77b4-bqwm2" event={"ID":"d952449e-c825-43d3-a591-0be473db6a53","Type":"ContainerDied","Data":"7b38742f2ca0fdd464f439f7c942c558bd4abf399f0820f11a39f651c50772ea"} Nov 28 06:56:47 crc kubenswrapper[4922]: I1128 06:56:47.852559 4922 scope.go:117] "RemoveContainer" containerID="b1ef3378b6d83028bdda421db6b41b59b763fc32d7b520b0bf701c78a42fccae" Nov 28 06:56:47 crc kubenswrapper[4922]: I1128 06:56:47.852566 4922 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-558db77b4-bqwm2" Nov 28 06:56:47 crc kubenswrapper[4922]: I1128 06:56:47.853550 4922 status_manager.go:851] "Failed to get status for pod" podUID="d5da4dd4-e0f3-46c8-88cd-e84251d03b1b" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.143:6443: connect: connection refused" Nov 28 06:56:47 crc kubenswrapper[4922]: I1128 06:56:47.855661 4922 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.143:6443: connect: connection refused" Nov 28 06:56:47 crc kubenswrapper[4922]: I1128 06:56:47.856210 4922 status_manager.go:851] "Failed to get status for pod" podUID="d952449e-c825-43d3-a591-0be473db6a53" pod="openshift-authentication/oauth-openshift-558db77b4-bqwm2" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-authentication/pods/oauth-openshift-558db77b4-bqwm2\": dial tcp 38.102.83.143:6443: connect: connection refused" Nov 28 06:56:47 crc kubenswrapper[4922]: I1128 06:56:47.861082 4922 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.143:6443: connect: connection refused" Nov 28 06:56:47 crc kubenswrapper[4922]: I1128 06:56:47.861597 4922 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-cert-syncer/0.log" Nov 28 06:56:47 crc kubenswrapper[4922]: I1128 06:56:47.862185 4922 status_manager.go:851] "Failed to get status for pod" podUID="d952449e-c825-43d3-a591-0be473db6a53" pod="openshift-authentication/oauth-openshift-558db77b4-bqwm2" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-authentication/pods/oauth-openshift-558db77b4-bqwm2\": dial tcp 38.102.83.143:6443: connect: connection refused" Nov 28 06:56:47 crc kubenswrapper[4922]: I1128 06:56:47.862928 4922 status_manager.go:851] "Failed to get status for pod" podUID="d5da4dd4-e0f3-46c8-88cd-e84251d03b1b" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.143:6443: connect: connection refused" Nov 28 06:56:47 crc kubenswrapper[4922]: I1128 06:56:47.865324 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" event={"ID":"f85e55b1a89d02b0cb034b1ea31ed45a","Type":"ContainerStarted","Data":"5db12bb61244978bbac8599217c66d132302969e738bc58a804cf67418aef0ad"} Nov 28 06:56:47 crc kubenswrapper[4922]: I1128 06:56:47.865374 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" event={"ID":"f85e55b1a89d02b0cb034b1ea31ed45a","Type":"ContainerStarted","Data":"94bc5da51fe5658f6965c48bb2db3706d2325069ef310a674523ffb7b22b8083"} Nov 28 06:56:47 crc kubenswrapper[4922]: I1128 06:56:47.866167 4922 status_manager.go:851] "Failed to get status for pod" podUID="d5da4dd4-e0f3-46c8-88cd-e84251d03b1b" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.143:6443: connect: connection refused" Nov 28 06:56:47 crc kubenswrapper[4922]: I1128 06:56:47.866983 4922 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.143:6443: connect: connection refused" Nov 28 06:56:47 crc kubenswrapper[4922]: I1128 06:56:47.867705 4922 status_manager.go:851] "Failed to get status for pod" podUID="d952449e-c825-43d3-a591-0be473db6a53" pod="openshift-authentication/oauth-openshift-558db77b4-bqwm2" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-authentication/pods/oauth-openshift-558db77b4-bqwm2\": dial tcp 38.102.83.143:6443: connect: connection refused" Nov 28 06:56:48 crc kubenswrapper[4922]: E1128 06:56:48.647961 4922 controller.go:195] "Failed to update lease" err="Put \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.143:6443: connect: connection refused" Nov 28 06:56:48 crc kubenswrapper[4922]: E1128 06:56:48.649026 4922 controller.go:195] "Failed to update lease" err="Put \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.143:6443: connect: connection refused" Nov 28 06:56:48 crc kubenswrapper[4922]: E1128 06:56:48.649918 4922 controller.go:195] "Failed to update lease" err="Put 
\"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.143:6443: connect: connection refused" Nov 28 06:56:48 crc kubenswrapper[4922]: E1128 06:56:48.650416 4922 controller.go:195] "Failed to update lease" err="Put \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.143:6443: connect: connection refused" Nov 28 06:56:48 crc kubenswrapper[4922]: E1128 06:56:48.650888 4922 controller.go:195] "Failed to update lease" err="Put \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.143:6443: connect: connection refused" Nov 28 06:56:48 crc kubenswrapper[4922]: I1128 06:56:48.650936 4922 controller.go:115] "failed to update lease using latest lease, fallback to ensure lease" err="failed 5 attempts to update lease" Nov 28 06:56:48 crc kubenswrapper[4922]: E1128 06:56:48.651300 4922 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.143:6443: connect: connection refused" interval="200ms" Nov 28 06:56:48 crc kubenswrapper[4922]: E1128 06:56:48.852417 4922 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.143:6443: connect: connection refused" interval="400ms" Nov 28 06:56:49 crc kubenswrapper[4922]: I1128 06:56:49.024669 4922 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-cert-syncer/0.log" Nov 28 06:56:49 crc kubenswrapper[4922]: I1128 06:56:49.025329 4922 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 28 06:56:49 crc kubenswrapper[4922]: I1128 06:56:49.025820 4922 status_manager.go:851] "Failed to get status for pod" podUID="f4b27818a5e8e43d0dc095d08835c792" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.143:6443: connect: connection refused" Nov 28 06:56:49 crc kubenswrapper[4922]: I1128 06:56:49.025987 4922 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.143:6443: connect: connection refused" Nov 28 06:56:49 crc kubenswrapper[4922]: I1128 06:56:49.026243 4922 status_manager.go:851] "Failed to get status for pod" podUID="d952449e-c825-43d3-a591-0be473db6a53" pod="openshift-authentication/oauth-openshift-558db77b4-bqwm2" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-authentication/pods/oauth-openshift-558db77b4-bqwm2\": dial tcp 38.102.83.143:6443: connect: connection refused" Nov 28 06:56:49 crc kubenswrapper[4922]: I1128 06:56:49.026678 4922 status_manager.go:851] "Failed to get status for pod" podUID="d5da4dd4-e0f3-46c8-88cd-e84251d03b1b" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.143:6443: connect: connection refused" Nov 28 06:56:49 crc kubenswrapper[4922]: I1128 06:56:49.066596 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir\") pod \"f4b27818a5e8e43d0dc095d08835c792\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " Nov 28 06:56:49 crc kubenswrapper[4922]: I1128 06:56:49.066642 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir\") pod \"f4b27818a5e8e43d0dc095d08835c792\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " Nov 28 06:56:49 crc kubenswrapper[4922]: I1128 06:56:49.066712 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir\") pod \"f4b27818a5e8e43d0dc095d08835c792\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " Nov 28 06:56:49 crc kubenswrapper[4922]: I1128 06:56:49.066717 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir" (OuterVolumeSpecName: "resource-dir") pod "f4b27818a5e8e43d0dc095d08835c792" (UID: "f4b27818a5e8e43d0dc095d08835c792"). InnerVolumeSpecName "resource-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 28 06:56:49 crc kubenswrapper[4922]: I1128 06:56:49.066788 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir" (OuterVolumeSpecName: "cert-dir") pod "f4b27818a5e8e43d0dc095d08835c792" (UID: "f4b27818a5e8e43d0dc095d08835c792"). InnerVolumeSpecName "cert-dir". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 28 06:56:49 crc kubenswrapper[4922]: I1128 06:56:49.066874 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir" (OuterVolumeSpecName: "audit-dir") pod "f4b27818a5e8e43d0dc095d08835c792" (UID: "f4b27818a5e8e43d0dc095d08835c792"). InnerVolumeSpecName "audit-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 28 06:56:49 crc kubenswrapper[4922]: I1128 06:56:49.066995 4922 reconciler_common.go:293] "Volume detached for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir\") on node \"crc\" DevicePath \"\"" Nov 28 06:56:49 crc kubenswrapper[4922]: I1128 06:56:49.067009 4922 reconciler_common.go:293] "Volume detached for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir\") on node \"crc\" DevicePath \"\"" Nov 28 06:56:49 crc kubenswrapper[4922]: I1128 06:56:49.067017 4922 reconciler_common.go:293] "Volume detached for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir\") on node \"crc\" DevicePath \"\"" Nov 28 06:56:49 crc kubenswrapper[4922]: I1128 06:56:49.113864 4922 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/installer-9-crc" Nov 28 06:56:49 crc kubenswrapper[4922]: I1128 06:56:49.114567 4922 status_manager.go:851] "Failed to get status for pod" podUID="d952449e-c825-43d3-a591-0be473db6a53" pod="openshift-authentication/oauth-openshift-558db77b4-bqwm2" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-authentication/pods/oauth-openshift-558db77b4-bqwm2\": dial tcp 38.102.83.143:6443: connect: connection refused" Nov 28 06:56:49 crc kubenswrapper[4922]: I1128 06:56:49.115139 4922 status_manager.go:851] "Failed to get status for pod" podUID="d5da4dd4-e0f3-46c8-88cd-e84251d03b1b" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.143:6443: connect: connection refused" Nov 28 06:56:49 crc kubenswrapper[4922]: I1128 06:56:49.115716 4922 status_manager.go:851] "Failed to get status for pod" podUID="f4b27818a5e8e43d0dc095d08835c792" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.143:6443: connect: connection refused" Nov 28 06:56:49 crc kubenswrapper[4922]: I1128 06:56:49.116146 4922 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.143:6443: connect: connection refused" Nov 28 06:56:49 crc kubenswrapper[4922]: E1128 06:56:49.254783 4922 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.143:6443: connect: connection refused" interval="800ms" Nov 28 06:56:49 crc kubenswrapper[4922]: I1128 06:56:49.269355 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kubelet-dir\" (UniqueName: 
\"kubernetes.io/host-path/d5da4dd4-e0f3-46c8-88cd-e84251d03b1b-kubelet-dir\") pod \"d5da4dd4-e0f3-46c8-88cd-e84251d03b1b\" (UID: \"d5da4dd4-e0f3-46c8-88cd-e84251d03b1b\") " Nov 28 06:56:49 crc kubenswrapper[4922]: I1128 06:56:49.269467 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/d5da4dd4-e0f3-46c8-88cd-e84251d03b1b-var-lock\") pod \"d5da4dd4-e0f3-46c8-88cd-e84251d03b1b\" (UID: \"d5da4dd4-e0f3-46c8-88cd-e84251d03b1b\") " Nov 28 06:56:49 crc kubenswrapper[4922]: I1128 06:56:49.269522 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/d5da4dd4-e0f3-46c8-88cd-e84251d03b1b-kube-api-access\") pod \"d5da4dd4-e0f3-46c8-88cd-e84251d03b1b\" (UID: \"d5da4dd4-e0f3-46c8-88cd-e84251d03b1b\") " Nov 28 06:56:49 crc kubenswrapper[4922]: I1128 06:56:49.269543 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/d5da4dd4-e0f3-46c8-88cd-e84251d03b1b-kubelet-dir" (OuterVolumeSpecName: "kubelet-dir") pod "d5da4dd4-e0f3-46c8-88cd-e84251d03b1b" (UID: "d5da4dd4-e0f3-46c8-88cd-e84251d03b1b"). InnerVolumeSpecName "kubelet-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 28 06:56:49 crc kubenswrapper[4922]: I1128 06:56:49.269694 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/d5da4dd4-e0f3-46c8-88cd-e84251d03b1b-var-lock" (OuterVolumeSpecName: "var-lock") pod "d5da4dd4-e0f3-46c8-88cd-e84251d03b1b" (UID: "d5da4dd4-e0f3-46c8-88cd-e84251d03b1b"). InnerVolumeSpecName "var-lock". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 28 06:56:49 crc kubenswrapper[4922]: I1128 06:56:49.269861 4922 reconciler_common.go:293] "Volume detached for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/d5da4dd4-e0f3-46c8-88cd-e84251d03b1b-kubelet-dir\") on node \"crc\" DevicePath \"\"" Nov 28 06:56:49 crc kubenswrapper[4922]: I1128 06:56:49.269883 4922 reconciler_common.go:293] "Volume detached for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/d5da4dd4-e0f3-46c8-88cd-e84251d03b1b-var-lock\") on node \"crc\" DevicePath \"\"" Nov 28 06:56:49 crc kubenswrapper[4922]: I1128 06:56:49.279524 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d5da4dd4-e0f3-46c8-88cd-e84251d03b1b-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "d5da4dd4-e0f3-46c8-88cd-e84251d03b1b" (UID: "d5da4dd4-e0f3-46c8-88cd-e84251d03b1b"). InnerVolumeSpecName "kube-api-access". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 06:56:49 crc kubenswrapper[4922]: I1128 06:56:49.370730 4922 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/d5da4dd4-e0f3-46c8-88cd-e84251d03b1b-kube-api-access\") on node \"crc\" DevicePath \"\"" Nov 28 06:56:49 crc kubenswrapper[4922]: I1128 06:56:49.412473 4922 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f4b27818a5e8e43d0dc095d08835c792" path="/var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/volumes" Nov 28 06:56:49 crc kubenswrapper[4922]: I1128 06:56:49.892789 4922 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-cert-syncer/0.log" Nov 28 06:56:49 crc kubenswrapper[4922]: I1128 06:56:49.893757 4922 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="d8727c0c5c76c4071561c95b14554063c01a2d7d826bfef19624a346f7079096" exitCode=0 Nov 28 06:56:49 crc kubenswrapper[4922]: I1128 06:56:49.893874 4922 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 28 06:56:49 crc kubenswrapper[4922]: I1128 06:56:49.893891 4922 scope.go:117] "RemoveContainer" containerID="ff005273cedb4a3be19dafd07b2572733446d00c1aa3d89dec6dad79a9b37cb7" Nov 28 06:56:49 crc kubenswrapper[4922]: I1128 06:56:49.894869 4922 status_manager.go:851] "Failed to get status for pod" podUID="d952449e-c825-43d3-a591-0be473db6a53" pod="openshift-authentication/oauth-openshift-558db77b4-bqwm2" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-authentication/pods/oauth-openshift-558db77b4-bqwm2\": dial tcp 38.102.83.143:6443: connect: connection refused" Nov 28 06:56:49 crc kubenswrapper[4922]: I1128 06:56:49.895480 4922 status_manager.go:851] "Failed to get status for pod" podUID="d5da4dd4-e0f3-46c8-88cd-e84251d03b1b" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.143:6443: connect: connection refused" Nov 28 06:56:49 crc kubenswrapper[4922]: I1128 06:56:49.896003 4922 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.143:6443: connect: connection refused" Nov 28 06:56:49 crc kubenswrapper[4922]: I1128 06:56:49.896337 4922 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/installer-9-crc" Nov 28 06:56:49 crc kubenswrapper[4922]: I1128 06:56:49.896483 4922 status_manager.go:851] "Failed to get status for pod" podUID="f4b27818a5e8e43d0dc095d08835c792" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.143:6443: connect: connection refused" Nov 28 06:56:49 crc kubenswrapper[4922]: I1128 06:56:49.896329 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/installer-9-crc" event={"ID":"d5da4dd4-e0f3-46c8-88cd-e84251d03b1b","Type":"ContainerDied","Data":"0dee4e0269ecd4aad99a667867ca281c0b6901311353b2a480df5978152151c7"} Nov 28 06:56:49 crc kubenswrapper[4922]: I1128 06:56:49.896762 4922 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="0dee4e0269ecd4aad99a667867ca281c0b6901311353b2a480df5978152151c7" Nov 28 06:56:49 crc kubenswrapper[4922]: I1128 06:56:49.897420 4922 status_manager.go:851] "Failed to get status for pod" podUID="d5da4dd4-e0f3-46c8-88cd-e84251d03b1b" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.143:6443: connect: connection refused" Nov 28 06:56:49 crc kubenswrapper[4922]: I1128 06:56:49.897746 4922 status_manager.go:851] "Failed to get status for pod" podUID="f4b27818a5e8e43d0dc095d08835c792" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.143:6443: connect: connection refused" Nov 28 06:56:49 crc kubenswrapper[4922]: I1128 06:56:49.897954 4922 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.143:6443: connect: connection refused" Nov 28 06:56:49 crc kubenswrapper[4922]: I1128 06:56:49.898156 4922 status_manager.go:851] "Failed to get status for pod" podUID="d952449e-c825-43d3-a591-0be473db6a53" pod="openshift-authentication/oauth-openshift-558db77b4-bqwm2" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-authentication/pods/oauth-openshift-558db77b4-bqwm2\": dial tcp 38.102.83.143:6443: connect: connection refused" Nov 28 06:56:49 crc kubenswrapper[4922]: I1128 06:56:49.900396 4922 status_manager.go:851] "Failed to get status for pod" podUID="d952449e-c825-43d3-a591-0be473db6a53" pod="openshift-authentication/oauth-openshift-558db77b4-bqwm2" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-authentication/pods/oauth-openshift-558db77b4-bqwm2\": dial tcp 38.102.83.143:6443: connect: connection refused" Nov 28 06:56:49 crc kubenswrapper[4922]: I1128 06:56:49.900755 4922 status_manager.go:851] "Failed to get status for pod" podUID="d5da4dd4-e0f3-46c8-88cd-e84251d03b1b" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.143:6443: connect: connection refused" Nov 28 06:56:49 crc kubenswrapper[4922]: I1128 06:56:49.902010 4922 status_manager.go:851] "Failed to get status for pod" 
podUID="f4b27818a5e8e43d0dc095d08835c792" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.143:6443: connect: connection refused" Nov 28 06:56:49 crc kubenswrapper[4922]: I1128 06:56:49.902320 4922 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.143:6443: connect: connection refused" Nov 28 06:56:49 crc kubenswrapper[4922]: I1128 06:56:49.912177 4922 scope.go:117] "RemoveContainer" containerID="cd17aa15325fbe3a40b81cea5bfec137ef02a8dd2d5f8393be4d206a8360ba89" Nov 28 06:56:49 crc kubenswrapper[4922]: I1128 06:56:49.927984 4922 scope.go:117] "RemoveContainer" containerID="c63926690deadbe1a0b9bd4228c1be8c9d959ae52dc13540e1d7da0ef6ce2b44" Nov 28 06:56:49 crc kubenswrapper[4922]: I1128 06:56:49.946139 4922 scope.go:117] "RemoveContainer" containerID="12e55656cf2a2cadbef853f458f347aabe75bd6b644a48a8c320144a0bbb77f8" Nov 28 06:56:49 crc kubenswrapper[4922]: I1128 06:56:49.967593 4922 scope.go:117] "RemoveContainer" containerID="d8727c0c5c76c4071561c95b14554063c01a2d7d826bfef19624a346f7079096" Nov 28 06:56:49 crc kubenswrapper[4922]: I1128 06:56:49.989564 4922 scope.go:117] "RemoveContainer" containerID="9a1a9e0fb2169a4caf2986906a87573648458725e2571ec377704856d3c16b47" Nov 28 06:56:50 crc kubenswrapper[4922]: I1128 06:56:50.023498 4922 scope.go:117] "RemoveContainer" containerID="ff005273cedb4a3be19dafd07b2572733446d00c1aa3d89dec6dad79a9b37cb7" Nov 28 06:56:50 crc kubenswrapper[4922]: E1128 06:56:50.024105 4922 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ff005273cedb4a3be19dafd07b2572733446d00c1aa3d89dec6dad79a9b37cb7\": container with ID starting with ff005273cedb4a3be19dafd07b2572733446d00c1aa3d89dec6dad79a9b37cb7 not found: ID does not exist" containerID="ff005273cedb4a3be19dafd07b2572733446d00c1aa3d89dec6dad79a9b37cb7" Nov 28 06:56:50 crc kubenswrapper[4922]: I1128 06:56:50.024139 4922 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ff005273cedb4a3be19dafd07b2572733446d00c1aa3d89dec6dad79a9b37cb7"} err="failed to get container status \"ff005273cedb4a3be19dafd07b2572733446d00c1aa3d89dec6dad79a9b37cb7\": rpc error: code = NotFound desc = could not find container \"ff005273cedb4a3be19dafd07b2572733446d00c1aa3d89dec6dad79a9b37cb7\": container with ID starting with ff005273cedb4a3be19dafd07b2572733446d00c1aa3d89dec6dad79a9b37cb7 not found: ID does not exist" Nov 28 06:56:50 crc kubenswrapper[4922]: I1128 06:56:50.024160 4922 scope.go:117] "RemoveContainer" containerID="cd17aa15325fbe3a40b81cea5bfec137ef02a8dd2d5f8393be4d206a8360ba89" Nov 28 06:56:50 crc kubenswrapper[4922]: E1128 06:56:50.024658 4922 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"cd17aa15325fbe3a40b81cea5bfec137ef02a8dd2d5f8393be4d206a8360ba89\": container with ID starting with cd17aa15325fbe3a40b81cea5bfec137ef02a8dd2d5f8393be4d206a8360ba89 not found: ID does not exist" containerID="cd17aa15325fbe3a40b81cea5bfec137ef02a8dd2d5f8393be4d206a8360ba89" Nov 28 06:56:50 crc kubenswrapper[4922]: I1128 06:56:50.024705 4922 pod_container_deletor.go:53] 
"DeleteContainer returned error" containerID={"Type":"cri-o","ID":"cd17aa15325fbe3a40b81cea5bfec137ef02a8dd2d5f8393be4d206a8360ba89"} err="failed to get container status \"cd17aa15325fbe3a40b81cea5bfec137ef02a8dd2d5f8393be4d206a8360ba89\": rpc error: code = NotFound desc = could not find container \"cd17aa15325fbe3a40b81cea5bfec137ef02a8dd2d5f8393be4d206a8360ba89\": container with ID starting with cd17aa15325fbe3a40b81cea5bfec137ef02a8dd2d5f8393be4d206a8360ba89 not found: ID does not exist" Nov 28 06:56:50 crc kubenswrapper[4922]: I1128 06:56:50.024736 4922 scope.go:117] "RemoveContainer" containerID="c63926690deadbe1a0b9bd4228c1be8c9d959ae52dc13540e1d7da0ef6ce2b44" Nov 28 06:56:50 crc kubenswrapper[4922]: E1128 06:56:50.025308 4922 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c63926690deadbe1a0b9bd4228c1be8c9d959ae52dc13540e1d7da0ef6ce2b44\": container with ID starting with c63926690deadbe1a0b9bd4228c1be8c9d959ae52dc13540e1d7da0ef6ce2b44 not found: ID does not exist" containerID="c63926690deadbe1a0b9bd4228c1be8c9d959ae52dc13540e1d7da0ef6ce2b44" Nov 28 06:56:50 crc kubenswrapper[4922]: I1128 06:56:50.025357 4922 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c63926690deadbe1a0b9bd4228c1be8c9d959ae52dc13540e1d7da0ef6ce2b44"} err="failed to get container status \"c63926690deadbe1a0b9bd4228c1be8c9d959ae52dc13540e1d7da0ef6ce2b44\": rpc error: code = NotFound desc = could not find container \"c63926690deadbe1a0b9bd4228c1be8c9d959ae52dc13540e1d7da0ef6ce2b44\": container with ID starting with c63926690deadbe1a0b9bd4228c1be8c9d959ae52dc13540e1d7da0ef6ce2b44 not found: ID does not exist" Nov 28 06:56:50 crc kubenswrapper[4922]: I1128 06:56:50.025375 4922 scope.go:117] "RemoveContainer" containerID="12e55656cf2a2cadbef853f458f347aabe75bd6b644a48a8c320144a0bbb77f8" Nov 28 06:56:50 crc kubenswrapper[4922]: E1128 06:56:50.025702 4922 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"12e55656cf2a2cadbef853f458f347aabe75bd6b644a48a8c320144a0bbb77f8\": container with ID starting with 12e55656cf2a2cadbef853f458f347aabe75bd6b644a48a8c320144a0bbb77f8 not found: ID does not exist" containerID="12e55656cf2a2cadbef853f458f347aabe75bd6b644a48a8c320144a0bbb77f8" Nov 28 06:56:50 crc kubenswrapper[4922]: I1128 06:56:50.025724 4922 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"12e55656cf2a2cadbef853f458f347aabe75bd6b644a48a8c320144a0bbb77f8"} err="failed to get container status \"12e55656cf2a2cadbef853f458f347aabe75bd6b644a48a8c320144a0bbb77f8\": rpc error: code = NotFound desc = could not find container \"12e55656cf2a2cadbef853f458f347aabe75bd6b644a48a8c320144a0bbb77f8\": container with ID starting with 12e55656cf2a2cadbef853f458f347aabe75bd6b644a48a8c320144a0bbb77f8 not found: ID does not exist" Nov 28 06:56:50 crc kubenswrapper[4922]: I1128 06:56:50.025735 4922 scope.go:117] "RemoveContainer" containerID="d8727c0c5c76c4071561c95b14554063c01a2d7d826bfef19624a346f7079096" Nov 28 06:56:50 crc kubenswrapper[4922]: E1128 06:56:50.025984 4922 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d8727c0c5c76c4071561c95b14554063c01a2d7d826bfef19624a346f7079096\": container with ID starting with d8727c0c5c76c4071561c95b14554063c01a2d7d826bfef19624a346f7079096 not found: ID does not exist" 
containerID="d8727c0c5c76c4071561c95b14554063c01a2d7d826bfef19624a346f7079096" Nov 28 06:56:50 crc kubenswrapper[4922]: I1128 06:56:50.026020 4922 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d8727c0c5c76c4071561c95b14554063c01a2d7d826bfef19624a346f7079096"} err="failed to get container status \"d8727c0c5c76c4071561c95b14554063c01a2d7d826bfef19624a346f7079096\": rpc error: code = NotFound desc = could not find container \"d8727c0c5c76c4071561c95b14554063c01a2d7d826bfef19624a346f7079096\": container with ID starting with d8727c0c5c76c4071561c95b14554063c01a2d7d826bfef19624a346f7079096 not found: ID does not exist" Nov 28 06:56:50 crc kubenswrapper[4922]: I1128 06:56:50.026040 4922 scope.go:117] "RemoveContainer" containerID="9a1a9e0fb2169a4caf2986906a87573648458725e2571ec377704856d3c16b47" Nov 28 06:56:50 crc kubenswrapper[4922]: E1128 06:56:50.026420 4922 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9a1a9e0fb2169a4caf2986906a87573648458725e2571ec377704856d3c16b47\": container with ID starting with 9a1a9e0fb2169a4caf2986906a87573648458725e2571ec377704856d3c16b47 not found: ID does not exist" containerID="9a1a9e0fb2169a4caf2986906a87573648458725e2571ec377704856d3c16b47" Nov 28 06:56:50 crc kubenswrapper[4922]: I1128 06:56:50.026453 4922 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9a1a9e0fb2169a4caf2986906a87573648458725e2571ec377704856d3c16b47"} err="failed to get container status \"9a1a9e0fb2169a4caf2986906a87573648458725e2571ec377704856d3c16b47\": rpc error: code = NotFound desc = could not find container \"9a1a9e0fb2169a4caf2986906a87573648458725e2571ec377704856d3c16b47\": container with ID starting with 9a1a9e0fb2169a4caf2986906a87573648458725e2571ec377704856d3c16b47 not found: ID does not exist" Nov 28 06:56:50 crc kubenswrapper[4922]: E1128 06:56:50.055548 4922 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.143:6443: connect: connection refused" interval="1.6s" Nov 28 06:56:51 crc kubenswrapper[4922]: E1128 06:56:51.657136 4922 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.143:6443: connect: connection refused" interval="3.2s" Nov 28 06:56:51 crc kubenswrapper[4922]: E1128 06:56:51.717368 4922 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status 
\"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T06:56:51Z\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T06:56:51Z\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T06:56:51Z\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T06:56:51Z\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"}]}}\" for node \"crc\": Patch \"https://api-int.crc.testing:6443/api/v1/nodes/crc/status?timeout=10s\": dial tcp 38.102.83.143:6443: connect: connection refused" Nov 28 06:56:51 crc kubenswrapper[4922]: E1128 06:56:51.718006 4922 kubelet_node_status.go:585] "Error updating node status, will retry" err="error getting node \"crc\": Get \"https://api-int.crc.testing:6443/api/v1/nodes/crc?timeout=10s\": dial tcp 38.102.83.143:6443: connect: connection refused" Nov 28 06:56:51 crc kubenswrapper[4922]: E1128 06:56:51.718585 4922 kubelet_node_status.go:585] "Error updating node status, will retry" err="error getting node \"crc\": Get \"https://api-int.crc.testing:6443/api/v1/nodes/crc?timeout=10s\": dial tcp 38.102.83.143:6443: connect: connection refused" Nov 28 06:56:51 crc kubenswrapper[4922]: E1128 06:56:51.719192 4922 kubelet_node_status.go:585] "Error updating node status, will retry" err="error getting node \"crc\": Get \"https://api-int.crc.testing:6443/api/v1/nodes/crc?timeout=10s\": dial tcp 38.102.83.143:6443: connect: connection refused" Nov 28 06:56:51 crc kubenswrapper[4922]: E1128 06:56:51.719651 4922 kubelet_node_status.go:585] "Error updating node status, will retry" err="error getting node \"crc\": Get \"https://api-int.crc.testing:6443/api/v1/nodes/crc?timeout=10s\": dial tcp 38.102.83.143:6443: connect: connection refused" Nov 28 06:56:51 crc kubenswrapper[4922]: E1128 06:56:51.719689 4922 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Nov 28 06:56:54 crc kubenswrapper[4922]: E1128 06:56:54.858623 4922 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.143:6443: connect: connection refused" interval="6.4s" Nov 28 06:56:55 crc kubenswrapper[4922]: I1128 06:56:55.403685 4922 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.143:6443: connect: connection refused" Nov 28 06:56:55 crc kubenswrapper[4922]: I1128 06:56:55.404435 4922 status_manager.go:851] "Failed to get status for pod" podUID="d952449e-c825-43d3-a591-0be473db6a53" pod="openshift-authentication/oauth-openshift-558db77b4-bqwm2" err="Get 
\"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-authentication/pods/oauth-openshift-558db77b4-bqwm2\": dial tcp 38.102.83.143:6443: connect: connection refused" Nov 28 06:56:55 crc kubenswrapper[4922]: I1128 06:56:55.405047 4922 status_manager.go:851] "Failed to get status for pod" podUID="d5da4dd4-e0f3-46c8-88cd-e84251d03b1b" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.143:6443: connect: connection refused" Nov 28 06:56:56 crc kubenswrapper[4922]: E1128 06:56:56.468064 4922 event.go:368] "Unable to write event (may retry after sleeping)" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/events\": dial tcp 38.102.83.143:6443: connect: connection refused" event="&Event{ObjectMeta:{kube-apiserver-startup-monitor-crc.187c1955032b019d openshift-kube-apiserver 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-kube-apiserver,Name:kube-apiserver-startup-monitor-crc,UID:f85e55b1a89d02b0cb034b1ea31ed45a,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{startup-monitor},},Reason:Pulled,Message:Container image \"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\" already present on machine,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2025-11-28 06:56:46.938456477 +0000 UTC m=+251.858852059,LastTimestamp:2025-11-28 06:56:46.938456477 +0000 UTC m=+251.858852059,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Nov 28 06:56:57 crc kubenswrapper[4922]: I1128 06:56:57.398783 4922 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 28 06:56:57 crc kubenswrapper[4922]: I1128 06:56:57.400123 4922 status_manager.go:851] "Failed to get status for pod" podUID="d952449e-c825-43d3-a591-0be473db6a53" pod="openshift-authentication/oauth-openshift-558db77b4-bqwm2" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-authentication/pods/oauth-openshift-558db77b4-bqwm2\": dial tcp 38.102.83.143:6443: connect: connection refused" Nov 28 06:56:57 crc kubenswrapper[4922]: I1128 06:56:57.400995 4922 status_manager.go:851] "Failed to get status for pod" podUID="d5da4dd4-e0f3-46c8-88cd-e84251d03b1b" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.143:6443: connect: connection refused" Nov 28 06:56:57 crc kubenswrapper[4922]: I1128 06:56:57.401491 4922 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.143:6443: connect: connection refused" Nov 28 06:56:57 crc kubenswrapper[4922]: I1128 06:56:57.424754 4922 kubelet.go:1909] "Trying to delete pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="78d1b075-6126-476f-8318-483aeaa7b542" Nov 28 06:56:57 crc kubenswrapper[4922]: I1128 06:56:57.424809 4922 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="78d1b075-6126-476f-8318-483aeaa7b542" Nov 28 06:56:57 crc kubenswrapper[4922]: E1128 06:56:57.425432 4922 mirror_client.go:138] "Failed deleting a mirror pod" err="Delete \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.143:6443: connect: connection refused" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 28 06:56:57 crc kubenswrapper[4922]: I1128 06:56:57.426299 4922 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 28 06:56:57 crc kubenswrapper[4922]: I1128 06:56:57.979943 4922 generic.go:334] "Generic (PLEG): container finished" podID="71bb4a3aecc4ba5b26c4b7318770ce13" containerID="9f50f6cdddde146b03460d592e1424d123ebb19c81677283a3504c44c7e2f53d" exitCode=0 Nov 28 06:56:57 crc kubenswrapper[4922]: I1128 06:56:57.980021 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerDied","Data":"9f50f6cdddde146b03460d592e1424d123ebb19c81677283a3504c44c7e2f53d"} Nov 28 06:56:57 crc kubenswrapper[4922]: I1128 06:56:57.980466 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"1a430efbc7d45a8048c82bbdeff694fbf7b7838e04e709484ada0aa709e12229"} Nov 28 06:56:57 crc kubenswrapper[4922]: I1128 06:56:57.980878 4922 kubelet.go:1909] "Trying to delete pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="78d1b075-6126-476f-8318-483aeaa7b542" Nov 28 06:56:57 crc kubenswrapper[4922]: I1128 06:56:57.980901 4922 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="78d1b075-6126-476f-8318-483aeaa7b542" Nov 28 06:56:57 crc kubenswrapper[4922]: I1128 06:56:57.981411 4922 status_manager.go:851] "Failed to get status for pod" podUID="d952449e-c825-43d3-a591-0be473db6a53" pod="openshift-authentication/oauth-openshift-558db77b4-bqwm2" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-authentication/pods/oauth-openshift-558db77b4-bqwm2\": dial tcp 38.102.83.143:6443: connect: connection refused" Nov 28 06:56:57 crc kubenswrapper[4922]: E1128 06:56:57.981423 4922 mirror_client.go:138] "Failed deleting a mirror pod" err="Delete \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.143:6443: connect: connection refused" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 28 06:56:57 crc kubenswrapper[4922]: I1128 06:56:57.982463 4922 status_manager.go:851] "Failed to get status for pod" podUID="d5da4dd4-e0f3-46c8-88cd-e84251d03b1b" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.143:6443: connect: connection refused" Nov 28 06:56:57 crc kubenswrapper[4922]: I1128 06:56:57.982969 4922 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.143:6443: connect: connection refused" Nov 28 06:56:58 crc kubenswrapper[4922]: I1128 06:56:58.990565 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"320f18596a7a99bf5d32179b8dd58636ef443699cdb83ed35d501716ebe2db7e"} Nov 28 06:56:58 crc kubenswrapper[4922]: I1128 06:56:58.990937 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" 
event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"f4cc174d447674cc6fe56cb10073e905d7a38d61416b964e98b8f8ad5393ab7a"} Nov 28 06:56:58 crc kubenswrapper[4922]: I1128 06:56:58.990954 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"047a32a2d831822301a00fb904b2d49731b1b03a1ba52f533aa53435ae1be1dd"} Nov 28 06:56:59 crc kubenswrapper[4922]: I1128 06:56:59.999711 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"d195929f53a7c3caeb6780060159b7a6cdc68113dec8f39c2334b05803e5b7ae"} Nov 28 06:57:00 crc kubenswrapper[4922]: I1128 06:57:00.000150 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"fa816c4e813b12acc3a66ae675686306ec33d6162ea0fe3d46ae5dd590a83808"} Nov 28 06:57:00 crc kubenswrapper[4922]: I1128 06:57:00.000524 4922 kubelet.go:1909] "Trying to delete pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="78d1b075-6126-476f-8318-483aeaa7b542" Nov 28 06:57:00 crc kubenswrapper[4922]: I1128 06:57:00.000622 4922 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="78d1b075-6126-476f-8318-483aeaa7b542" Nov 28 06:57:00 crc kubenswrapper[4922]: I1128 06:57:00.000950 4922 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 28 06:57:01 crc kubenswrapper[4922]: I1128 06:57:01.016570 4922 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-controller-manager_kube-controller-manager-crc_f614b9022728cf315e60c057852e563e/kube-controller-manager/0.log" Nov 28 06:57:01 crc kubenswrapper[4922]: I1128 06:57:01.016621 4922 generic.go:334] "Generic (PLEG): container finished" podID="f614b9022728cf315e60c057852e563e" containerID="1c84fe4f54f9b9fc76f23f13aaee875e9d127be3dec3e3952893436fb9d94936" exitCode=1 Nov 28 06:57:01 crc kubenswrapper[4922]: I1128 06:57:01.016653 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerDied","Data":"1c84fe4f54f9b9fc76f23f13aaee875e9d127be3dec3e3952893436fb9d94936"} Nov 28 06:57:01 crc kubenswrapper[4922]: I1128 06:57:01.017101 4922 scope.go:117] "RemoveContainer" containerID="1c84fe4f54f9b9fc76f23f13aaee875e9d127be3dec3e3952893436fb9d94936" Nov 28 06:57:02 crc kubenswrapper[4922]: I1128 06:57:02.032439 4922 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-controller-manager_kube-controller-manager-crc_f614b9022728cf315e60c057852e563e/kube-controller-manager/0.log" Nov 28 06:57:02 crc kubenswrapper[4922]: I1128 06:57:02.032863 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"2d607694c3e92960eb01b8f8648f0a9414b0a6fa5965bfc3b676fff3a90f7fb2"} Nov 28 06:57:02 crc kubenswrapper[4922]: I1128 06:57:02.427101 4922 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 28 06:57:02 crc kubenswrapper[4922]: I1128 06:57:02.427171 4922 
kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 28 06:57:02 crc kubenswrapper[4922]: I1128 06:57:02.436045 4922 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 28 06:57:04 crc kubenswrapper[4922]: I1128 06:57:04.335514 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 06:57:04 crc kubenswrapper[4922]: I1128 06:57:04.335935 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 06:57:04 crc kubenswrapper[4922]: I1128 06:57:04.340691 4922 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-console"/"networking-console-plugin" Nov 28 06:57:04 crc kubenswrapper[4922]: I1128 06:57:04.340709 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-console"/"networking-console-plugin-cert" Nov 28 06:57:04 crc kubenswrapper[4922]: I1128 06:57:04.347504 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 06:57:04 crc kubenswrapper[4922]: I1128 06:57:04.355294 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 06:57:04 crc kubenswrapper[4922]: I1128 06:57:04.557013 4922 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 06:57:05 crc kubenswrapper[4922]: I1128 06:57:05.012795 4922 kubelet.go:1914] "Deleted mirror pod because it is outdated" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 28 06:57:05 crc kubenswrapper[4922]: I1128 06:57:05.054473 4922 kubelet.go:1909] "Trying to delete pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="78d1b075-6126-476f-8318-483aeaa7b542" Nov 28 06:57:05 crc kubenswrapper[4922]: I1128 06:57:05.054517 4922 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="78d1b075-6126-476f-8318-483aeaa7b542" Nov 28 06:57:05 crc kubenswrapper[4922]: I1128 06:57:05.066509 4922 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 28 06:57:05 crc kubenswrapper[4922]: I1128 06:57:05.414380 4922 status_manager.go:861] "Pod was deleted and then recreated, skipping status update" pod="openshift-kube-apiserver/kube-apiserver-crc" oldPodUID="71bb4a3aecc4ba5b26c4b7318770ce13" podUID="049075bf-45f8-4692-868a-908757107ebb" Nov 28 06:57:05 crc kubenswrapper[4922]: I1128 06:57:05.783744 4922 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 28 06:57:06 crc kubenswrapper[4922]: I1128 06:57:06.062480 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" event={"ID":"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8","Type":"ContainerStarted","Data":"4329dd7649e3808a3ea3bafb227060fec4fed78e9f5e934124247393e6835fb9"} Nov 28 06:57:06 crc kubenswrapper[4922]: I1128 06:57:06.062541 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" event={"ID":"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8","Type":"ContainerStarted","Data":"2bc512915b71639ca826e4c53a432728721cfb221725dfa20245345670e96b18"} Nov 28 06:57:06 crc kubenswrapper[4922]: I1128 06:57:06.063168 4922 kubelet.go:1909] "Trying to delete pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="78d1b075-6126-476f-8318-483aeaa7b542" Nov 28 06:57:06 crc kubenswrapper[4922]: I1128 06:57:06.063215 4922 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="78d1b075-6126-476f-8318-483aeaa7b542" Nov 28 06:57:06 crc kubenswrapper[4922]: I1128 06:57:06.078590 4922 status_manager.go:861] "Pod was deleted and then recreated, skipping status update" pod="openshift-kube-apiserver/kube-apiserver-crc" oldPodUID="71bb4a3aecc4ba5b26c4b7318770ce13" podUID="049075bf-45f8-4692-868a-908757107ebb" Nov 28 06:57:08 crc kubenswrapper[4922]: I1128 06:57:08.010149 4922 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 28 06:57:08 crc kubenswrapper[4922]: I1128 06:57:08.019455 4922 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 28 06:57:11 crc kubenswrapper[4922]: I1128 06:57:11.285266 4922 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"openshift-service-ca.crt" Nov 28 06:57:11 crc kubenswrapper[4922]: I1128 06:57:11.957511 4922 reflector.go:368] Caches populated for *v1.Secret from 
object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-serving-cert" Nov 28 06:57:12 crc kubenswrapper[4922]: I1128 06:57:12.097696 4922 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"openshift-service-ca.crt" Nov 28 06:57:12 crc kubenswrapper[4922]: I1128 06:57:12.163747 4922 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-config-operator"/"openshift-service-ca.crt" Nov 28 06:57:12 crc kubenswrapper[4922]: I1128 06:57:12.327992 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"machine-api-operator-tls" Nov 28 06:57:12 crc kubenswrapper[4922]: I1128 06:57:12.549165 4922 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"service-ca-bundle" Nov 28 06:57:12 crc kubenswrapper[4922]: I1128 06:57:12.633618 4922 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"kube-root-ca.crt" Nov 28 06:57:12 crc kubenswrapper[4922]: I1128 06:57:12.688100 4922 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"openshift-service-ca.crt" Nov 28 06:57:12 crc kubenswrapper[4922]: I1128 06:57:12.710566 4922 reflector.go:368] Caches populated for *v1.Node from k8s.io/client-go/informers/factory.go:160 Nov 28 06:57:12 crc kubenswrapper[4922]: I1128 06:57:12.846132 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication-operator"/"serving-cert" Nov 28 06:57:13 crc kubenswrapper[4922]: I1128 06:57:13.403063 4922 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"trusted-ca" Nov 28 06:57:13 crc kubenswrapper[4922]: I1128 06:57:13.511788 4922 reflector.go:368] Caches populated for *v1.CSIDriver from k8s.io/client-go/informers/factory.go:160 Nov 28 06:57:13 crc kubenswrapper[4922]: I1128 06:57:13.633742 4922 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"image-import-ca" Nov 28 06:57:13 crc kubenswrapper[4922]: I1128 06:57:13.852424 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"control-plane-machine-set-operator-tls" Nov 28 06:57:13 crc kubenswrapper[4922]: I1128 06:57:13.967749 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"marketplace-operator-dockercfg-5nsgg" Nov 28 06:57:14 crc kubenswrapper[4922]: I1128 06:57:14.059796 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca-operator"/"service-ca-operator-dockercfg-rg9jl" Nov 28 06:57:14 crc kubenswrapper[4922]: I1128 06:57:14.134985 4922 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-config" Nov 28 06:57:14 crc kubenswrapper[4922]: I1128 06:57:14.634263 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console-operator"/"console-operator-dockercfg-4xjcr" Nov 28 06:57:14 crc kubenswrapper[4922]: I1128 06:57:14.743077 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-serving-cert" Nov 28 06:57:14 crc kubenswrapper[4922]: I1128 06:57:14.845118 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-scheduler-operator"/"kube-scheduler-operator-serving-cert" Nov 28 06:57:14 crc kubenswrapper[4922]: I1128 06:57:14.902337 4922 reflector.go:368] Caches populated for 
*v1.Secret from object-"openshift-machine-api"/"control-plane-machine-set-operator-dockercfg-k9rxt" Nov 28 06:57:14 crc kubenswrapper[4922]: I1128 06:57:14.981314 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication-operator"/"authentication-operator-dockercfg-mz9bj" Nov 28 06:57:15 crc kubenswrapper[4922]: I1128 06:57:15.010885 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"serving-cert" Nov 28 06:57:15 crc kubenswrapper[4922]: I1128 06:57:15.180069 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns-operator"/"dns-operator-dockercfg-9mqw5" Nov 28 06:57:15 crc kubenswrapper[4922]: I1128 06:57:15.364298 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"packageserver-service-cert" Nov 28 06:57:15 crc kubenswrapper[4922]: I1128 06:57:15.661794 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-serving-cert" Nov 28 06:57:15 crc kubenswrapper[4922]: I1128 06:57:15.790118 4922 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 28 06:57:15 crc kubenswrapper[4922]: I1128 06:57:15.796894 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"openshift-apiserver-sa-dockercfg-djjff" Nov 28 06:57:15 crc kubenswrapper[4922]: I1128 06:57:15.816551 4922 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"openshift-service-ca.crt" Nov 28 06:57:16 crc kubenswrapper[4922]: I1128 06:57:16.010271 4922 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"openshift-service-ca.crt" Nov 28 06:57:16 crc kubenswrapper[4922]: I1128 06:57:16.263323 4922 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"service-ca-bundle" Nov 28 06:57:16 crc kubenswrapper[4922]: I1128 06:57:16.274787 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-kubernetes-control-plane-dockercfg-gs7dd" Nov 28 06:57:16 crc kubenswrapper[4922]: I1128 06:57:16.307030 4922 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"openshift-service-ca.crt" Nov 28 06:57:16 crc kubenswrapper[4922]: I1128 06:57:16.316269 4922 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"openshift-service-ca.crt" Nov 28 06:57:16 crc kubenswrapper[4922]: I1128 06:57:16.406669 4922 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns-operator"/"openshift-service-ca.crt" Nov 28 06:57:16 crc kubenswrapper[4922]: I1128 06:57:16.465515 4922 reflector.go:368] Caches populated for *v1.ConfigMap from object-"hostpath-provisioner"/"openshift-service-ca.crt" Nov 28 06:57:16 crc kubenswrapper[4922]: I1128 06:57:16.535853 4922 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"kube-root-ca.crt" Nov 28 06:57:16 crc kubenswrapper[4922]: I1128 06:57:16.576659 4922 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"multus-daemon-config" Nov 28 06:57:17 crc kubenswrapper[4922]: I1128 06:57:17.001387 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-dockercfg-zdk86" Nov 28 06:57:17 crc 
kubenswrapper[4922]: I1128 06:57:17.079970 4922 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"ovnkube-identity-cm"
Nov 28 06:57:17 crc kubenswrapper[4922]: I1128 06:57:17.393783 4922 reflector.go:368] Caches populated for *v1.RuntimeClass from k8s.io/client-go/informers/factory.go:160
Nov 28 06:57:18 crc kubenswrapper[4922]: I1128 06:57:18.013648 4922 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager-operator"/"kube-root-ca.crt"
Nov 28 06:57:18 crc kubenswrapper[4922]: I1128 06:57:18.075637 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"etcd-client"
Nov 28 06:57:18 crc kubenswrapper[4922]: I1128 06:57:18.086476 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-config-operator"/"config-operator-serving-cert"
Nov 28 06:57:18 crc kubenswrapper[4922]: I1128 06:57:18.138107 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-machine-approver"/"machine-approver-sa-dockercfg-nl2j4"
Nov 28 06:57:18 crc kubenswrapper[4922]: I1128 06:57:18.992601 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"proxy-tls"
Nov 28 06:57:19 crc kubenswrapper[4922]: I1128 06:57:19.102680 4922 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"kube-root-ca.crt"
Nov 28 06:57:19 crc kubenswrapper[4922]: I1128 06:57:19.135860 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"node-ca-dockercfg-4777p"
Nov 28 06:57:19 crc kubenswrapper[4922]: I1128 06:57:19.212343 4922 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-diagnostics"/"openshift-service-ca.crt"
Nov 28 06:57:19 crc kubenswrapper[4922]: I1128 06:57:19.376342 4922 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"kube-root-ca.crt"
Nov 28 06:57:19 crc kubenswrapper[4922]: I1128 06:57:19.393288 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns-operator"/"metrics-tls"
Nov 28 06:57:19 crc kubenswrapper[4922]: I1128 06:57:19.567148 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"metrics-daemon-sa-dockercfg-d427c"
Nov 28 06:57:19 crc kubenswrapper[4922]: I1128 06:57:19.685364 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"etcd-client"
Nov 28 06:57:19 crc kubenswrapper[4922]: I1128 06:57:19.760999 4922 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"kube-root-ca.crt"
Nov 28 06:57:19 crc kubenswrapper[4922]: I1128 06:57:19.785048 4922 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"service-ca"
Nov 28 06:57:19 crc kubenswrapper[4922]: I1128 06:57:19.787146 4922 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"image-registry-certificates"
Nov 28 06:57:19 crc kubenswrapper[4922]: I1128 06:57:19.808769 4922 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"service-ca-operator-config"
Nov 28 06:57:19 crc kubenswrapper[4922]: I1128 06:57:19.948451 4922 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-canary"/"kube-root-ca.crt"
Nov 28 06:57:19 crc kubenswrapper[4922]: I1128 06:57:19.979730 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"oauth-apiserver-sa-dockercfg-6r2bq"
Nov 28 06:57:20 crc kubenswrapper[4922]: I1128 06:57:20.112769 4922 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"default-cni-sysctl-allowlist"
Nov 28 06:57:20 crc kubenswrapper[4922]: I1128 06:57:20.134330 4922 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"kube-root-ca.crt"
Nov 28 06:57:20 crc kubenswrapper[4922]: I1128 06:57:20.135181 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-samples-operator"/"samples-operator-tls"
Nov 28 06:57:20 crc kubenswrapper[4922]: I1128 06:57:20.276298 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"default-dockercfg-chnjx"
Nov 28 06:57:20 crc kubenswrapper[4922]: I1128 06:57:20.545368 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-version"/"default-dockercfg-gxtc4"
Nov 28 06:57:20 crc kubenswrapper[4922]: I1128 06:57:20.605491 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"installation-pull-secrets"
Nov 28 06:57:20 crc kubenswrapper[4922]: I1128 06:57:20.671531 4922 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver-operator"/"kube-root-ca.crt"
Nov 28 06:57:20 crc kubenswrapper[4922]: I1128 06:57:20.772483 4922 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"client-ca"
Nov 28 06:57:20 crc kubenswrapper[4922]: I1128 06:57:20.788836 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"serving-cert"
Nov 28 06:57:20 crc kubenswrapper[4922]: I1128 06:57:20.807787 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"serving-cert"
Nov 28 06:57:20 crc kubenswrapper[4922]: I1128 06:57:20.977976 4922 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-ca-bundle"
Nov 28 06:57:20 crc kubenswrapper[4922]: I1128 06:57:20.992338 4922 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"trusted-ca"
Nov 28 06:57:20 crc kubenswrapper[4922]: I1128 06:57:20.993985 4922 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"kube-root-ca.crt"
Nov 28 06:57:21 crc kubenswrapper[4922]: I1128 06:57:21.041968 4922 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-samples-operator"/"kube-root-ca.crt"
Nov 28 06:57:21 crc kubenswrapper[4922]: I1128 06:57:21.053293 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-dockercfg-gkqpw"
Nov 28 06:57:21 crc kubenswrapper[4922]: I1128 06:57:21.068587 4922 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-config"
Nov 28 06:57:21 crc kubenswrapper[4922]: I1128 06:57:21.097935 4922 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"env-overrides"
Nov 28 06:57:21 crc kubenswrapper[4922]: I1128 06:57:21.129204 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-dockercfg-x57mr"
Nov 28 06:57:21 crc kubenswrapper[4922]: I1128 06:57:21.147808 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-config-operator"/"openshift-config-operator-dockercfg-7pc5z"
Nov 28 06:57:21 crc kubenswrapper[4922]: I1128 06:57:21.237834 4922 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"ovnkube-config"
Nov 28 06:57:21 crc kubenswrapper[4922]: I1128 06:57:21.241392 4922 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"machine-config-operator-images"
Nov 28 06:57:21 crc kubenswrapper[4922]: I1128 06:57:21.352451 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator"/"kube-storage-version-migrator-sa-dockercfg-5xfcg"
Nov 28 06:57:21 crc kubenswrapper[4922]: I1128 06:57:21.402720 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator-operator"/"serving-cert"
Nov 28 06:57:21 crc kubenswrapper[4922]: I1128 06:57:21.436719 4922 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator"/"openshift-service-ca.crt"
Nov 28 06:57:21 crc kubenswrapper[4922]: I1128 06:57:21.470384 4922 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"audit-1"
Nov 28 06:57:21 crc kubenswrapper[4922]: I1128 06:57:21.585732 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"encryption-config-1"
Nov 28 06:57:21 crc kubenswrapper[4922]: I1128 06:57:21.588989 4922 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"console-operator-config"
Nov 28 06:57:21 crc kubenswrapper[4922]: I1128 06:57:21.702422 4922 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"trusted-ca"
Nov 28 06:57:21 crc kubenswrapper[4922]: I1128 06:57:21.718928 4922 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-service-ca-bundle"
Nov 28 06:57:21 crc kubenswrapper[4922]: I1128 06:57:21.724723 4922 reflector.go:368] Caches populated for *v1.ConfigMap from object-"hostpath-provisioner"/"kube-root-ca.crt"
Nov 28 06:57:21 crc kubenswrapper[4922]: I1128 06:57:21.857867 4922 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"openshift-service-ca.crt"
Nov 28 06:57:22 crc kubenswrapper[4922]: I1128 06:57:22.115269 4922 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"kube-root-ca.crt"
Nov 28 06:57:22 crc kubenswrapper[4922]: I1128 06:57:22.410418 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"dns-dockercfg-jwfmh"
Nov 28 06:57:22 crc kubenswrapper[4922]: I1128 06:57:22.514731 4922 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"openshift-service-ca.crt"
Nov 28 06:57:22 crc kubenswrapper[4922]: I1128 06:57:22.585466 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-stats-default"
Nov 28 06:57:22 crc kubenswrapper[4922]: I1128 06:57:22.591013 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-node-metrics-cert"
Nov 28 06:57:22 crc kubenswrapper[4922]: I1128 06:57:22.720559 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"encryption-config-1"
Nov 28 06:57:22 crc kubenswrapper[4922]: I1128 06:57:22.732994 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-serving-cert"
Nov 28 06:57:22 crc kubenswrapper[4922]: I1128 06:57:22.794381 4922 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"config"
Nov 28 06:57:22 crc kubenswrapper[4922]: I1128 06:57:22.881010 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-client"
Nov 28 06:57:22 crc kubenswrapper[4922]: I1128 06:57:22.955836 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-operators-dockercfg-ct8rh"
Nov 28 06:57:22 crc kubenswrapper[4922]: I1128 06:57:22.980291 4922 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"kube-root-ca.crt"
Nov 28 06:57:23 crc kubenswrapper[4922]: I1128 06:57:23.045747 4922 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"kube-root-ca.crt"
Nov 28 06:57:23 crc kubenswrapper[4922]: I1128 06:57:23.159886 4922 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"kube-root-ca.crt"
Nov 28 06:57:23 crc kubenswrapper[4922]: I1128 06:57:23.191447 4922 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"trusted-ca-bundle"
Nov 28 06:57:23 crc kubenswrapper[4922]: I1128 06:57:23.284398 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-ancillary-tools-dockercfg-vnmsz"
Nov 28 06:57:23 crc kubenswrapper[4922]: I1128 06:57:23.324923 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-controller-dockercfg-c2lfx"
Nov 28 06:57:23 crc kubenswrapper[4922]: I1128 06:57:23.332942 4922 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-scheduler-operator"/"kube-root-ca.crt"
Nov 28 06:57:23 crc kubenswrapper[4922]: I1128 06:57:23.358935 4922 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"openshift-service-ca.crt"
Nov 28 06:57:23 crc kubenswrapper[4922]: I1128 06:57:23.368578 4922 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"kube-root-ca.crt"
Nov 28 06:57:23 crc kubenswrapper[4922]: I1128 06:57:23.528937 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"certified-operators-dockercfg-4rs5g"
Nov 28 06:57:23 crc kubenswrapper[4922]: I1128 06:57:23.671900 4922 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"kube-root-ca.crt"
Nov 28 06:57:23 crc kubenswrapper[4922]: I1128 06:57:23.696519 4922 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns-operator"/"kube-root-ca.crt"
Nov 28 06:57:23 crc kubenswrapper[4922]: I1128 06:57:23.743109 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"image-registry-tls"
Nov 28 06:57:23 crc kubenswrapper[4922]: I1128 06:57:23.771727 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"node-bootstrapper-token"
Nov 28 06:57:23 crc kubenswrapper[4922]: I1128 06:57:23.836117 4922 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-config"
Nov 28 06:57:23 crc kubenswrapper[4922]: I1128 06:57:23.857403 4922 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"client-ca"
Nov 28 06:57:23 crc kubenswrapper[4922]: I1128 06:57:23.883388 4922 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"machine-approver-config"
Nov 28 06:57:24 crc kubenswrapper[4922]: I1128 06:57:24.055855 4922 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-diagnostics"/"kube-root-ca.crt"
Nov 28 06:57:24 crc kubenswrapper[4922]: I1128 06:57:24.067537 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"registry-dockercfg-kzzsd"
Nov 28 06:57:24 crc kubenswrapper[4922]: I1128 06:57:24.087190 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"olm-operator-serving-cert"
Nov 28 06:57:24 crc kubenswrapper[4922]: I1128 06:57:24.123473 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-oauth-config"
Nov 28 06:57:24 crc kubenswrapper[4922]: I1128 06:57:24.158804 4922 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"openshift-service-ca.crt"
Nov 28 06:57:24 crc kubenswrapper[4922]: I1128 06:57:24.216685 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-scheduler-operator"/"openshift-kube-scheduler-operator-dockercfg-qt55r"
Nov 28 06:57:24 crc kubenswrapper[4922]: I1128 06:57:24.315136 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"catalog-operator-serving-cert"
Nov 28 06:57:24 crc kubenswrapper[4922]: I1128 06:57:24.323801 4922 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"kube-root-ca.crt"
Nov 28 06:57:24 crc kubenswrapper[4922]: I1128 06:57:24.339565 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca-operator"/"serving-cert"
Nov 28 06:57:24 crc kubenswrapper[4922]: I1128 06:57:24.489111 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console-operator"/"serving-cert"
Nov 28 06:57:24 crc kubenswrapper[4922]: I1128 06:57:24.591028 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"cluster-image-registry-operator-dockercfg-m4qtx"
Nov 28 06:57:24 crc kubenswrapper[4922]: I1128 06:57:24.594955 4922 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"audit-1"
Nov 28 06:57:24 crc kubenswrapper[4922]: I1128 06:57:24.748984 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"openshift-controller-manager-sa-dockercfg-msq4c"
Nov 28 06:57:24 crc kubenswrapper[4922]: I1128 06:57:24.801351 4922 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"trusted-ca-bundle"
Nov 28 06:57:24 crc kubenswrapper[4922]: I1128 06:57:24.882320 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"olm-operator-serviceaccount-dockercfg-rq7zk"
Nov 28 06:57:24 crc kubenswrapper[4922]: I1128 06:57:24.993905 4922 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-config"
Nov 28 06:57:25 crc kubenswrapper[4922]: I1128 06:57:25.116649 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-node-identity"/"network-node-identity-cert"
Nov 28 06:57:25 crc kubenswrapper[4922]: I1128 06:57:25.184461 4922 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"openshift-service-ca.crt"
Nov 28 06:57:25 crc kubenswrapper[4922]: I1128 06:57:25.246047 4922 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-operator-config"
Nov 28 06:57:25 crc kubenswrapper[4922]: I1128 06:57:25.271133 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"mco-proxy-tls"
Nov 28 06:57:25 crc kubenswrapper[4922]: I1128 06:57:25.502607 4922 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"trusted-ca-bundle"
Nov 28 06:57:25 crc kubenswrapper[4922]: I1128 06:57:25.537186 4922 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-global-ca"
Nov 28 06:57:25 crc kubenswrapper[4922]: I1128 06:57:25.566399 4922 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-service-ca.crt"
Nov 28 06:57:25 crc kubenswrapper[4922]: I1128 06:57:25.593601 4922 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"config"
Nov 28 06:57:25 crc kubenswrapper[4922]: I1128 06:57:25.674958 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"marketplace-operator-metrics"
Nov 28 06:57:25 crc kubenswrapper[4922]: I1128 06:57:25.811786 4922 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"kube-root-ca.crt"
Nov 28 06:57:25 crc kubenswrapper[4922]: I1128 06:57:25.890880 4922 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"kube-rbac-proxy"
Nov 28 06:57:25 crc kubenswrapper[4922]: I1128 06:57:25.894877 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"package-server-manager-serving-cert"
Nov 28 06:57:25 crc kubenswrapper[4922]: I1128 06:57:25.912491 4922 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"trusted-ca-bundle"
Nov 28 06:57:25 crc kubenswrapper[4922]: I1128 06:57:25.974280 4922 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"kube-root-ca.crt"
Nov 28 06:57:26 crc kubenswrapper[4922]: I1128 06:57:26.033003 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-operator"/"metrics-tls"
Nov 28 06:57:26 crc kubenswrapper[4922]: I1128 06:57:26.142433 4922 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"ovnkube-script-lib"
Nov 28 06:57:26 crc kubenswrapper[4922]: I1128 06:57:26.236513 4922 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"openshift-service-ca.crt"
Nov 28 06:57:26 crc kubenswrapper[4922]: I1128 06:57:26.241245 4922 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-canary"/"openshift-service-ca.crt"
Nov 28 06:57:26 crc kubenswrapper[4922]: I1128 06:57:26.241622 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-operator-dockercfg-r9srn"
Nov 28 06:57:26 crc kubenswrapper[4922]: I1128 06:57:26.300138 4922 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"openshift-service-ca.crt"
Nov 28 06:57:26 crc kubenswrapper[4922]: I1128 06:57:26.361173 4922 reflector.go:368] Caches populated for *v1.Pod from pkg/kubelet/config/apiserver.go:66
Nov 28 06:57:26 crc kubenswrapper[4922]: I1128 06:57:26.363698 4922 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" podStartSLOduration=40.363681884 podStartE2EDuration="40.363681884s" podCreationTimestamp="2025-11-28 06:56:46 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 06:57:05.087024332 +0000 UTC m=+270.007419924" watchObservedRunningTime="2025-11-28 06:57:26.363681884 +0000 UTC m=+291.284077476"
Nov 28 06:57:26 crc kubenswrapper[4922]: I1128 06:57:26.366003 4922 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-bqwm2","openshift-kube-apiserver/kube-apiserver-crc"]
Nov 28 06:57:26 crc kubenswrapper[4922]: I1128 06:57:26.366061 4922 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/kube-apiserver-crc"]
Nov 28 06:57:26 crc kubenswrapper[4922]: I1128 06:57:26.370838 4922 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-apiserver/kube-apiserver-crc"
Nov 28 06:57:26 crc kubenswrapper[4922]: I1128 06:57:26.392367 4922 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver/kube-apiserver-crc" podStartSLOduration=21.392350745999998 podStartE2EDuration="21.392350746s" podCreationTimestamp="2025-11-28 06:57:05 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 06:57:26.390747061 +0000 UTC m=+291.311142683" watchObservedRunningTime="2025-11-28 06:57:26.392350746 +0000 UTC m=+291.312746328"
Nov 28 06:57:26 crc kubenswrapper[4922]: I1128 06:57:26.537684 4922 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-scheduler-operator"/"openshift-kube-scheduler-operator-config"
Nov 28 06:57:26 crc kubenswrapper[4922]: I1128 06:57:26.593686 4922 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"etcd-serving-ca"
Nov 28 06:57:26 crc kubenswrapper[4922]: I1128 06:57:26.694454 4922 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"kube-root-ca.crt"
Nov 28 06:57:26 crc kubenswrapper[4922]: I1128 06:57:26.770580 4922 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"config"
Nov 28 06:57:26 crc kubenswrapper[4922]: I1128 06:57:26.790560 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-machine-approver"/"machine-approver-tls"
Nov 28 06:57:26 crc kubenswrapper[4922]: I1128 06:57:26.802513 4922 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"dns-default"
Nov 28 06:57:27 crc kubenswrapper[4922]: I1128 06:57:27.029588 4922 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"kube-root-ca.crt"
Nov 28 06:57:27 crc kubenswrapper[4922]: I1128 06:57:27.079000 4922 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"kube-root-ca.crt"
Nov 28 06:57:27 crc kubenswrapper[4922]: I1128 06:57:27.083043 4922 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"console-config"
Nov 28 06:57:27 crc kubenswrapper[4922]: I1128 06:57:27.181382 4922 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"kube-rbac-proxy"
Nov 28 06:57:27 crc kubenswrapper[4922]: I1128 06:57:27.184054 4922 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"signing-cabundle"
Nov 28 06:57:27 crc kubenswrapper[4922]: I1128 06:57:27.184456 4922 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"authentication-operator-config"
Nov 28 06:57:27 crc kubenswrapper[4922]: I1128 06:57:27.257571 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"community-operators-dockercfg-dmngl"
Nov 28 06:57:27 crc kubenswrapper[4922]: I1128 06:57:27.343477 4922 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"openshift-service-ca.crt"
Nov 28 06:57:27 crc kubenswrapper[4922]: I1128 06:57:27.403471 4922 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d952449e-c825-43d3-a591-0be473db6a53" path="/var/lib/kubelet/pods/d952449e-c825-43d3-a591-0be473db6a53/volumes"
Nov 28 06:57:27 crc kubenswrapper[4922]: I1128 06:57:27.412168 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-canary"/"default-dockercfg-2llfx"
Nov 28 06:57:27 crc kubenswrapper[4922]: I1128 06:57:27.424325 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"mcc-proxy-tls"
Nov 28 06:57:27 crc kubenswrapper[4922]: I1128 06:57:27.543291 4922 kubelet.go:2431] "SyncLoop REMOVE" source="file" pods=["openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"]
Nov 28 06:57:27 crc kubenswrapper[4922]: I1128 06:57:27.543570 4922 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" containerName="startup-monitor" containerID="cri-o://5db12bb61244978bbac8599217c66d132302969e738bc58a804cf67418aef0ad" gracePeriod=5
Nov 28 06:57:27 crc kubenswrapper[4922]: I1128 06:57:27.566272 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-samples-operator"/"cluster-samples-operator-dockercfg-xpp9w"
Nov 28 06:57:27 crc kubenswrapper[4922]: I1128 06:57:27.584568 4922 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-authentication/oauth-openshift-6c8d5d4f46-sw7xx"]
Nov 28 06:57:27 crc kubenswrapper[4922]: E1128 06:57:27.584761 4922 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d952449e-c825-43d3-a591-0be473db6a53" containerName="oauth-openshift"
Nov 28 06:57:27 crc kubenswrapper[4922]: I1128 06:57:27.584772 4922 state_mem.go:107] "Deleted CPUSet assignment" podUID="d952449e-c825-43d3-a591-0be473db6a53" containerName="oauth-openshift"
Nov 28 06:57:27 crc kubenswrapper[4922]: E1128 06:57:27.584780 4922 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" containerName="startup-monitor"
Nov 28 06:57:27 crc kubenswrapper[4922]: I1128 06:57:27.584787 4922 state_mem.go:107] "Deleted CPUSet assignment" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" containerName="startup-monitor"
Nov 28 06:57:27 crc kubenswrapper[4922]: E1128 06:57:27.584795 4922 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d5da4dd4-e0f3-46c8-88cd-e84251d03b1b" containerName="installer"
Nov 28 06:57:27 crc kubenswrapper[4922]: I1128 06:57:27.584801 4922 state_mem.go:107] "Deleted CPUSet assignment" podUID="d5da4dd4-e0f3-46c8-88cd-e84251d03b1b" containerName="installer"
Nov 28 06:57:27 crc kubenswrapper[4922]: I1128 06:57:27.584904 4922 memory_manager.go:354] "RemoveStaleState removing state" podUID="d952449e-c825-43d3-a591-0be473db6a53" containerName="oauth-openshift"
Nov 28 06:57:27 crc kubenswrapper[4922]: I1128 06:57:27.584921 4922 memory_manager.go:354] "RemoveStaleState removing state" podUID="d5da4dd4-e0f3-46c8-88cd-e84251d03b1b" containerName="installer"
Nov 28 06:57:27 crc kubenswrapper[4922]: I1128 06:57:27.584929 4922 memory_manager.go:354] "RemoveStaleState removing state" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" containerName="startup-monitor"
Nov 28 06:57:27 crc kubenswrapper[4922]: I1128 06:57:27.585295 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-6c8d5d4f46-sw7xx"
Nov 28 06:57:27 crc kubenswrapper[4922]: I1128 06:57:27.593697 4922 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-cliconfig"
Nov 28 06:57:27 crc kubenswrapper[4922]: I1128 06:57:27.595687 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-session"
Nov 28 06:57:27 crc kubenswrapper[4922]: I1128 06:57:27.596571 4922 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-service-ca"
Nov 28 06:57:27 crc kubenswrapper[4922]: I1128 06:57:27.596173 4922 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"audit"
Nov 28 06:57:27 crc kubenswrapper[4922]: I1128 06:57:27.596330 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"oauth-openshift-dockercfg-znhcc"
Nov 28 06:57:27 crc kubenswrapper[4922]: I1128 06:57:27.597973 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-error"
Nov 28 06:57:27 crc kubenswrapper[4922]: I1128 06:57:27.598315 4922 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"kube-root-ca.crt"
Nov 28 06:57:27 crc kubenswrapper[4922]: I1128 06:57:27.598564 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-provider-selection"
Nov 28 06:57:27 crc kubenswrapper[4922]: I1128 06:57:27.598770 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-router-certs"
Nov 28 06:57:27 crc kubenswrapper[4922]: I1128 06:57:27.598952 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-idp-0-file-data"
Nov 28 06:57:27 crc kubenswrapper[4922]: I1128 06:57:27.599530 4922 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"openshift-service-ca.crt"
Nov 28 06:57:27 crc kubenswrapper[4922]: I1128 06:57:27.601342 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication/oauth-openshift-6c8d5d4f46-sw7xx"]
Nov 28 06:57:27 crc kubenswrapper[4922]: I1128 06:57:27.603404 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-login"
Nov 28 06:57:27 crc kubenswrapper[4922]: I1128 06:57:27.608123 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-serving-cert"
Nov 28 06:57:27 crc kubenswrapper[4922]: I1128 06:57:27.614381 4922 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-trusted-ca-bundle"
Nov 28 06:57:27 crc kubenswrapper[4922]: I1128 06:57:27.615180 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-ocp-branding-template"
Nov 28 06:57:27 crc kubenswrapper[4922]: I1128 06:57:27.632772 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-server-dockercfg-qx5rd"
Nov 28 06:57:27 crc kubenswrapper[4922]: I1128 06:57:27.644293 4922 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-version"/"kube-root-ca.crt"
Nov 28 06:57:27 crc kubenswrapper[4922]: I1128 06:57:27.683996 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/0a15f007-f3cc-4c50-b4e6-e948f21e1fe0-audit-policies\") pod \"oauth-openshift-6c8d5d4f46-sw7xx\" (UID: \"0a15f007-f3cc-4c50-b4e6-e948f21e1fe0\") " pod="openshift-authentication/oauth-openshift-6c8d5d4f46-sw7xx"
Nov 28 06:57:27 crc kubenswrapper[4922]: I1128 06:57:27.684064 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/0a15f007-f3cc-4c50-b4e6-e948f21e1fe0-v4-0-config-system-serving-cert\") pod \"oauth-openshift-6c8d5d4f46-sw7xx\" (UID: \"0a15f007-f3cc-4c50-b4e6-e948f21e1fe0\") " pod="openshift-authentication/oauth-openshift-6c8d5d4f46-sw7xx"
Nov 28 06:57:27 crc kubenswrapper[4922]: I1128 06:57:27.684122 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/0a15f007-f3cc-4c50-b4e6-e948f21e1fe0-v4-0-config-system-cliconfig\") pod \"oauth-openshift-6c8d5d4f46-sw7xx\" (UID: \"0a15f007-f3cc-4c50-b4e6-e948f21e1fe0\") " pod="openshift-authentication/oauth-openshift-6c8d5d4f46-sw7xx"
Nov 28 06:57:27 crc kubenswrapper[4922]: I1128 06:57:27.684237 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/0a15f007-f3cc-4c50-b4e6-e948f21e1fe0-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-6c8d5d4f46-sw7xx\" (UID: \"0a15f007-f3cc-4c50-b4e6-e948f21e1fe0\") " pod="openshift-authentication/oauth-openshift-6c8d5d4f46-sw7xx"
Nov 28 06:57:27 crc kubenswrapper[4922]: I1128 06:57:27.684308 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/0a15f007-f3cc-4c50-b4e6-e948f21e1fe0-v4-0-config-system-service-ca\") pod \"oauth-openshift-6c8d5d4f46-sw7xx\" (UID: \"0a15f007-f3cc-4c50-b4e6-e948f21e1fe0\") " pod="openshift-authentication/oauth-openshift-6c8d5d4f46-sw7xx"
Nov 28 06:57:27 crc kubenswrapper[4922]: I1128 06:57:27.684381 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/0a15f007-f3cc-4c50-b4e6-e948f21e1fe0-v4-0-config-user-template-error\") pod \"oauth-openshift-6c8d5d4f46-sw7xx\" (UID: \"0a15f007-f3cc-4c50-b4e6-e948f21e1fe0\") " pod="openshift-authentication/oauth-openshift-6c8d5d4f46-sw7xx"
Nov 28 06:57:27 crc kubenswrapper[4922]: I1128 06:57:27.684427 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/0a15f007-f3cc-4c50-b4e6-e948f21e1fe0-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-6c8d5d4f46-sw7xx\" (UID: \"0a15f007-f3cc-4c50-b4e6-e948f21e1fe0\") " pod="openshift-authentication/oauth-openshift-6c8d5d4f46-sw7xx"
Nov 28 06:57:27 crc kubenswrapper[4922]: I1128 06:57:27.684469 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rhns8\" (UniqueName: \"kubernetes.io/projected/0a15f007-f3cc-4c50-b4e6-e948f21e1fe0-kube-api-access-rhns8\") pod \"oauth-openshift-6c8d5d4f46-sw7xx\" (UID: \"0a15f007-f3cc-4c50-b4e6-e948f21e1fe0\") " pod="openshift-authentication/oauth-openshift-6c8d5d4f46-sw7xx"
Nov 28 06:57:27 crc kubenswrapper[4922]: I1128 06:57:27.684490 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/0a15f007-f3cc-4c50-b4e6-e948f21e1fe0-v4-0-config-system-router-certs\") pod \"oauth-openshift-6c8d5d4f46-sw7xx\" (UID: \"0a15f007-f3cc-4c50-b4e6-e948f21e1fe0\") " pod="openshift-authentication/oauth-openshift-6c8d5d4f46-sw7xx"
Nov 28 06:57:27 crc kubenswrapper[4922]: I1128 06:57:27.684510 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/0a15f007-f3cc-4c50-b4e6-e948f21e1fe0-v4-0-config-user-template-login\") pod \"oauth-openshift-6c8d5d4f46-sw7xx\" (UID: \"0a15f007-f3cc-4c50-b4e6-e948f21e1fe0\") " pod="openshift-authentication/oauth-openshift-6c8d5d4f46-sw7xx"
Nov 28 06:57:27 crc kubenswrapper[4922]: I1128 06:57:27.684550 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/0a15f007-f3cc-4c50-b4e6-e948f21e1fe0-audit-dir\") pod \"oauth-openshift-6c8d5d4f46-sw7xx\" (UID: \"0a15f007-f3cc-4c50-b4e6-e948f21e1fe0\") " pod="openshift-authentication/oauth-openshift-6c8d5d4f46-sw7xx"
Nov 28 06:57:27 crc kubenswrapper[4922]: I1128 06:57:27.684599 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/0a15f007-f3cc-4c50-b4e6-e948f21e1fe0-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-6c8d5d4f46-sw7xx\" (UID: \"0a15f007-f3cc-4c50-b4e6-e948f21e1fe0\") " pod="openshift-authentication/oauth-openshift-6c8d5d4f46-sw7xx"
Nov 28 06:57:27 crc kubenswrapper[4922]: I1128 06:57:27.684648 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/0a15f007-f3cc-4c50-b4e6-e948f21e1fe0-v4-0-config-system-session\") pod \"oauth-openshift-6c8d5d4f46-sw7xx\" (UID: \"0a15f007-f3cc-4c50-b4e6-e948f21e1fe0\") " pod="openshift-authentication/oauth-openshift-6c8d5d4f46-sw7xx"
Nov 28 06:57:27 crc kubenswrapper[4922]: I1128 06:57:27.684700 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/0a15f007-f3cc-4c50-b4e6-e948f21e1fe0-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-6c8d5d4f46-sw7xx\" (UID: \"0a15f007-f3cc-4c50-b4e6-e948f21e1fe0\") " pod="openshift-authentication/oauth-openshift-6c8d5d4f46-sw7xx"
Nov 28 06:57:27 crc kubenswrapper[4922]: I1128 06:57:27.728847 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-operator-dockercfg-98p87"
Nov 28 06:57:27 crc kubenswrapper[4922]: I1128 06:57:27.729408 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"dns-default-metrics-tls"
Nov 28 06:57:27 crc kubenswrapper[4922]: I1128 06:57:27.785678 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/0a15f007-f3cc-4c50-b4e6-e948f21e1fe0-v4-0-config-user-template-login\") pod \"oauth-openshift-6c8d5d4f46-sw7xx\" (UID: \"0a15f007-f3cc-4c50-b4e6-e948f21e1fe0\") " pod="openshift-authentication/oauth-openshift-6c8d5d4f46-sw7xx"
Nov 28 06:57:27 crc kubenswrapper[4922]: I1128 06:57:27.785783 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/0a15f007-f3cc-4c50-b4e6-e948f21e1fe0-audit-dir\") pod \"oauth-openshift-6c8d5d4f46-sw7xx\" (UID: \"0a15f007-f3cc-4c50-b4e6-e948f21e1fe0\") " pod="openshift-authentication/oauth-openshift-6c8d5d4f46-sw7xx"
Nov 28 06:57:27 crc kubenswrapper[4922]: I1128 06:57:27.785857 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/0a15f007-f3cc-4c50-b4e6-e948f21e1fe0-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-6c8d5d4f46-sw7xx\" (UID: \"0a15f007-f3cc-4c50-b4e6-e948f21e1fe0\") " pod="openshift-authentication/oauth-openshift-6c8d5d4f46-sw7xx"
Nov 28 06:57:27 crc kubenswrapper[4922]: I1128 06:57:27.785913 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/0a15f007-f3cc-4c50-b4e6-e948f21e1fe0-v4-0-config-system-session\") pod \"oauth-openshift-6c8d5d4f46-sw7xx\" (UID: \"0a15f007-f3cc-4c50-b4e6-e948f21e1fe0\") " pod="openshift-authentication/oauth-openshift-6c8d5d4f46-sw7xx"
Nov 28 06:57:27 crc kubenswrapper[4922]: I1128 06:57:27.785978 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/0a15f007-f3cc-4c50-b4e6-e948f21e1fe0-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-6c8d5d4f46-sw7xx\" (UID: \"0a15f007-f3cc-4c50-b4e6-e948f21e1fe0\") " pod="openshift-authentication/oauth-openshift-6c8d5d4f46-sw7xx"
Nov 28 06:57:27 crc kubenswrapper[4922]: I1128 06:57:27.785995 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/0a15f007-f3cc-4c50-b4e6-e948f21e1fe0-audit-dir\") pod \"oauth-openshift-6c8d5d4f46-sw7xx\" (UID: \"0a15f007-f3cc-4c50-b4e6-e948f21e1fe0\") " pod="openshift-authentication/oauth-openshift-6c8d5d4f46-sw7xx"
Nov 28 06:57:27 crc kubenswrapper[4922]: I1128 06:57:27.786036 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/0a15f007-f3cc-4c50-b4e6-e948f21e1fe0-audit-policies\") pod \"oauth-openshift-6c8d5d4f46-sw7xx\" (UID: \"0a15f007-f3cc-4c50-b4e6-e948f21e1fe0\") " pod="openshift-authentication/oauth-openshift-6c8d5d4f46-sw7xx"
Nov 28 06:57:27 crc kubenswrapper[4922]: I1128 06:57:27.786082 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/0a15f007-f3cc-4c50-b4e6-e948f21e1fe0-v4-0-config-system-serving-cert\") pod \"oauth-openshift-6c8d5d4f46-sw7xx\" (UID: \"0a15f007-f3cc-4c50-b4e6-e948f21e1fe0\") " pod="openshift-authentication/oauth-openshift-6c8d5d4f46-sw7xx"
Nov 28 06:57:27 crc kubenswrapper[4922]: I1128 06:57:27.786125 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/0a15f007-f3cc-4c50-b4e6-e948f21e1fe0-v4-0-config-system-cliconfig\") pod \"oauth-openshift-6c8d5d4f46-sw7xx\" (UID: \"0a15f007-f3cc-4c50-b4e6-e948f21e1fe0\") " pod="openshift-authentication/oauth-openshift-6c8d5d4f46-sw7xx"
Nov 28 06:57:27 crc kubenswrapper[4922]: I1128 06:57:27.786213 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/0a15f007-f3cc-4c50-b4e6-e948f21e1fe0-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-6c8d5d4f46-sw7xx\" (UID: \"0a15f007-f3cc-4c50-b4e6-e948f21e1fe0\") " pod="openshift-authentication/oauth-openshift-6c8d5d4f46-sw7xx"
Nov 28 06:57:27 crc kubenswrapper[4922]: I1128 06:57:27.786316 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/0a15f007-f3cc-4c50-b4e6-e948f21e1fe0-v4-0-config-system-service-ca\") pod \"oauth-openshift-6c8d5d4f46-sw7xx\" (UID: \"0a15f007-f3cc-4c50-b4e6-e948f21e1fe0\") " pod="openshift-authentication/oauth-openshift-6c8d5d4f46-sw7xx"
Nov 28 06:57:27 crc kubenswrapper[4922]: I1128 06:57:27.786393 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/0a15f007-f3cc-4c50-b4e6-e948f21e1fe0-v4-0-config-user-template-error\") pod \"oauth-openshift-6c8d5d4f46-sw7xx\" (UID: \"0a15f007-f3cc-4c50-b4e6-e948f21e1fe0\") " pod="openshift-authentication/oauth-openshift-6c8d5d4f46-sw7xx"
Nov 28 06:57:27 crc kubenswrapper[4922]: I1128 06:57:27.786467 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/0a15f007-f3cc-4c50-b4e6-e948f21e1fe0-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-6c8d5d4f46-sw7xx\" (UID: \"0a15f007-f3cc-4c50-b4e6-e948f21e1fe0\") " pod="openshift-authentication/oauth-openshift-6c8d5d4f46-sw7xx"
Nov 28 06:57:27 crc kubenswrapper[4922]: I1128 06:57:27.786516 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rhns8\" (UniqueName: \"kubernetes.io/projected/0a15f007-f3cc-4c50-b4e6-e948f21e1fe0-kube-api-access-rhns8\") pod \"oauth-openshift-6c8d5d4f46-sw7xx\" (UID: \"0a15f007-f3cc-4c50-b4e6-e948f21e1fe0\") " pod="openshift-authentication/oauth-openshift-6c8d5d4f46-sw7xx"
Nov 28 06:57:27 crc kubenswrapper[4922]: I1128 06:57:27.786606 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/0a15f007-f3cc-4c50-b4e6-e948f21e1fe0-v4-0-config-system-router-certs\") pod \"oauth-openshift-6c8d5d4f46-sw7xx\" (UID: \"0a15f007-f3cc-4c50-b4e6-e948f21e1fe0\") " pod="openshift-authentication/oauth-openshift-6c8d5d4f46-sw7xx"
Nov 28 06:57:27 crc kubenswrapper[4922]: I1128 06:57:27.789360 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/0a15f007-f3cc-4c50-b4e6-e948f21e1fe0-audit-policies\") pod \"oauth-openshift-6c8d5d4f46-sw7xx\" (UID: \"0a15f007-f3cc-4c50-b4e6-e948f21e1fe0\") " pod="openshift-authentication/oauth-openshift-6c8d5d4f46-sw7xx"
Nov 28 06:57:27 crc kubenswrapper[4922]: I1128 06:57:27.789408 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/0a15f007-f3cc-4c50-b4e6-e948f21e1fe0-v4-0-config-system-service-ca\") pod \"oauth-openshift-6c8d5d4f46-sw7xx\" (UID: \"0a15f007-f3cc-4c50-b4e6-e948f21e1fe0\") " pod="openshift-authentication/oauth-openshift-6c8d5d4f46-sw7xx"
Nov 28 06:57:27 crc kubenswrapper[4922]: I1128 06:57:27.789879 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/0a15f007-f3cc-4c50-b4e6-e948f21e1fe0-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-6c8d5d4f46-sw7xx\" (UID: \"0a15f007-f3cc-4c50-b4e6-e948f21e1fe0\") " pod="openshift-authentication/oauth-openshift-6c8d5d4f46-sw7xx"
Nov 28 06:57:27 crc kubenswrapper[4922]: I1128 06:57:27.791258 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/0a15f007-f3cc-4c50-b4e6-e948f21e1fe0-v4-0-config-system-cliconfig\") pod \"oauth-openshift-6c8d5d4f46-sw7xx\" (UID: \"0a15f007-f3cc-4c50-b4e6-e948f21e1fe0\") " pod="openshift-authentication/oauth-openshift-6c8d5d4f46-sw7xx"
Nov 28 06:57:27 crc kubenswrapper[4922]: I1128 06:57:27.812554 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/0a15f007-f3cc-4c50-b4e6-e948f21e1fe0-v4-0-config-user-template-login\") pod \"oauth-openshift-6c8d5d4f46-sw7xx\" (UID: \"0a15f007-f3cc-4c50-b4e6-e948f21e1fe0\") " pod="openshift-authentication/oauth-openshift-6c8d5d4f46-sw7xx"
Nov 28 06:57:27 crc kubenswrapper[4922]: I1128 06:57:27.813670 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/0a15f007-f3cc-4c50-b4e6-e948f21e1fe0-v4-0-config-system-session\") pod \"oauth-openshift-6c8d5d4f46-sw7xx\" (UID: \"0a15f007-f3cc-4c50-b4e6-e948f21e1fe0\") " pod="openshift-authentication/oauth-openshift-6c8d5d4f46-sw7xx"
Nov 28 06:57:27 crc kubenswrapper[4922]: I1128 06:57:27.813784 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/0a15f007-f3cc-4c50-b4e6-e948f21e1fe0-v4-0-config-system-serving-cert\") pod \"oauth-openshift-6c8d5d4f46-sw7xx\" (UID: \"0a15f007-f3cc-4c50-b4e6-e948f21e1fe0\") " pod="openshift-authentication/oauth-openshift-6c8d5d4f46-sw7xx"
Nov 28 06:57:27 crc kubenswrapper[4922]: I1128 06:57:27.814340 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/0a15f007-f3cc-4c50-b4e6-e948f21e1fe0-v4-0-config-system-router-certs\") pod \"oauth-openshift-6c8d5d4f46-sw7xx\" (UID: \"0a15f007-f3cc-4c50-b4e6-e948f21e1fe0\") " pod="openshift-authentication/oauth-openshift-6c8d5d4f46-sw7xx"
Nov 28 06:57:27 crc kubenswrapper[4922]: I1128 06:57:27.816200 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/0a15f007-f3cc-4c50-b4e6-e948f21e1fe0-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-6c8d5d4f46-sw7xx\" (UID: \"0a15f007-f3cc-4c50-b4e6-e948f21e1fe0\") " pod="openshift-authentication/oauth-openshift-6c8d5d4f46-sw7xx"
Nov 28 06:57:27 crc kubenswrapper[4922]: I1128 06:57:27.816669 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/0a15f007-f3cc-4c50-b4e6-e948f21e1fe0-v4-0-config-user-template-error\") pod \"oauth-openshift-6c8d5d4f46-sw7xx\" (UID: \"0a15f007-f3cc-4c50-b4e6-e948f21e1fe0\") " pod="openshift-authentication/oauth-openshift-6c8d5d4f46-sw7xx"
Nov 28 06:57:27 crc kubenswrapper[4922]: I1128 06:57:27.817020 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/0a15f007-f3cc-4c50-b4e6-e948f21e1fe0-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-6c8d5d4f46-sw7xx\" (UID: \"0a15f007-f3cc-4c50-b4e6-e948f21e1fe0\") " pod="openshift-authentication/oauth-openshift-6c8d5d4f46-sw7xx"
Nov 28 06:57:27 crc kubenswrapper[4922]: I1128 06:57:27.817303 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/0a15f007-f3cc-4c50-b4e6-e948f21e1fe0-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-6c8d5d4f46-sw7xx\" (UID: \"0a15f007-f3cc-4c50-b4e6-e948f21e1fe0\") " pod="openshift-authentication/oauth-openshift-6c8d5d4f46-sw7xx"
Nov 28 06:57:27 crc kubenswrapper[4922]: I1128 06:57:27.817968 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-dockercfg-vw8fw"
Nov 28 06:57:27 crc kubenswrapper[4922]: I1128 06:57:27.821963 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rhns8\" (UniqueName: \"kubernetes.io/projected/0a15f007-f3cc-4c50-b4e6-e948f21e1fe0-kube-api-access-rhns8\") pod \"oauth-openshift-6c8d5d4f46-sw7xx\" (UID: \"0a15f007-f3cc-4c50-b4e6-e948f21e1fe0\") " pod="openshift-authentication/oauth-openshift-6c8d5d4f46-sw7xx"
Nov 28 06:57:27 crc kubenswrapper[4922]: I1128 06:57:27.875651 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-operator"/"ingress-operator-dockercfg-7lnqk"
Nov 28 06:57:27 crc kubenswrapper[4922]: I1128 06:57:27.906751 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-6c8d5d4f46-sw7xx"
Nov 28 06:57:27 crc kubenswrapper[4922]: I1128 06:57:27.930101 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"machine-api-operator-dockercfg-mfbb7"
Nov 28 06:57:28 crc kubenswrapper[4922]: I1128 06:57:28.132992 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication/oauth-openshift-6c8d5d4f46-sw7xx"]
Nov 28 06:57:28 crc kubenswrapper[4922]: I1128 06:57:28.256792 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-6c8d5d4f46-sw7xx" event={"ID":"0a15f007-f3cc-4c50-b4e6-e948f21e1fe0","Type":"ContainerStarted","Data":"65ae6ff018279c3ee888a3979799004070867b0a474c7a3657ade025e8bbac2e"}
Nov 28 06:57:28 crc kubenswrapper[4922]: I1128 06:57:28.379917 4922 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"openshift-service-ca.crt"
Nov 28 06:57:28 crc kubenswrapper[4922]: I1128 06:57:28.381399 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-admission-controller-secret"
Nov 28 06:57:28 crc kubenswrapper[4922]: I1128 06:57:28.406425 4922 reflector.go:368] Caches populated for *v1.Secret from object-"hostpath-provisioner"/"csi-hostpath-provisioner-sa-dockercfg-qd74k"
Nov 28 06:57:28 crc kubenswrapper[4922]: I1128 06:57:28.582882 4922 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"openshift-service-ca.crt"
Nov 28 06:57:28 crc kubenswrapper[4922]: I1128 06:57:28.654754 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"metrics-daemon-secret"
Nov 28 06:57:28 crc kubenswrapper[4922]: I1128 06:57:28.680734 4922 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"openshift-service-ca.crt"
Nov 28 06:57:28 crc kubenswrapper[4922]: I1128 06:57:28.755714 4922 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"config"
Nov 28 06:57:28 crc kubenswrapper[4922]: I1128 06:57:28.771624 4922 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"openshift-service-ca.crt"
Nov 28 06:57:28 crc kubenswrapper[4922]: I1128 06:57:28.877155 4922 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator"/"kube-root-ca.crt"
Nov 28 06:57:29 crc kubenswrapper[4922]: I1128 06:57:29.020531 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-metrics-certs-default"
Nov 28 06:57:29 crc kubenswrapper[4922]: I1128 06:57:29.243289 4922 reflector.go:368] Caches populated for *v1.Service from k8s.io/client-go/informers/factory.go:160
Nov 28 06:57:29 crc kubenswrapper[4922]: I1128 06:57:29.264809 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-6c8d5d4f46-sw7xx" event={"ID":"0a15f007-f3cc-4c50-b4e6-e948f21e1fe0","Type":"ContainerStarted","Data":"e039ae6f57915a3227053167b812d9b4067c0b4f3a71f9d499a33d29656647d3"}
Nov 28 06:57:29 crc kubenswrapper[4922]: I1128 06:57:29.265197 4922 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-authentication/oauth-openshift-6c8d5d4f46-sw7xx"
Nov 28 06:57:29 crc kubenswrapper[4922]: I1128 06:57:29.272061 4922 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-authentication/oauth-openshift-6c8d5d4f46-sw7xx"
Nov 28 06:57:29 crc kubenswrapper[4922]: I1128 06:57:29.305726 4922 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"openshift-service-ca.crt"
Nov 28 06:57:29 crc kubenswrapper[4922]: I1128 06:57:29.320414 4922 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-authentication/oauth-openshift-6c8d5d4f46-sw7xx" podStartSLOduration=68.320388876 podStartE2EDuration="1m8.320388876s" podCreationTimestamp="2025-11-28 06:56:21 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 06:57:29.295932082 +0000 UTC m=+294.216327694" watchObservedRunningTime="2025-11-28 06:57:29.320388876 +0000 UTC m=+294.240784498"
Nov 28 06:57:29 crc kubenswrapper[4922]: I1128 06:57:29.371116 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"image-registry-operator-tls"
Nov 28 06:57:29 crc kubenswrapper[4922]: I1128 06:57:29.373123 4922 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"iptables-alerter-script"
Nov 28 06:57:29 crc kubenswrapper[4922]: I1128 06:57:29.377086 4922 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"kube-root-ca.crt"
Nov 28 06:57:29 crc kubenswrapper[4922]: I1128 06:57:29.462277 4922 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"kube-root-ca.crt"
Nov 28 06:57:29 crc kubenswrapper[4922]: I1128 06:57:29.507850 4922 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-config-operator"/"kube-root-ca.crt"
Nov 28 06:57:29 crc kubenswrapper[4922]: I1128 06:57:29.538473 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"pprof-cert"
Nov 28 06:57:29 crc kubenswrapper[4922]: I1128 06:57:29.610034 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca"/"service-ca-dockercfg-pn86c"
Nov 28 06:57:29 crc kubenswrapper[4922]: I1128 06:57:29.622577 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-dockercfg-f62pw"
Nov 28 06:57:29 crc kubenswrapper[4922]: I1128 06:57:29.686993 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-server-tls"
Nov 28 06:57:29 crc kubenswrapper[4922]: I1128 06:57:29.687269 4922 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"kube-root-ca.crt"
Nov 28 06:57:29 crc kubenswrapper[4922]: I1128 06:57:29.691061 4922 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"etcd-serving-ca"
Nov 28 06:57:29 crc kubenswrapper[4922]: I1128 06:57:29.804356 4922 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-version"/"openshift-service-ca.crt"
Nov 28 06:57:29 crc kubenswrapper[4922]: I1128 06:57:29.940305 4922 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"kube-root-ca.crt"
Nov 28 06:57:30 crc kubenswrapper[4922]: I1128 06:57:30.080015 4922 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"openshift-service-ca.crt"
Nov 28 06:57:30 crc kubenswrapper[4922]: I1128 06:57:30.334651 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-operator"/"metrics-tls"
Nov 28 06:57:30 crc kubenswrapper[4922]: I1128 06:57:30.349304 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-canary"/"canary-serving-cert"
Nov 28 06:57:30 crc kubenswrapper[4922]: I1128 06:57:30.355593 4922 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"cni-copy-resources"
Nov 28 06:57:30 crc kubenswrapper[4922]: I1128 06:57:30.441629 4922 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"kube-root-ca.crt"
Nov 28 06:57:30 crc kubenswrapper[4922]: I1128 06:57:30.587148 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-kubernetes-node-dockercfg-pwtwl"
Nov 28 06:57:30 crc kubenswrapper[4922]: I1128 06:57:30.604412 4922 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"openshift-service-ca.crt"
Nov 28 06:57:30 crc kubenswrapper[4922]: I1128 06:57:30.625849 4922 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"openshift-service-ca.crt"
Nov 28 06:57:30 crc kubenswrapper[4922]: I1128 06:57:30.627273 4922 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"oauth-serving-cert"
Nov 28 06:57:30 crc kubenswrapper[4922]: I1128 06:57:30.859833 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-serving-cert"
Nov 28 06:57:30 crc kubenswrapper[4922]: I1128 06:57:30.941259 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-marketplace-dockercfg-x2ctb"
Nov 28 06:57:31 crc kubenswrapper[4922]: I1128 06:57:31.141801 4922 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"marketplace-trusted-ca"
Nov 28 06:57:31 crc kubenswrapper[4922]: I1128 06:57:31.143421 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator-operator"/"kube-storage-version-migrator-operator-dockercfg-2bh8d"
Nov 28 06:57:31 crc kubenswrapper[4922]: I1128 06:57:31.242574 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-certs-default"
Nov 28 06:57:31 crc kubenswrapper[4922]: I1128 06:57:31.366627 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-ac-dockercfg-9lkdf"
Nov 28 06:57:31 crc kubenswrapper[4922]: I1128 06:57:31.508170 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"route-controller-manager-sa-dockercfg-h2zr2"
Nov 28 06:57:31 crc kubenswrapper[4922]: I1128 06:57:31.597342 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-daemon-dockercfg-r5tcq"
Nov 28 06:57:31 crc kubenswrapper[4922]: I1128 06:57:31.634805 4922 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"env-overrides"
Nov 28 06:57:31 crc kubenswrapper[4922]: I1128 06:57:31.653037 4922 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"openshift-service-ca.crt"
Nov 28 06:57:31 crc kubenswrapper[4922]: I1128 06:57:31.688503 4922 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"kube-rbac-proxy"
Nov 28 06:57:31 crc kubenswrapper[4922]: I1128 06:57:31.821048 4922 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"openshift-service-ca.crt"
Nov 28 06:57:31 crc kubenswrapper[4922]: I1128 06:57:31.895142 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-version"/"cluster-version-operator-serving-cert"
Nov 28 06:57:31 crc kubenswrapper[4922]: I1128 06:57:31.961678 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"node-resolver-dockercfg-kz9s7"
Nov 28 06:57:32 crc kubenswrapper[4922]: I1128 06:57:32.149855 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-control-plane-metrics-cert"
Nov 28 06:57:32 crc kubenswrapper[4922]: I1128 06:57:32.236357 4922 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"kube-root-ca.crt"
Nov 28 06:57:32 crc kubenswrapper[4922]: I1128 06:57:32.302807 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca"/"signing-key"
Nov 28 06:57:32 crc kubenswrapper[4922]: I1128 06:57:32.840075 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"default-dockercfg-2q5b6"
Nov 28 06:57:32 crc kubenswrapper[4922]: I1128 06:57:32.849368 4922 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"kube-root-ca.crt"
Nov 28 06:57:33 crc kubenswrapper[4922]: I1128 06:57:33.046031 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-operator-serving-cert"
Nov 28 06:57:33 crc kubenswrapper[4922]: I1128 06:57:33.153607 4922 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-startup-monitor-crc_f85e55b1a89d02b0cb034b1ea31ed45a/startup-monitor/0.log"
Nov 28 06:57:33 crc kubenswrapper[4922]: I1128 06:57:33.153969 4922 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"
Nov 28 06:57:33 crc kubenswrapper[4922]: I1128 06:57:33.272075 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock\") pod \"f85e55b1a89d02b0cb034b1ea31ed45a\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") "
Nov 28 06:57:33 crc kubenswrapper[4922]: I1128 06:57:33.272466 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pod-resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir\") pod \"f85e55b1a89d02b0cb034b1ea31ed45a\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") "
Nov 28 06:57:33 crc kubenswrapper[4922]: I1128 06:57:33.272828 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log\") pod \"f85e55b1a89d02b0cb034b1ea31ed45a\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") "
Nov 28 06:57:33 crc kubenswrapper[4922]: I1128 06:57:33.272969 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"manifests\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests\") pod \"f85e55b1a89d02b0cb034b1ea31ed45a\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") "
Nov 28 06:57:33 crc kubenswrapper[4922]: I1128 06:57:33.272214 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock" (OuterVolumeSpecName: "var-lock") pod "f85e55b1a89d02b0cb034b1ea31ed45a" (UID: "f85e55b1a89d02b0cb034b1ea31ed45a"). InnerVolumeSpecName "var-lock". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Nov 28 06:57:33 crc kubenswrapper[4922]: I1128 06:57:33.272974 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log" (OuterVolumeSpecName: "var-log") pod "f85e55b1a89d02b0cb034b1ea31ed45a" (UID: "f85e55b1a89d02b0cb034b1ea31ed45a"). InnerVolumeSpecName "var-log". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Nov 28 06:57:33 crc kubenswrapper[4922]: I1128 06:57:33.273034 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests" (OuterVolumeSpecName: "manifests") pod "f85e55b1a89d02b0cb034b1ea31ed45a" (UID: "f85e55b1a89d02b0cb034b1ea31ed45a"). InnerVolumeSpecName "manifests". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Nov 28 06:57:33 crc kubenswrapper[4922]: I1128 06:57:33.273094 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir\") pod \"f85e55b1a89d02b0cb034b1ea31ed45a\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") "
Nov 28 06:57:33 crc kubenswrapper[4922]: I1128 06:57:33.273424 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir" (OuterVolumeSpecName: "resource-dir") pod "f85e55b1a89d02b0cb034b1ea31ed45a" (UID: "f85e55b1a89d02b0cb034b1ea31ed45a"). InnerVolumeSpecName "resource-dir". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Nov 28 06:57:33 crc kubenswrapper[4922]: I1128 06:57:33.273829 4922 reconciler_common.go:293] "Volume detached for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir\") on node \"crc\" DevicePath \"\""
Nov 28 06:57:33 crc kubenswrapper[4922]: I1128 06:57:33.273876 4922 reconciler_common.go:293] "Volume detached for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock\") on node \"crc\" DevicePath \"\""
Nov 28 06:57:33 crc kubenswrapper[4922]: I1128 06:57:33.273896 4922 reconciler_common.go:293] "Volume detached for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log\") on node \"crc\" DevicePath \"\""
Nov 28 06:57:33 crc kubenswrapper[4922]: I1128 06:57:33.273912 4922 reconciler_common.go:293] "Volume detached for volume \"manifests\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests\") on node \"crc\" DevicePath \"\""
Nov 28 06:57:33 crc kubenswrapper[4922]: I1128 06:57:33.279523 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-dockercfg-xtcjv"
Nov 28 06:57:33 crc kubenswrapper[4922]: I1128 06:57:33.284409 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir" (OuterVolumeSpecName: "pod-resource-dir") pod "f85e55b1a89d02b0cb034b1ea31ed45a" (UID: "f85e55b1a89d02b0cb034b1ea31ed45a"). InnerVolumeSpecName "pod-resource-dir". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Nov 28 06:57:33 crc kubenswrapper[4922]: I1128 06:57:33.293616 4922 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-startup-monitor-crc_f85e55b1a89d02b0cb034b1ea31ed45a/startup-monitor/0.log"
Nov 28 06:57:33 crc kubenswrapper[4922]: I1128 06:57:33.293861 4922 generic.go:334] "Generic (PLEG): container finished" podID="f85e55b1a89d02b0cb034b1ea31ed45a" containerID="5db12bb61244978bbac8599217c66d132302969e738bc58a804cf67418aef0ad" exitCode=137
Nov 28 06:57:33 crc kubenswrapper[4922]: I1128 06:57:33.293959 4922 scope.go:117] "RemoveContainer" containerID="5db12bb61244978bbac8599217c66d132302969e738bc58a804cf67418aef0ad"
Nov 28 06:57:33 crc kubenswrapper[4922]: I1128 06:57:33.293967 4922 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 28 06:57:33 crc kubenswrapper[4922]: I1128 06:57:33.348806 4922 scope.go:117] "RemoveContainer" containerID="5db12bb61244978bbac8599217c66d132302969e738bc58a804cf67418aef0ad" Nov 28 06:57:33 crc kubenswrapper[4922]: E1128 06:57:33.349458 4922 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"5db12bb61244978bbac8599217c66d132302969e738bc58a804cf67418aef0ad\": container with ID starting with 5db12bb61244978bbac8599217c66d132302969e738bc58a804cf67418aef0ad not found: ID does not exist" containerID="5db12bb61244978bbac8599217c66d132302969e738bc58a804cf67418aef0ad" Nov 28 06:57:33 crc kubenswrapper[4922]: I1128 06:57:33.349530 4922 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5db12bb61244978bbac8599217c66d132302969e738bc58a804cf67418aef0ad"} err="failed to get container status \"5db12bb61244978bbac8599217c66d132302969e738bc58a804cf67418aef0ad\": rpc error: code = NotFound desc = could not find container \"5db12bb61244978bbac8599217c66d132302969e738bc58a804cf67418aef0ad\": container with ID starting with 5db12bb61244978bbac8599217c66d132302969e738bc58a804cf67418aef0ad not found: ID does not exist" Nov 28 06:57:33 crc kubenswrapper[4922]: I1128 06:57:33.375730 4922 reconciler_common.go:293] "Volume detached for volume \"pod-resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir\") on node \"crc\" DevicePath \"\"" Nov 28 06:57:33 crc kubenswrapper[4922]: I1128 06:57:33.409811 4922 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" path="/var/lib/kubelet/pods/f85e55b1a89d02b0cb034b1ea31ed45a/volumes" Nov 28 06:57:33 crc kubenswrapper[4922]: I1128 06:57:33.410106 4922 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" podUID="" Nov 28 06:57:33 crc kubenswrapper[4922]: I1128 06:57:33.423333 4922 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"] Nov 28 06:57:33 crc kubenswrapper[4922]: I1128 06:57:33.423520 4922 kubelet.go:2649] "Unable to find pod for mirror pod, skipping" mirrorPod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" mirrorPodUID="ac3cb9a5-f602-4601-b751-85f0881f98a2" Nov 28 06:57:33 crc kubenswrapper[4922]: I1128 06:57:33.427270 4922 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"] Nov 28 06:57:33 crc kubenswrapper[4922]: I1128 06:57:33.427313 4922 kubelet.go:2673] "Unable to find pod for mirror pod, skipping" mirrorPod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" mirrorPodUID="ac3cb9a5-f602-4601-b751-85f0881f98a2" Nov 28 06:57:33 crc kubenswrapper[4922]: I1128 06:57:33.511133 4922 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"machine-api-operator-images" Nov 28 06:57:33 crc kubenswrapper[4922]: I1128 06:57:33.911314 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"serving-cert" Nov 28 06:57:33 crc kubenswrapper[4922]: I1128 06:57:33.911774 4922 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-samples-operator"/"openshift-service-ca.crt" Nov 28 06:57:49 crc kubenswrapper[4922]: I1128 06:57:49.400747 4922 generic.go:334] 
"Generic (PLEG): container finished" podID="7a50e5be-5b15-472d-a504-3dc449b474e6" containerID="e093cfc0ee7293d8fb0407b9c3558dcfcf2444e36316d5066799b6b52f54fd1c" exitCode=0 Nov 28 06:57:49 crc kubenswrapper[4922]: I1128 06:57:49.408851 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-2t2xx" event={"ID":"7a50e5be-5b15-472d-a504-3dc449b474e6","Type":"ContainerDied","Data":"e093cfc0ee7293d8fb0407b9c3558dcfcf2444e36316d5066799b6b52f54fd1c"} Nov 28 06:57:49 crc kubenswrapper[4922]: I1128 06:57:49.409517 4922 scope.go:117] "RemoveContainer" containerID="e093cfc0ee7293d8fb0407b9c3558dcfcf2444e36316d5066799b6b52f54fd1c" Nov 28 06:57:50 crc kubenswrapper[4922]: I1128 06:57:50.408281 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-2t2xx" event={"ID":"7a50e5be-5b15-472d-a504-3dc449b474e6","Type":"ContainerStarted","Data":"53b3c10cd9c830b69858609b75d7345194dddd72a220a695c2be6192cbb8830f"} Nov 28 06:57:50 crc kubenswrapper[4922]: I1128 06:57:50.408840 4922 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/marketplace-operator-79b997595-2t2xx" Nov 28 06:57:50 crc kubenswrapper[4922]: I1128 06:57:50.411657 4922 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/marketplace-operator-79b997595-2t2xx" Nov 28 06:57:53 crc kubenswrapper[4922]: I1128 06:57:53.501249 4922 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-xlthx"] Nov 28 06:57:53 crc kubenswrapper[4922]: I1128 06:57:53.501831 4922 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-controller-manager/controller-manager-879f6c89f-xlthx" podUID="fede5fe4-d38c-46de-b334-32e9f56cf110" containerName="controller-manager" containerID="cri-o://7b65c9201d746b26486f46a68fec8f67b36698ee79732d6d1b114350b13b68dd" gracePeriod=30 Nov 28 06:57:53 crc kubenswrapper[4922]: I1128 06:57:53.591529 4922 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-qckrl"] Nov 28 06:57:53 crc kubenswrapper[4922]: I1128 06:57:53.592059 4922 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-qckrl" podUID="1aefeaa5-e3f1-4aed-b152-35d380c3f87b" containerName="route-controller-manager" containerID="cri-o://dcc895042ec3a09e388d51c22a74ecf2d8659b5833fe1945d74a0ccc89e330c6" gracePeriod=30 Nov 28 06:57:53 crc kubenswrapper[4922]: I1128 06:57:53.942793 4922 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-879f6c89f-xlthx" Nov 28 06:57:53 crc kubenswrapper[4922]: I1128 06:57:53.949182 4922 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-qckrl" Nov 28 06:57:54 crc kubenswrapper[4922]: I1128 06:57:54.011581 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-87lkg\" (UniqueName: \"kubernetes.io/projected/1aefeaa5-e3f1-4aed-b152-35d380c3f87b-kube-api-access-87lkg\") pod \"1aefeaa5-e3f1-4aed-b152-35d380c3f87b\" (UID: \"1aefeaa5-e3f1-4aed-b152-35d380c3f87b\") " Nov 28 06:57:54 crc kubenswrapper[4922]: I1128 06:57:54.011636 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/fede5fe4-d38c-46de-b334-32e9f56cf110-serving-cert\") pod \"fede5fe4-d38c-46de-b334-32e9f56cf110\" (UID: \"fede5fe4-d38c-46de-b334-32e9f56cf110\") " Nov 28 06:57:54 crc kubenswrapper[4922]: I1128 06:57:54.011670 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1aefeaa5-e3f1-4aed-b152-35d380c3f87b-config\") pod \"1aefeaa5-e3f1-4aed-b152-35d380c3f87b\" (UID: \"1aefeaa5-e3f1-4aed-b152-35d380c3f87b\") " Nov 28 06:57:54 crc kubenswrapper[4922]: I1128 06:57:54.011698 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/fede5fe4-d38c-46de-b334-32e9f56cf110-config\") pod \"fede5fe4-d38c-46de-b334-32e9f56cf110\" (UID: \"fede5fe4-d38c-46de-b334-32e9f56cf110\") " Nov 28 06:57:54 crc kubenswrapper[4922]: I1128 06:57:54.011732 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/fede5fe4-d38c-46de-b334-32e9f56cf110-proxy-ca-bundles\") pod \"fede5fe4-d38c-46de-b334-32e9f56cf110\" (UID: \"fede5fe4-d38c-46de-b334-32e9f56cf110\") " Nov 28 06:57:54 crc kubenswrapper[4922]: I1128 06:57:54.011781 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bz7dq\" (UniqueName: \"kubernetes.io/projected/fede5fe4-d38c-46de-b334-32e9f56cf110-kube-api-access-bz7dq\") pod \"fede5fe4-d38c-46de-b334-32e9f56cf110\" (UID: \"fede5fe4-d38c-46de-b334-32e9f56cf110\") " Nov 28 06:57:54 crc kubenswrapper[4922]: I1128 06:57:54.011805 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1aefeaa5-e3f1-4aed-b152-35d380c3f87b-serving-cert\") pod \"1aefeaa5-e3f1-4aed-b152-35d380c3f87b\" (UID: \"1aefeaa5-e3f1-4aed-b152-35d380c3f87b\") " Nov 28 06:57:54 crc kubenswrapper[4922]: I1128 06:57:54.011841 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/fede5fe4-d38c-46de-b334-32e9f56cf110-client-ca\") pod \"fede5fe4-d38c-46de-b334-32e9f56cf110\" (UID: \"fede5fe4-d38c-46de-b334-32e9f56cf110\") " Nov 28 06:57:54 crc kubenswrapper[4922]: I1128 06:57:54.011897 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/1aefeaa5-e3f1-4aed-b152-35d380c3f87b-client-ca\") pod \"1aefeaa5-e3f1-4aed-b152-35d380c3f87b\" (UID: \"1aefeaa5-e3f1-4aed-b152-35d380c3f87b\") " Nov 28 06:57:54 crc kubenswrapper[4922]: I1128 06:57:54.012476 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1aefeaa5-e3f1-4aed-b152-35d380c3f87b-config" (OuterVolumeSpecName: "config") pod "1aefeaa5-e3f1-4aed-b152-35d380c3f87b" (UID: 
"1aefeaa5-e3f1-4aed-b152-35d380c3f87b"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 06:57:54 crc kubenswrapper[4922]: I1128 06:57:54.012519 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1aefeaa5-e3f1-4aed-b152-35d380c3f87b-client-ca" (OuterVolumeSpecName: "client-ca") pod "1aefeaa5-e3f1-4aed-b152-35d380c3f87b" (UID: "1aefeaa5-e3f1-4aed-b152-35d380c3f87b"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 06:57:54 crc kubenswrapper[4922]: I1128 06:57:54.012615 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/fede5fe4-d38c-46de-b334-32e9f56cf110-config" (OuterVolumeSpecName: "config") pod "fede5fe4-d38c-46de-b334-32e9f56cf110" (UID: "fede5fe4-d38c-46de-b334-32e9f56cf110"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 06:57:54 crc kubenswrapper[4922]: I1128 06:57:54.013285 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/fede5fe4-d38c-46de-b334-32e9f56cf110-client-ca" (OuterVolumeSpecName: "client-ca") pod "fede5fe4-d38c-46de-b334-32e9f56cf110" (UID: "fede5fe4-d38c-46de-b334-32e9f56cf110"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 06:57:54 crc kubenswrapper[4922]: I1128 06:57:54.013304 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/fede5fe4-d38c-46de-b334-32e9f56cf110-proxy-ca-bundles" (OuterVolumeSpecName: "proxy-ca-bundles") pod "fede5fe4-d38c-46de-b334-32e9f56cf110" (UID: "fede5fe4-d38c-46de-b334-32e9f56cf110"). InnerVolumeSpecName "proxy-ca-bundles". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 06:57:54 crc kubenswrapper[4922]: I1128 06:57:54.018127 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/fede5fe4-d38c-46de-b334-32e9f56cf110-kube-api-access-bz7dq" (OuterVolumeSpecName: "kube-api-access-bz7dq") pod "fede5fe4-d38c-46de-b334-32e9f56cf110" (UID: "fede5fe4-d38c-46de-b334-32e9f56cf110"). InnerVolumeSpecName "kube-api-access-bz7dq". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 06:57:54 crc kubenswrapper[4922]: I1128 06:57:54.018256 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1aefeaa5-e3f1-4aed-b152-35d380c3f87b-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "1aefeaa5-e3f1-4aed-b152-35d380c3f87b" (UID: "1aefeaa5-e3f1-4aed-b152-35d380c3f87b"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 06:57:54 crc kubenswrapper[4922]: I1128 06:57:54.018361 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1aefeaa5-e3f1-4aed-b152-35d380c3f87b-kube-api-access-87lkg" (OuterVolumeSpecName: "kube-api-access-87lkg") pod "1aefeaa5-e3f1-4aed-b152-35d380c3f87b" (UID: "1aefeaa5-e3f1-4aed-b152-35d380c3f87b"). InnerVolumeSpecName "kube-api-access-87lkg". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 06:57:54 crc kubenswrapper[4922]: I1128 06:57:54.018372 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fede5fe4-d38c-46de-b334-32e9f56cf110-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "fede5fe4-d38c-46de-b334-32e9f56cf110" (UID: "fede5fe4-d38c-46de-b334-32e9f56cf110"). 
InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 06:57:54 crc kubenswrapper[4922]: I1128 06:57:54.113710 4922 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1aefeaa5-e3f1-4aed-b152-35d380c3f87b-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 28 06:57:54 crc kubenswrapper[4922]: I1128 06:57:54.113758 4922 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bz7dq\" (UniqueName: \"kubernetes.io/projected/fede5fe4-d38c-46de-b334-32e9f56cf110-kube-api-access-bz7dq\") on node \"crc\" DevicePath \"\"" Nov 28 06:57:54 crc kubenswrapper[4922]: I1128 06:57:54.113772 4922 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/fede5fe4-d38c-46de-b334-32e9f56cf110-client-ca\") on node \"crc\" DevicePath \"\"" Nov 28 06:57:54 crc kubenswrapper[4922]: I1128 06:57:54.113783 4922 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/1aefeaa5-e3f1-4aed-b152-35d380c3f87b-client-ca\") on node \"crc\" DevicePath \"\"" Nov 28 06:57:54 crc kubenswrapper[4922]: I1128 06:57:54.113794 4922 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-87lkg\" (UniqueName: \"kubernetes.io/projected/1aefeaa5-e3f1-4aed-b152-35d380c3f87b-kube-api-access-87lkg\") on node \"crc\" DevicePath \"\"" Nov 28 06:57:54 crc kubenswrapper[4922]: I1128 06:57:54.113805 4922 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/fede5fe4-d38c-46de-b334-32e9f56cf110-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 28 06:57:54 crc kubenswrapper[4922]: I1128 06:57:54.113818 4922 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1aefeaa5-e3f1-4aed-b152-35d380c3f87b-config\") on node \"crc\" DevicePath \"\"" Nov 28 06:57:54 crc kubenswrapper[4922]: I1128 06:57:54.113831 4922 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/fede5fe4-d38c-46de-b334-32e9f56cf110-config\") on node \"crc\" DevicePath \"\"" Nov 28 06:57:54 crc kubenswrapper[4922]: I1128 06:57:54.113843 4922 reconciler_common.go:293] "Volume detached for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/fede5fe4-d38c-46de-b334-32e9f56cf110-proxy-ca-bundles\") on node \"crc\" DevicePath \"\"" Nov 28 06:57:54 crc kubenswrapper[4922]: I1128 06:57:54.430845 4922 generic.go:334] "Generic (PLEG): container finished" podID="1aefeaa5-e3f1-4aed-b152-35d380c3f87b" containerID="dcc895042ec3a09e388d51c22a74ecf2d8659b5833fe1945d74a0ccc89e330c6" exitCode=0 Nov 28 06:57:54 crc kubenswrapper[4922]: I1128 06:57:54.430889 4922 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-qckrl" Nov 28 06:57:54 crc kubenswrapper[4922]: I1128 06:57:54.430921 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-qckrl" event={"ID":"1aefeaa5-e3f1-4aed-b152-35d380c3f87b","Type":"ContainerDied","Data":"dcc895042ec3a09e388d51c22a74ecf2d8659b5833fe1945d74a0ccc89e330c6"} Nov 28 06:57:54 crc kubenswrapper[4922]: I1128 06:57:54.430961 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-qckrl" event={"ID":"1aefeaa5-e3f1-4aed-b152-35d380c3f87b","Type":"ContainerDied","Data":"4dd1dc28c70662a0d690ebffa4d4a09d15ce47c18d9375bad4dd0d5016896980"} Nov 28 06:57:54 crc kubenswrapper[4922]: I1128 06:57:54.430981 4922 scope.go:117] "RemoveContainer" containerID="dcc895042ec3a09e388d51c22a74ecf2d8659b5833fe1945d74a0ccc89e330c6" Nov 28 06:57:54 crc kubenswrapper[4922]: I1128 06:57:54.434821 4922 generic.go:334] "Generic (PLEG): container finished" podID="fede5fe4-d38c-46de-b334-32e9f56cf110" containerID="7b65c9201d746b26486f46a68fec8f67b36698ee79732d6d1b114350b13b68dd" exitCode=0 Nov 28 06:57:54 crc kubenswrapper[4922]: I1128 06:57:54.434859 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-879f6c89f-xlthx" event={"ID":"fede5fe4-d38c-46de-b334-32e9f56cf110","Type":"ContainerDied","Data":"7b65c9201d746b26486f46a68fec8f67b36698ee79732d6d1b114350b13b68dd"} Nov 28 06:57:54 crc kubenswrapper[4922]: I1128 06:57:54.434887 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-879f6c89f-xlthx" event={"ID":"fede5fe4-d38c-46de-b334-32e9f56cf110","Type":"ContainerDied","Data":"265157a4986ec26a5131405d0872c17d92257f0f440decae6ac9d35584072e74"} Nov 28 06:57:54 crc kubenswrapper[4922]: I1128 06:57:54.434948 4922 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager/controller-manager-879f6c89f-xlthx" Nov 28 06:57:54 crc kubenswrapper[4922]: I1128 06:57:54.460044 4922 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-qckrl"] Nov 28 06:57:54 crc kubenswrapper[4922]: I1128 06:57:54.465804 4922 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-qckrl"] Nov 28 06:57:54 crc kubenswrapper[4922]: I1128 06:57:54.467091 4922 scope.go:117] "RemoveContainer" containerID="dcc895042ec3a09e388d51c22a74ecf2d8659b5833fe1945d74a0ccc89e330c6" Nov 28 06:57:54 crc kubenswrapper[4922]: E1128 06:57:54.467699 4922 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"dcc895042ec3a09e388d51c22a74ecf2d8659b5833fe1945d74a0ccc89e330c6\": container with ID starting with dcc895042ec3a09e388d51c22a74ecf2d8659b5833fe1945d74a0ccc89e330c6 not found: ID does not exist" containerID="dcc895042ec3a09e388d51c22a74ecf2d8659b5833fe1945d74a0ccc89e330c6" Nov 28 06:57:54 crc kubenswrapper[4922]: I1128 06:57:54.467729 4922 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"dcc895042ec3a09e388d51c22a74ecf2d8659b5833fe1945d74a0ccc89e330c6"} err="failed to get container status \"dcc895042ec3a09e388d51c22a74ecf2d8659b5833fe1945d74a0ccc89e330c6\": rpc error: code = NotFound desc = could not find container \"dcc895042ec3a09e388d51c22a74ecf2d8659b5833fe1945d74a0ccc89e330c6\": container with ID starting with dcc895042ec3a09e388d51c22a74ecf2d8659b5833fe1945d74a0ccc89e330c6 not found: ID does not exist" Nov 28 06:57:54 crc kubenswrapper[4922]: I1128 06:57:54.467747 4922 scope.go:117] "RemoveContainer" containerID="7b65c9201d746b26486f46a68fec8f67b36698ee79732d6d1b114350b13b68dd" Nov 28 06:57:54 crc kubenswrapper[4922]: I1128 06:57:54.479408 4922 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-xlthx"] Nov 28 06:57:54 crc kubenswrapper[4922]: I1128 06:57:54.483262 4922 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-xlthx"] Nov 28 06:57:54 crc kubenswrapper[4922]: I1128 06:57:54.495952 4922 scope.go:117] "RemoveContainer" containerID="7b65c9201d746b26486f46a68fec8f67b36698ee79732d6d1b114350b13b68dd" Nov 28 06:57:54 crc kubenswrapper[4922]: E1128 06:57:54.496560 4922 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"7b65c9201d746b26486f46a68fec8f67b36698ee79732d6d1b114350b13b68dd\": container with ID starting with 7b65c9201d746b26486f46a68fec8f67b36698ee79732d6d1b114350b13b68dd not found: ID does not exist" containerID="7b65c9201d746b26486f46a68fec8f67b36698ee79732d6d1b114350b13b68dd" Nov 28 06:57:54 crc kubenswrapper[4922]: I1128 06:57:54.496603 4922 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7b65c9201d746b26486f46a68fec8f67b36698ee79732d6d1b114350b13b68dd"} err="failed to get container status \"7b65c9201d746b26486f46a68fec8f67b36698ee79732d6d1b114350b13b68dd\": rpc error: code = NotFound desc = could not find container \"7b65c9201d746b26486f46a68fec8f67b36698ee79732d6d1b114350b13b68dd\": container with ID starting with 7b65c9201d746b26486f46a68fec8f67b36698ee79732d6d1b114350b13b68dd not found: ID does not exist" Nov 28 06:57:55 crc 
kubenswrapper[4922]: I1128 06:57:55.220120 4922 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-route-controller-manager/route-controller-manager-57d65d459f-jzbf8"] Nov 28 06:57:55 crc kubenswrapper[4922]: E1128 06:57:55.220652 4922 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fede5fe4-d38c-46de-b334-32e9f56cf110" containerName="controller-manager" Nov 28 06:57:55 crc kubenswrapper[4922]: I1128 06:57:55.220689 4922 state_mem.go:107] "Deleted CPUSet assignment" podUID="fede5fe4-d38c-46de-b334-32e9f56cf110" containerName="controller-manager" Nov 28 06:57:55 crc kubenswrapper[4922]: E1128 06:57:55.220733 4922 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1aefeaa5-e3f1-4aed-b152-35d380c3f87b" containerName="route-controller-manager" Nov 28 06:57:55 crc kubenswrapper[4922]: I1128 06:57:55.220753 4922 state_mem.go:107] "Deleted CPUSet assignment" podUID="1aefeaa5-e3f1-4aed-b152-35d380c3f87b" containerName="route-controller-manager" Nov 28 06:57:55 crc kubenswrapper[4922]: I1128 06:57:55.220968 4922 memory_manager.go:354] "RemoveStaleState removing state" podUID="fede5fe4-d38c-46de-b334-32e9f56cf110" containerName="controller-manager" Nov 28 06:57:55 crc kubenswrapper[4922]: I1128 06:57:55.221007 4922 memory_manager.go:354] "RemoveStaleState removing state" podUID="1aefeaa5-e3f1-4aed-b152-35d380c3f87b" containerName="route-controller-manager" Nov 28 06:57:55 crc kubenswrapper[4922]: I1128 06:57:55.221823 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-57d65d459f-jzbf8" Nov 28 06:57:55 crc kubenswrapper[4922]: I1128 06:57:55.228494 4922 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager/controller-manager-55d568895d-hdfv2"] Nov 28 06:57:55 crc kubenswrapper[4922]: I1128 06:57:55.231171 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"serving-cert" Nov 28 06:57:55 crc kubenswrapper[4922]: I1128 06:57:55.231529 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"route-controller-manager-sa-dockercfg-h2zr2" Nov 28 06:57:55 crc kubenswrapper[4922]: I1128 06:57:55.231902 4922 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager/controller-manager-55d568895d-hdfv2" Nov 28 06:57:55 crc kubenswrapper[4922]: I1128 06:57:55.231956 4922 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"client-ca" Nov 28 06:57:55 crc kubenswrapper[4922]: I1128 06:57:55.232321 4922 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"kube-root-ca.crt" Nov 28 06:57:55 crc kubenswrapper[4922]: I1128 06:57:55.234747 4922 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"openshift-service-ca.crt" Nov 28 06:57:55 crc kubenswrapper[4922]: I1128 06:57:55.239500 4922 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"config" Nov 28 06:57:55 crc kubenswrapper[4922]: I1128 06:57:55.244862 4922 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-service-ca.crt" Nov 28 06:57:55 crc kubenswrapper[4922]: I1128 06:57:55.248306 4922 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"client-ca" Nov 28 06:57:55 crc kubenswrapper[4922]: I1128 06:57:55.248909 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"openshift-controller-manager-sa-dockercfg-msq4c" Nov 28 06:57:55 crc kubenswrapper[4922]: I1128 06:57:55.249081 4922 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"config" Nov 28 06:57:55 crc kubenswrapper[4922]: I1128 06:57:55.250364 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"serving-cert" Nov 28 06:57:55 crc kubenswrapper[4922]: I1128 06:57:55.272037 4922 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"kube-root-ca.crt" Nov 28 06:57:55 crc kubenswrapper[4922]: I1128 06:57:55.283762 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-57d65d459f-jzbf8"] Nov 28 06:57:55 crc kubenswrapper[4922]: I1128 06:57:55.284633 4922 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-global-ca" Nov 28 06:57:55 crc kubenswrapper[4922]: I1128 06:57:55.290177 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-55d568895d-hdfv2"] Nov 28 06:57:55 crc kubenswrapper[4922]: I1128 06:57:55.332308 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/ed6effb9-e602-41e9-acd4-273251da4a07-serving-cert\") pod \"route-controller-manager-57d65d459f-jzbf8\" (UID: \"ed6effb9-e602-41e9-acd4-273251da4a07\") " pod="openshift-route-controller-manager/route-controller-manager-57d65d459f-jzbf8" Nov 28 06:57:55 crc kubenswrapper[4922]: I1128 06:57:55.332417 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/706cad39-0e87-44d4-9879-1328daa634f4-client-ca\") pod \"controller-manager-55d568895d-hdfv2\" (UID: \"706cad39-0e87-44d4-9879-1328daa634f4\") " pod="openshift-controller-manager/controller-manager-55d568895d-hdfv2" Nov 28 06:57:55 crc kubenswrapper[4922]: I1128 06:57:55.339050 4922 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/706cad39-0e87-44d4-9879-1328daa634f4-proxy-ca-bundles\") pod \"controller-manager-55d568895d-hdfv2\" (UID: \"706cad39-0e87-44d4-9879-1328daa634f4\") " pod="openshift-controller-manager/controller-manager-55d568895d-hdfv2" Nov 28 06:57:55 crc kubenswrapper[4922]: I1128 06:57:55.339118 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vn6g2\" (UniqueName: \"kubernetes.io/projected/706cad39-0e87-44d4-9879-1328daa634f4-kube-api-access-vn6g2\") pod \"controller-manager-55d568895d-hdfv2\" (UID: \"706cad39-0e87-44d4-9879-1328daa634f4\") " pod="openshift-controller-manager/controller-manager-55d568895d-hdfv2" Nov 28 06:57:55 crc kubenswrapper[4922]: I1128 06:57:55.339170 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/706cad39-0e87-44d4-9879-1328daa634f4-config\") pod \"controller-manager-55d568895d-hdfv2\" (UID: \"706cad39-0e87-44d4-9879-1328daa634f4\") " pod="openshift-controller-manager/controller-manager-55d568895d-hdfv2" Nov 28 06:57:55 crc kubenswrapper[4922]: I1128 06:57:55.339196 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ed6effb9-e602-41e9-acd4-273251da4a07-config\") pod \"route-controller-manager-57d65d459f-jzbf8\" (UID: \"ed6effb9-e602-41e9-acd4-273251da4a07\") " pod="openshift-route-controller-manager/route-controller-manager-57d65d459f-jzbf8" Nov 28 06:57:55 crc kubenswrapper[4922]: I1128 06:57:55.339274 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tk9xw\" (UniqueName: \"kubernetes.io/projected/ed6effb9-e602-41e9-acd4-273251da4a07-kube-api-access-tk9xw\") pod \"route-controller-manager-57d65d459f-jzbf8\" (UID: \"ed6effb9-e602-41e9-acd4-273251da4a07\") " pod="openshift-route-controller-manager/route-controller-manager-57d65d459f-jzbf8" Nov 28 06:57:55 crc kubenswrapper[4922]: I1128 06:57:55.339298 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/ed6effb9-e602-41e9-acd4-273251da4a07-client-ca\") pod \"route-controller-manager-57d65d459f-jzbf8\" (UID: \"ed6effb9-e602-41e9-acd4-273251da4a07\") " pod="openshift-route-controller-manager/route-controller-manager-57d65d459f-jzbf8" Nov 28 06:57:55 crc kubenswrapper[4922]: I1128 06:57:55.339388 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/706cad39-0e87-44d4-9879-1328daa634f4-serving-cert\") pod \"controller-manager-55d568895d-hdfv2\" (UID: \"706cad39-0e87-44d4-9879-1328daa634f4\") " pod="openshift-controller-manager/controller-manager-55d568895d-hdfv2" Nov 28 06:57:55 crc kubenswrapper[4922]: I1128 06:57:55.407960 4922 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1aefeaa5-e3f1-4aed-b152-35d380c3f87b" path="/var/lib/kubelet/pods/1aefeaa5-e3f1-4aed-b152-35d380c3f87b/volumes" Nov 28 06:57:55 crc kubenswrapper[4922]: I1128 06:57:55.408666 4922 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="fede5fe4-d38c-46de-b334-32e9f56cf110" path="/var/lib/kubelet/pods/fede5fe4-d38c-46de-b334-32e9f56cf110/volumes" Nov 28 06:57:55 crc 
kubenswrapper[4922]: I1128 06:57:55.440088 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/ed6effb9-e602-41e9-acd4-273251da4a07-serving-cert\") pod \"route-controller-manager-57d65d459f-jzbf8\" (UID: \"ed6effb9-e602-41e9-acd4-273251da4a07\") " pod="openshift-route-controller-manager/route-controller-manager-57d65d459f-jzbf8" Nov 28 06:57:55 crc kubenswrapper[4922]: I1128 06:57:55.440137 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/706cad39-0e87-44d4-9879-1328daa634f4-client-ca\") pod \"controller-manager-55d568895d-hdfv2\" (UID: \"706cad39-0e87-44d4-9879-1328daa634f4\") " pod="openshift-controller-manager/controller-manager-55d568895d-hdfv2" Nov 28 06:57:55 crc kubenswrapper[4922]: I1128 06:57:55.440163 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/706cad39-0e87-44d4-9879-1328daa634f4-proxy-ca-bundles\") pod \"controller-manager-55d568895d-hdfv2\" (UID: \"706cad39-0e87-44d4-9879-1328daa634f4\") " pod="openshift-controller-manager/controller-manager-55d568895d-hdfv2" Nov 28 06:57:55 crc kubenswrapper[4922]: I1128 06:57:55.441176 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/706cad39-0e87-44d4-9879-1328daa634f4-client-ca\") pod \"controller-manager-55d568895d-hdfv2\" (UID: \"706cad39-0e87-44d4-9879-1328daa634f4\") " pod="openshift-controller-manager/controller-manager-55d568895d-hdfv2" Nov 28 06:57:55 crc kubenswrapper[4922]: I1128 06:57:55.441759 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/706cad39-0e87-44d4-9879-1328daa634f4-proxy-ca-bundles\") pod \"controller-manager-55d568895d-hdfv2\" (UID: \"706cad39-0e87-44d4-9879-1328daa634f4\") " pod="openshift-controller-manager/controller-manager-55d568895d-hdfv2" Nov 28 06:57:55 crc kubenswrapper[4922]: I1128 06:57:55.441838 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vn6g2\" (UniqueName: \"kubernetes.io/projected/706cad39-0e87-44d4-9879-1328daa634f4-kube-api-access-vn6g2\") pod \"controller-manager-55d568895d-hdfv2\" (UID: \"706cad39-0e87-44d4-9879-1328daa634f4\") " pod="openshift-controller-manager/controller-manager-55d568895d-hdfv2" Nov 28 06:57:55 crc kubenswrapper[4922]: I1128 06:57:55.441928 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/706cad39-0e87-44d4-9879-1328daa634f4-config\") pod \"controller-manager-55d568895d-hdfv2\" (UID: \"706cad39-0e87-44d4-9879-1328daa634f4\") " pod="openshift-controller-manager/controller-manager-55d568895d-hdfv2" Nov 28 06:57:55 crc kubenswrapper[4922]: I1128 06:57:55.441954 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ed6effb9-e602-41e9-acd4-273251da4a07-config\") pod \"route-controller-manager-57d65d459f-jzbf8\" (UID: \"ed6effb9-e602-41e9-acd4-273251da4a07\") " pod="openshift-route-controller-manager/route-controller-manager-57d65d459f-jzbf8" Nov 28 06:57:55 crc kubenswrapper[4922]: I1128 06:57:55.444392 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: 
\"kubernetes.io/configmap/ed6effb9-e602-41e9-acd4-273251da4a07-client-ca\") pod \"route-controller-manager-57d65d459f-jzbf8\" (UID: \"ed6effb9-e602-41e9-acd4-273251da4a07\") " pod="openshift-route-controller-manager/route-controller-manager-57d65d459f-jzbf8" Nov 28 06:57:55 crc kubenswrapper[4922]: I1128 06:57:55.444434 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tk9xw\" (UniqueName: \"kubernetes.io/projected/ed6effb9-e602-41e9-acd4-273251da4a07-kube-api-access-tk9xw\") pod \"route-controller-manager-57d65d459f-jzbf8\" (UID: \"ed6effb9-e602-41e9-acd4-273251da4a07\") " pod="openshift-route-controller-manager/route-controller-manager-57d65d459f-jzbf8" Nov 28 06:57:55 crc kubenswrapper[4922]: I1128 06:57:55.444483 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/706cad39-0e87-44d4-9879-1328daa634f4-serving-cert\") pod \"controller-manager-55d568895d-hdfv2\" (UID: \"706cad39-0e87-44d4-9879-1328daa634f4\") " pod="openshift-controller-manager/controller-manager-55d568895d-hdfv2" Nov 28 06:57:55 crc kubenswrapper[4922]: I1128 06:57:55.445652 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ed6effb9-e602-41e9-acd4-273251da4a07-config\") pod \"route-controller-manager-57d65d459f-jzbf8\" (UID: \"ed6effb9-e602-41e9-acd4-273251da4a07\") " pod="openshift-route-controller-manager/route-controller-manager-57d65d459f-jzbf8" Nov 28 06:57:55 crc kubenswrapper[4922]: I1128 06:57:55.445706 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/706cad39-0e87-44d4-9879-1328daa634f4-config\") pod \"controller-manager-55d568895d-hdfv2\" (UID: \"706cad39-0e87-44d4-9879-1328daa634f4\") " pod="openshift-controller-manager/controller-manager-55d568895d-hdfv2" Nov 28 06:57:55 crc kubenswrapper[4922]: I1128 06:57:55.446001 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/ed6effb9-e602-41e9-acd4-273251da4a07-client-ca\") pod \"route-controller-manager-57d65d459f-jzbf8\" (UID: \"ed6effb9-e602-41e9-acd4-273251da4a07\") " pod="openshift-route-controller-manager/route-controller-manager-57d65d459f-jzbf8" Nov 28 06:57:55 crc kubenswrapper[4922]: I1128 06:57:55.448946 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/ed6effb9-e602-41e9-acd4-273251da4a07-serving-cert\") pod \"route-controller-manager-57d65d459f-jzbf8\" (UID: \"ed6effb9-e602-41e9-acd4-273251da4a07\") " pod="openshift-route-controller-manager/route-controller-manager-57d65d459f-jzbf8" Nov 28 06:57:55 crc kubenswrapper[4922]: I1128 06:57:55.457906 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/706cad39-0e87-44d4-9879-1328daa634f4-serving-cert\") pod \"controller-manager-55d568895d-hdfv2\" (UID: \"706cad39-0e87-44d4-9879-1328daa634f4\") " pod="openshift-controller-manager/controller-manager-55d568895d-hdfv2" Nov 28 06:57:55 crc kubenswrapper[4922]: I1128 06:57:55.459442 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vn6g2\" (UniqueName: \"kubernetes.io/projected/706cad39-0e87-44d4-9879-1328daa634f4-kube-api-access-vn6g2\") pod \"controller-manager-55d568895d-hdfv2\" (UID: \"706cad39-0e87-44d4-9879-1328daa634f4\") " 
pod="openshift-controller-manager/controller-manager-55d568895d-hdfv2" Nov 28 06:57:55 crc kubenswrapper[4922]: I1128 06:57:55.466567 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tk9xw\" (UniqueName: \"kubernetes.io/projected/ed6effb9-e602-41e9-acd4-273251da4a07-kube-api-access-tk9xw\") pod \"route-controller-manager-57d65d459f-jzbf8\" (UID: \"ed6effb9-e602-41e9-acd4-273251da4a07\") " pod="openshift-route-controller-manager/route-controller-manager-57d65d459f-jzbf8" Nov 28 06:57:55 crc kubenswrapper[4922]: I1128 06:57:55.564988 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-57d65d459f-jzbf8" Nov 28 06:57:55 crc kubenswrapper[4922]: I1128 06:57:55.582416 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-55d568895d-hdfv2" Nov 28 06:57:55 crc kubenswrapper[4922]: I1128 06:57:55.820643 4922 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-55d568895d-hdfv2"] Nov 28 06:57:55 crc kubenswrapper[4922]: I1128 06:57:55.846338 4922 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-57d65d459f-jzbf8"] Nov 28 06:57:55 crc kubenswrapper[4922]: I1128 06:57:55.919955 4922 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-57d65d459f-jzbf8"] Nov 28 06:57:55 crc kubenswrapper[4922]: I1128 06:57:55.964860 4922 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-55d568895d-hdfv2"] Nov 28 06:57:56 crc kubenswrapper[4922]: I1128 06:57:56.453209 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-55d568895d-hdfv2" event={"ID":"706cad39-0e87-44d4-9879-1328daa634f4","Type":"ContainerStarted","Data":"9f5c37519643ff8e16c97e02d3430c8faa05ed4c4b3b88685e9ca4366f97e754"} Nov 28 06:57:56 crc kubenswrapper[4922]: I1128 06:57:56.453540 4922 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-controller-manager/controller-manager-55d568895d-hdfv2" Nov 28 06:57:56 crc kubenswrapper[4922]: I1128 06:57:56.453553 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-55d568895d-hdfv2" event={"ID":"706cad39-0e87-44d4-9879-1328daa634f4","Type":"ContainerStarted","Data":"454f65e0242e0f7726e5951070c8520f8555f0772d8dd45af4124c7ee301a03b"} Nov 28 06:57:56 crc kubenswrapper[4922]: I1128 06:57:56.453317 4922 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-controller-manager/controller-manager-55d568895d-hdfv2" podUID="706cad39-0e87-44d4-9879-1328daa634f4" containerName="controller-manager" containerID="cri-o://9f5c37519643ff8e16c97e02d3430c8faa05ed4c4b3b88685e9ca4366f97e754" gracePeriod=30 Nov 28 06:57:56 crc kubenswrapper[4922]: I1128 06:57:56.455535 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-57d65d459f-jzbf8" event={"ID":"ed6effb9-e602-41e9-acd4-273251da4a07","Type":"ContainerStarted","Data":"1f7ed4e717bf93ea2cd4e43a5fcd5e1a529e6555c4cc08fb246f181247415d27"} Nov 28 06:57:56 crc kubenswrapper[4922]: I1128 06:57:56.455560 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-route-controller-manager/route-controller-manager-57d65d459f-jzbf8" event={"ID":"ed6effb9-e602-41e9-acd4-273251da4a07","Type":"ContainerStarted","Data":"a517b13f6736b29c4d74edd3a8a2b2abf9e935b592381ff43072bf6042d45d85"} Nov 28 06:57:56 crc kubenswrapper[4922]: I1128 06:57:56.455694 4922 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-route-controller-manager/route-controller-manager-57d65d459f-jzbf8" podUID="ed6effb9-e602-41e9-acd4-273251da4a07" containerName="route-controller-manager" containerID="cri-o://1f7ed4e717bf93ea2cd4e43a5fcd5e1a529e6555c4cc08fb246f181247415d27" gracePeriod=30 Nov 28 06:57:56 crc kubenswrapper[4922]: I1128 06:57:56.455963 4922 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-route-controller-manager/route-controller-manager-57d65d459f-jzbf8" Nov 28 06:57:56 crc kubenswrapper[4922]: I1128 06:57:56.460433 4922 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-controller-manager/controller-manager-55d568895d-hdfv2" Nov 28 06:57:56 crc kubenswrapper[4922]: I1128 06:57:56.464784 4922 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-route-controller-manager/route-controller-manager-57d65d459f-jzbf8" Nov 28 06:57:56 crc kubenswrapper[4922]: I1128 06:57:56.477556 4922 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager/controller-manager-55d568895d-hdfv2" podStartSLOduration=3.477533926 podStartE2EDuration="3.477533926s" podCreationTimestamp="2025-11-28 06:57:53 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 06:57:56.475768017 +0000 UTC m=+321.396163599" watchObservedRunningTime="2025-11-28 06:57:56.477533926 +0000 UTC m=+321.397929508" Nov 28 06:57:56 crc kubenswrapper[4922]: I1128 06:57:56.497061 4922 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-route-controller-manager/route-controller-manager-57d65d459f-jzbf8" podStartSLOduration=3.497045664 podStartE2EDuration="3.497045664s" podCreationTimestamp="2025-11-28 06:57:53 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 06:57:56.495426539 +0000 UTC m=+321.415822141" watchObservedRunningTime="2025-11-28 06:57:56.497045664 +0000 UTC m=+321.417441246" Nov 28 06:57:56 crc kubenswrapper[4922]: I1128 06:57:56.914237 4922 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-57d65d459f-jzbf8" Nov 28 06:57:56 crc kubenswrapper[4922]: I1128 06:57:56.918777 4922 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager/controller-manager-55d568895d-hdfv2" Nov 28 06:57:57 crc kubenswrapper[4922]: I1128 06:57:57.079857 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/706cad39-0e87-44d4-9879-1328daa634f4-client-ca\") pod \"706cad39-0e87-44d4-9879-1328daa634f4\" (UID: \"706cad39-0e87-44d4-9879-1328daa634f4\") " Nov 28 06:57:57 crc kubenswrapper[4922]: I1128 06:57:57.079948 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/706cad39-0e87-44d4-9879-1328daa634f4-proxy-ca-bundles\") pod \"706cad39-0e87-44d4-9879-1328daa634f4\" (UID: \"706cad39-0e87-44d4-9879-1328daa634f4\") " Nov 28 06:57:57 crc kubenswrapper[4922]: I1128 06:57:57.080032 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/706cad39-0e87-44d4-9879-1328daa634f4-serving-cert\") pod \"706cad39-0e87-44d4-9879-1328daa634f4\" (UID: \"706cad39-0e87-44d4-9879-1328daa634f4\") " Nov 28 06:57:57 crc kubenswrapper[4922]: I1128 06:57:57.080057 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tk9xw\" (UniqueName: \"kubernetes.io/projected/ed6effb9-e602-41e9-acd4-273251da4a07-kube-api-access-tk9xw\") pod \"ed6effb9-e602-41e9-acd4-273251da4a07\" (UID: \"ed6effb9-e602-41e9-acd4-273251da4a07\") " Nov 28 06:57:57 crc kubenswrapper[4922]: I1128 06:57:57.080093 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/ed6effb9-e602-41e9-acd4-273251da4a07-serving-cert\") pod \"ed6effb9-e602-41e9-acd4-273251da4a07\" (UID: \"ed6effb9-e602-41e9-acd4-273251da4a07\") " Nov 28 06:57:57 crc kubenswrapper[4922]: I1128 06:57:57.080113 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vn6g2\" (UniqueName: \"kubernetes.io/projected/706cad39-0e87-44d4-9879-1328daa634f4-kube-api-access-vn6g2\") pod \"706cad39-0e87-44d4-9879-1328daa634f4\" (UID: \"706cad39-0e87-44d4-9879-1328daa634f4\") " Nov 28 06:57:57 crc kubenswrapper[4922]: I1128 06:57:57.080131 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ed6effb9-e602-41e9-acd4-273251da4a07-config\") pod \"ed6effb9-e602-41e9-acd4-273251da4a07\" (UID: \"ed6effb9-e602-41e9-acd4-273251da4a07\") " Nov 28 06:57:57 crc kubenswrapper[4922]: I1128 06:57:57.080166 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/ed6effb9-e602-41e9-acd4-273251da4a07-client-ca\") pod \"ed6effb9-e602-41e9-acd4-273251da4a07\" (UID: \"ed6effb9-e602-41e9-acd4-273251da4a07\") " Nov 28 06:57:57 crc kubenswrapper[4922]: I1128 06:57:57.080198 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/706cad39-0e87-44d4-9879-1328daa634f4-config\") pod \"706cad39-0e87-44d4-9879-1328daa634f4\" (UID: \"706cad39-0e87-44d4-9879-1328daa634f4\") " Nov 28 06:57:57 crc kubenswrapper[4922]: I1128 06:57:57.080867 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/706cad39-0e87-44d4-9879-1328daa634f4-proxy-ca-bundles" (OuterVolumeSpecName: "proxy-ca-bundles") pod "706cad39-0e87-44d4-9879-1328daa634f4" 
(UID: "706cad39-0e87-44d4-9879-1328daa634f4"). InnerVolumeSpecName "proxy-ca-bundles". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 06:57:57 crc kubenswrapper[4922]: I1128 06:57:57.080935 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ed6effb9-e602-41e9-acd4-273251da4a07-client-ca" (OuterVolumeSpecName: "client-ca") pod "ed6effb9-e602-41e9-acd4-273251da4a07" (UID: "ed6effb9-e602-41e9-acd4-273251da4a07"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 06:57:57 crc kubenswrapper[4922]: I1128 06:57:57.081079 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ed6effb9-e602-41e9-acd4-273251da4a07-config" (OuterVolumeSpecName: "config") pod "ed6effb9-e602-41e9-acd4-273251da4a07" (UID: "ed6effb9-e602-41e9-acd4-273251da4a07"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 06:57:57 crc kubenswrapper[4922]: I1128 06:57:57.085094 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ed6effb9-e602-41e9-acd4-273251da4a07-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "ed6effb9-e602-41e9-acd4-273251da4a07" (UID: "ed6effb9-e602-41e9-acd4-273251da4a07"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 06:57:57 crc kubenswrapper[4922]: I1128 06:57:57.085797 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/706cad39-0e87-44d4-9879-1328daa634f4-config" (OuterVolumeSpecName: "config") pod "706cad39-0e87-44d4-9879-1328daa634f4" (UID: "706cad39-0e87-44d4-9879-1328daa634f4"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 06:57:57 crc kubenswrapper[4922]: I1128 06:57:57.085838 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/706cad39-0e87-44d4-9879-1328daa634f4-client-ca" (OuterVolumeSpecName: "client-ca") pod "706cad39-0e87-44d4-9879-1328daa634f4" (UID: "706cad39-0e87-44d4-9879-1328daa634f4"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 06:57:57 crc kubenswrapper[4922]: I1128 06:57:57.085984 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ed6effb9-e602-41e9-acd4-273251da4a07-kube-api-access-tk9xw" (OuterVolumeSpecName: "kube-api-access-tk9xw") pod "ed6effb9-e602-41e9-acd4-273251da4a07" (UID: "ed6effb9-e602-41e9-acd4-273251da4a07"). InnerVolumeSpecName "kube-api-access-tk9xw". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 06:57:57 crc kubenswrapper[4922]: I1128 06:57:57.091348 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/706cad39-0e87-44d4-9879-1328daa634f4-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "706cad39-0e87-44d4-9879-1328daa634f4" (UID: "706cad39-0e87-44d4-9879-1328daa634f4"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 06:57:57 crc kubenswrapper[4922]: I1128 06:57:57.091572 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/706cad39-0e87-44d4-9879-1328daa634f4-kube-api-access-vn6g2" (OuterVolumeSpecName: "kube-api-access-vn6g2") pod "706cad39-0e87-44d4-9879-1328daa634f4" (UID: "706cad39-0e87-44d4-9879-1328daa634f4"). 
InnerVolumeSpecName "kube-api-access-vn6g2". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 06:57:57 crc kubenswrapper[4922]: I1128 06:57:57.182004 4922 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tk9xw\" (UniqueName: \"kubernetes.io/projected/ed6effb9-e602-41e9-acd4-273251da4a07-kube-api-access-tk9xw\") on node \"crc\" DevicePath \"\"" Nov 28 06:57:57 crc kubenswrapper[4922]: I1128 06:57:57.182035 4922 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/ed6effb9-e602-41e9-acd4-273251da4a07-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 28 06:57:57 crc kubenswrapper[4922]: I1128 06:57:57.182045 4922 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vn6g2\" (UniqueName: \"kubernetes.io/projected/706cad39-0e87-44d4-9879-1328daa634f4-kube-api-access-vn6g2\") on node \"crc\" DevicePath \"\"" Nov 28 06:57:57 crc kubenswrapper[4922]: I1128 06:57:57.182054 4922 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ed6effb9-e602-41e9-acd4-273251da4a07-config\") on node \"crc\" DevicePath \"\"" Nov 28 06:57:57 crc kubenswrapper[4922]: I1128 06:57:57.182063 4922 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/ed6effb9-e602-41e9-acd4-273251da4a07-client-ca\") on node \"crc\" DevicePath \"\"" Nov 28 06:57:57 crc kubenswrapper[4922]: I1128 06:57:57.182072 4922 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/706cad39-0e87-44d4-9879-1328daa634f4-config\") on node \"crc\" DevicePath \"\"" Nov 28 06:57:57 crc kubenswrapper[4922]: I1128 06:57:57.182079 4922 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/706cad39-0e87-44d4-9879-1328daa634f4-client-ca\") on node \"crc\" DevicePath \"\"" Nov 28 06:57:57 crc kubenswrapper[4922]: I1128 06:57:57.182087 4922 reconciler_common.go:293] "Volume detached for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/706cad39-0e87-44d4-9879-1328daa634f4-proxy-ca-bundles\") on node \"crc\" DevicePath \"\"" Nov 28 06:57:57 crc kubenswrapper[4922]: I1128 06:57:57.182095 4922 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/706cad39-0e87-44d4-9879-1328daa634f4-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 28 06:57:57 crc kubenswrapper[4922]: I1128 06:57:57.215887 4922 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6cdcb8b5d6-kcnnv"] Nov 28 06:57:57 crc kubenswrapper[4922]: E1128 06:57:57.225004 4922 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="706cad39-0e87-44d4-9879-1328daa634f4" containerName="controller-manager" Nov 28 06:57:57 crc kubenswrapper[4922]: I1128 06:57:57.225050 4922 state_mem.go:107] "Deleted CPUSet assignment" podUID="706cad39-0e87-44d4-9879-1328daa634f4" containerName="controller-manager" Nov 28 06:57:57 crc kubenswrapper[4922]: E1128 06:57:57.225080 4922 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ed6effb9-e602-41e9-acd4-273251da4a07" containerName="route-controller-manager" Nov 28 06:57:57 crc kubenswrapper[4922]: I1128 06:57:57.225089 4922 state_mem.go:107] "Deleted CPUSet assignment" podUID="ed6effb9-e602-41e9-acd4-273251da4a07" containerName="route-controller-manager" Nov 28 06:57:57 crc kubenswrapper[4922]: I1128 
06:57:57.225308 4922 memory_manager.go:354] "RemoveStaleState removing state" podUID="ed6effb9-e602-41e9-acd4-273251da4a07" containerName="route-controller-manager" Nov 28 06:57:57 crc kubenswrapper[4922]: I1128 06:57:57.225327 4922 memory_manager.go:354] "RemoveStaleState removing state" podUID="706cad39-0e87-44d4-9879-1328daa634f4" containerName="controller-manager" Nov 28 06:57:57 crc kubenswrapper[4922]: I1128 06:57:57.225845 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6cdcb8b5d6-kcnnv" Nov 28 06:57:57 crc kubenswrapper[4922]: I1128 06:57:57.229947 4922 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager/controller-manager-6c59b458cf-64w5v"] Nov 28 06:57:57 crc kubenswrapper[4922]: I1128 06:57:57.230906 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-6c59b458cf-64w5v" Nov 28 06:57:57 crc kubenswrapper[4922]: I1128 06:57:57.238781 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-6c59b458cf-64w5v"] Nov 28 06:57:57 crc kubenswrapper[4922]: I1128 06:57:57.244550 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6cdcb8b5d6-kcnnv"] Nov 28 06:57:57 crc kubenswrapper[4922]: I1128 06:57:57.385601 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9e536b56-424b-4041-9cb5-63c5edb6bd99-config\") pod \"route-controller-manager-6cdcb8b5d6-kcnnv\" (UID: \"9e536b56-424b-4041-9cb5-63c5edb6bd99\") " pod="openshift-route-controller-manager/route-controller-manager-6cdcb8b5d6-kcnnv" Nov 28 06:57:57 crc kubenswrapper[4922]: I1128 06:57:57.385648 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5686f5d7-4c34-459c-addc-f4a9335238d2-config\") pod \"controller-manager-6c59b458cf-64w5v\" (UID: \"5686f5d7-4c34-459c-addc-f4a9335238d2\") " pod="openshift-controller-manager/controller-manager-6c59b458cf-64w5v" Nov 28 06:57:57 crc kubenswrapper[4922]: I1128 06:57:57.385674 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/9e536b56-424b-4041-9cb5-63c5edb6bd99-client-ca\") pod \"route-controller-manager-6cdcb8b5d6-kcnnv\" (UID: \"9e536b56-424b-4041-9cb5-63c5edb6bd99\") " pod="openshift-route-controller-manager/route-controller-manager-6cdcb8b5d6-kcnnv" Nov 28 06:57:57 crc kubenswrapper[4922]: I1128 06:57:57.385693 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9e536b56-424b-4041-9cb5-63c5edb6bd99-serving-cert\") pod \"route-controller-manager-6cdcb8b5d6-kcnnv\" (UID: \"9e536b56-424b-4041-9cb5-63c5edb6bd99\") " pod="openshift-route-controller-manager/route-controller-manager-6cdcb8b5d6-kcnnv" Nov 28 06:57:57 crc kubenswrapper[4922]: I1128 06:57:57.385707 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/5686f5d7-4c34-459c-addc-f4a9335238d2-serving-cert\") pod \"controller-manager-6c59b458cf-64w5v\" (UID: \"5686f5d7-4c34-459c-addc-f4a9335238d2\") " 
pod="openshift-controller-manager/controller-manager-6c59b458cf-64w5v" Nov 28 06:57:57 crc kubenswrapper[4922]: I1128 06:57:57.386012 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cft2r\" (UniqueName: \"kubernetes.io/projected/5686f5d7-4c34-459c-addc-f4a9335238d2-kube-api-access-cft2r\") pod \"controller-manager-6c59b458cf-64w5v\" (UID: \"5686f5d7-4c34-459c-addc-f4a9335238d2\") " pod="openshift-controller-manager/controller-manager-6c59b458cf-64w5v" Nov 28 06:57:57 crc kubenswrapper[4922]: I1128 06:57:57.386071 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6l6fs\" (UniqueName: \"kubernetes.io/projected/9e536b56-424b-4041-9cb5-63c5edb6bd99-kube-api-access-6l6fs\") pod \"route-controller-manager-6cdcb8b5d6-kcnnv\" (UID: \"9e536b56-424b-4041-9cb5-63c5edb6bd99\") " pod="openshift-route-controller-manager/route-controller-manager-6cdcb8b5d6-kcnnv" Nov 28 06:57:57 crc kubenswrapper[4922]: I1128 06:57:57.386128 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/5686f5d7-4c34-459c-addc-f4a9335238d2-client-ca\") pod \"controller-manager-6c59b458cf-64w5v\" (UID: \"5686f5d7-4c34-459c-addc-f4a9335238d2\") " pod="openshift-controller-manager/controller-manager-6c59b458cf-64w5v" Nov 28 06:57:57 crc kubenswrapper[4922]: I1128 06:57:57.386165 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/5686f5d7-4c34-459c-addc-f4a9335238d2-proxy-ca-bundles\") pod \"controller-manager-6c59b458cf-64w5v\" (UID: \"5686f5d7-4c34-459c-addc-f4a9335238d2\") " pod="openshift-controller-manager/controller-manager-6c59b458cf-64w5v" Nov 28 06:57:57 crc kubenswrapper[4922]: I1128 06:57:57.461505 4922 generic.go:334] "Generic (PLEG): container finished" podID="ed6effb9-e602-41e9-acd4-273251da4a07" containerID="1f7ed4e717bf93ea2cd4e43a5fcd5e1a529e6555c4cc08fb246f181247415d27" exitCode=0 Nov 28 06:57:57 crc kubenswrapper[4922]: I1128 06:57:57.461597 4922 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-57d65d459f-jzbf8" Nov 28 06:57:57 crc kubenswrapper[4922]: I1128 06:57:57.461585 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-57d65d459f-jzbf8" event={"ID":"ed6effb9-e602-41e9-acd4-273251da4a07","Type":"ContainerDied","Data":"1f7ed4e717bf93ea2cd4e43a5fcd5e1a529e6555c4cc08fb246f181247415d27"} Nov 28 06:57:57 crc kubenswrapper[4922]: I1128 06:57:57.461757 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-57d65d459f-jzbf8" event={"ID":"ed6effb9-e602-41e9-acd4-273251da4a07","Type":"ContainerDied","Data":"a517b13f6736b29c4d74edd3a8a2b2abf9e935b592381ff43072bf6042d45d85"} Nov 28 06:57:57 crc kubenswrapper[4922]: I1128 06:57:57.461782 4922 scope.go:117] "RemoveContainer" containerID="1f7ed4e717bf93ea2cd4e43a5fcd5e1a529e6555c4cc08fb246f181247415d27" Nov 28 06:57:57 crc kubenswrapper[4922]: I1128 06:57:57.464306 4922 generic.go:334] "Generic (PLEG): container finished" podID="706cad39-0e87-44d4-9879-1328daa634f4" containerID="9f5c37519643ff8e16c97e02d3430c8faa05ed4c4b3b88685e9ca4366f97e754" exitCode=0 Nov 28 06:57:57 crc kubenswrapper[4922]: I1128 06:57:57.464414 4922 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-55d568895d-hdfv2" Nov 28 06:57:57 crc kubenswrapper[4922]: I1128 06:57:57.464375 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-55d568895d-hdfv2" event={"ID":"706cad39-0e87-44d4-9879-1328daa634f4","Type":"ContainerDied","Data":"9f5c37519643ff8e16c97e02d3430c8faa05ed4c4b3b88685e9ca4366f97e754"} Nov 28 06:57:57 crc kubenswrapper[4922]: I1128 06:57:57.464790 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-55d568895d-hdfv2" event={"ID":"706cad39-0e87-44d4-9879-1328daa634f4","Type":"ContainerDied","Data":"454f65e0242e0f7726e5951070c8520f8555f0772d8dd45af4124c7ee301a03b"} Nov 28 06:57:57 crc kubenswrapper[4922]: I1128 06:57:57.478032 4922 scope.go:117] "RemoveContainer" containerID="1f7ed4e717bf93ea2cd4e43a5fcd5e1a529e6555c4cc08fb246f181247415d27" Nov 28 06:57:57 crc kubenswrapper[4922]: I1128 06:57:57.478374 4922 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-57d65d459f-jzbf8"] Nov 28 06:57:57 crc kubenswrapper[4922]: E1128 06:57:57.478458 4922 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1f7ed4e717bf93ea2cd4e43a5fcd5e1a529e6555c4cc08fb246f181247415d27\": container with ID starting with 1f7ed4e717bf93ea2cd4e43a5fcd5e1a529e6555c4cc08fb246f181247415d27 not found: ID does not exist" containerID="1f7ed4e717bf93ea2cd4e43a5fcd5e1a529e6555c4cc08fb246f181247415d27" Nov 28 06:57:57 crc kubenswrapper[4922]: I1128 06:57:57.478491 4922 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1f7ed4e717bf93ea2cd4e43a5fcd5e1a529e6555c4cc08fb246f181247415d27"} err="failed to get container status \"1f7ed4e717bf93ea2cd4e43a5fcd5e1a529e6555c4cc08fb246f181247415d27\": rpc error: code = NotFound desc = could not find container \"1f7ed4e717bf93ea2cd4e43a5fcd5e1a529e6555c4cc08fb246f181247415d27\": container with ID starting with 1f7ed4e717bf93ea2cd4e43a5fcd5e1a529e6555c4cc08fb246f181247415d27 not found: 
ID does not exist" Nov 28 06:57:57 crc kubenswrapper[4922]: I1128 06:57:57.478518 4922 scope.go:117] "RemoveContainer" containerID="9f5c37519643ff8e16c97e02d3430c8faa05ed4c4b3b88685e9ca4366f97e754" Nov 28 06:57:57 crc kubenswrapper[4922]: I1128 06:57:57.484890 4922 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-57d65d459f-jzbf8"] Nov 28 06:57:57 crc kubenswrapper[4922]: I1128 06:57:57.487709 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/5686f5d7-4c34-459c-addc-f4a9335238d2-proxy-ca-bundles\") pod \"controller-manager-6c59b458cf-64w5v\" (UID: \"5686f5d7-4c34-459c-addc-f4a9335238d2\") " pod="openshift-controller-manager/controller-manager-6c59b458cf-64w5v" Nov 28 06:57:57 crc kubenswrapper[4922]: I1128 06:57:57.487776 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5686f5d7-4c34-459c-addc-f4a9335238d2-config\") pod \"controller-manager-6c59b458cf-64w5v\" (UID: \"5686f5d7-4c34-459c-addc-f4a9335238d2\") " pod="openshift-controller-manager/controller-manager-6c59b458cf-64w5v" Nov 28 06:57:57 crc kubenswrapper[4922]: I1128 06:57:57.487797 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9e536b56-424b-4041-9cb5-63c5edb6bd99-config\") pod \"route-controller-manager-6cdcb8b5d6-kcnnv\" (UID: \"9e536b56-424b-4041-9cb5-63c5edb6bd99\") " pod="openshift-route-controller-manager/route-controller-manager-6cdcb8b5d6-kcnnv" Nov 28 06:57:57 crc kubenswrapper[4922]: I1128 06:57:57.487815 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/9e536b56-424b-4041-9cb5-63c5edb6bd99-client-ca\") pod \"route-controller-manager-6cdcb8b5d6-kcnnv\" (UID: \"9e536b56-424b-4041-9cb5-63c5edb6bd99\") " pod="openshift-route-controller-manager/route-controller-manager-6cdcb8b5d6-kcnnv" Nov 28 06:57:57 crc kubenswrapper[4922]: I1128 06:57:57.487836 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9e536b56-424b-4041-9cb5-63c5edb6bd99-serving-cert\") pod \"route-controller-manager-6cdcb8b5d6-kcnnv\" (UID: \"9e536b56-424b-4041-9cb5-63c5edb6bd99\") " pod="openshift-route-controller-manager/route-controller-manager-6cdcb8b5d6-kcnnv" Nov 28 06:57:57 crc kubenswrapper[4922]: I1128 06:57:57.487852 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/5686f5d7-4c34-459c-addc-f4a9335238d2-serving-cert\") pod \"controller-manager-6c59b458cf-64w5v\" (UID: \"5686f5d7-4c34-459c-addc-f4a9335238d2\") " pod="openshift-controller-manager/controller-manager-6c59b458cf-64w5v" Nov 28 06:57:57 crc kubenswrapper[4922]: I1128 06:57:57.487890 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cft2r\" (UniqueName: \"kubernetes.io/projected/5686f5d7-4c34-459c-addc-f4a9335238d2-kube-api-access-cft2r\") pod \"controller-manager-6c59b458cf-64w5v\" (UID: \"5686f5d7-4c34-459c-addc-f4a9335238d2\") " pod="openshift-controller-manager/controller-manager-6c59b458cf-64w5v" Nov 28 06:57:57 crc kubenswrapper[4922]: I1128 06:57:57.487907 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6l6fs\" (UniqueName: 
\"kubernetes.io/projected/9e536b56-424b-4041-9cb5-63c5edb6bd99-kube-api-access-6l6fs\") pod \"route-controller-manager-6cdcb8b5d6-kcnnv\" (UID: \"9e536b56-424b-4041-9cb5-63c5edb6bd99\") " pod="openshift-route-controller-manager/route-controller-manager-6cdcb8b5d6-kcnnv" Nov 28 06:57:57 crc kubenswrapper[4922]: I1128 06:57:57.487923 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/5686f5d7-4c34-459c-addc-f4a9335238d2-client-ca\") pod \"controller-manager-6c59b458cf-64w5v\" (UID: \"5686f5d7-4c34-459c-addc-f4a9335238d2\") " pod="openshift-controller-manager/controller-manager-6c59b458cf-64w5v" Nov 28 06:57:57 crc kubenswrapper[4922]: I1128 06:57:57.488966 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/5686f5d7-4c34-459c-addc-f4a9335238d2-client-ca\") pod \"controller-manager-6c59b458cf-64w5v\" (UID: \"5686f5d7-4c34-459c-addc-f4a9335238d2\") " pod="openshift-controller-manager/controller-manager-6c59b458cf-64w5v" Nov 28 06:57:57 crc kubenswrapper[4922]: I1128 06:57:57.489325 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/5686f5d7-4c34-459c-addc-f4a9335238d2-proxy-ca-bundles\") pod \"controller-manager-6c59b458cf-64w5v\" (UID: \"5686f5d7-4c34-459c-addc-f4a9335238d2\") " pod="openshift-controller-manager/controller-manager-6c59b458cf-64w5v" Nov 28 06:57:57 crc kubenswrapper[4922]: I1128 06:57:57.489631 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5686f5d7-4c34-459c-addc-f4a9335238d2-config\") pod \"controller-manager-6c59b458cf-64w5v\" (UID: \"5686f5d7-4c34-459c-addc-f4a9335238d2\") " pod="openshift-controller-manager/controller-manager-6c59b458cf-64w5v" Nov 28 06:57:57 crc kubenswrapper[4922]: I1128 06:57:57.490006 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9e536b56-424b-4041-9cb5-63c5edb6bd99-config\") pod \"route-controller-manager-6cdcb8b5d6-kcnnv\" (UID: \"9e536b56-424b-4041-9cb5-63c5edb6bd99\") " pod="openshift-route-controller-manager/route-controller-manager-6cdcb8b5d6-kcnnv" Nov 28 06:57:57 crc kubenswrapper[4922]: I1128 06:57:57.493122 4922 scope.go:117] "RemoveContainer" containerID="9f5c37519643ff8e16c97e02d3430c8faa05ed4c4b3b88685e9ca4366f97e754" Nov 28 06:57:57 crc kubenswrapper[4922]: I1128 06:57:57.493413 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/9e536b56-424b-4041-9cb5-63c5edb6bd99-client-ca\") pod \"route-controller-manager-6cdcb8b5d6-kcnnv\" (UID: \"9e536b56-424b-4041-9cb5-63c5edb6bd99\") " pod="openshift-route-controller-manager/route-controller-manager-6cdcb8b5d6-kcnnv" Nov 28 06:57:57 crc kubenswrapper[4922]: I1128 06:57:57.493481 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9e536b56-424b-4041-9cb5-63c5edb6bd99-serving-cert\") pod \"route-controller-manager-6cdcb8b5d6-kcnnv\" (UID: \"9e536b56-424b-4041-9cb5-63c5edb6bd99\") " pod="openshift-route-controller-manager/route-controller-manager-6cdcb8b5d6-kcnnv" Nov 28 06:57:57 crc kubenswrapper[4922]: E1128 06:57:57.494269 4922 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container 
\"9f5c37519643ff8e16c97e02d3430c8faa05ed4c4b3b88685e9ca4366f97e754\": container with ID starting with 9f5c37519643ff8e16c97e02d3430c8faa05ed4c4b3b88685e9ca4366f97e754 not found: ID does not exist" containerID="9f5c37519643ff8e16c97e02d3430c8faa05ed4c4b3b88685e9ca4366f97e754" Nov 28 06:57:57 crc kubenswrapper[4922]: I1128 06:57:57.494304 4922 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9f5c37519643ff8e16c97e02d3430c8faa05ed4c4b3b88685e9ca4366f97e754"} err="failed to get container status \"9f5c37519643ff8e16c97e02d3430c8faa05ed4c4b3b88685e9ca4366f97e754\": rpc error: code = NotFound desc = could not find container \"9f5c37519643ff8e16c97e02d3430c8faa05ed4c4b3b88685e9ca4366f97e754\": container with ID starting with 9f5c37519643ff8e16c97e02d3430c8faa05ed4c4b3b88685e9ca4366f97e754 not found: ID does not exist" Nov 28 06:57:57 crc kubenswrapper[4922]: I1128 06:57:57.494428 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/5686f5d7-4c34-459c-addc-f4a9335238d2-serving-cert\") pod \"controller-manager-6c59b458cf-64w5v\" (UID: \"5686f5d7-4c34-459c-addc-f4a9335238d2\") " pod="openshift-controller-manager/controller-manager-6c59b458cf-64w5v" Nov 28 06:57:57 crc kubenswrapper[4922]: I1128 06:57:57.499826 4922 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-55d568895d-hdfv2"] Nov 28 06:57:57 crc kubenswrapper[4922]: I1128 06:57:57.507109 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6l6fs\" (UniqueName: \"kubernetes.io/projected/9e536b56-424b-4041-9cb5-63c5edb6bd99-kube-api-access-6l6fs\") pod \"route-controller-manager-6cdcb8b5d6-kcnnv\" (UID: \"9e536b56-424b-4041-9cb5-63c5edb6bd99\") " pod="openshift-route-controller-manager/route-controller-manager-6cdcb8b5d6-kcnnv" Nov 28 06:57:57 crc kubenswrapper[4922]: I1128 06:57:57.511609 4922 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-controller-manager/controller-manager-55d568895d-hdfv2"] Nov 28 06:57:57 crc kubenswrapper[4922]: I1128 06:57:57.513767 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cft2r\" (UniqueName: \"kubernetes.io/projected/5686f5d7-4c34-459c-addc-f4a9335238d2-kube-api-access-cft2r\") pod \"controller-manager-6c59b458cf-64w5v\" (UID: \"5686f5d7-4c34-459c-addc-f4a9335238d2\") " pod="openshift-controller-manager/controller-manager-6c59b458cf-64w5v" Nov 28 06:57:57 crc kubenswrapper[4922]: I1128 06:57:57.545378 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6cdcb8b5d6-kcnnv" Nov 28 06:57:57 crc kubenswrapper[4922]: I1128 06:57:57.557698 4922 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager/controller-manager-6c59b458cf-64w5v" Nov 28 06:57:57 crc kubenswrapper[4922]: I1128 06:57:57.805482 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6cdcb8b5d6-kcnnv"] Nov 28 06:57:57 crc kubenswrapper[4922]: I1128 06:57:57.963923 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-6c59b458cf-64w5v"] Nov 28 06:57:58 crc kubenswrapper[4922]: I1128 06:57:58.470262 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-6c59b458cf-64w5v" event={"ID":"5686f5d7-4c34-459c-addc-f4a9335238d2","Type":"ContainerStarted","Data":"8783dcf357914bed469f9108dbe6e45affd4e86c285e240b65312f111a532b89"} Nov 28 06:57:58 crc kubenswrapper[4922]: I1128 06:57:58.470305 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-6c59b458cf-64w5v" event={"ID":"5686f5d7-4c34-459c-addc-f4a9335238d2","Type":"ContainerStarted","Data":"67d1ed3a3dcfc2f32cfbc3213790c47bb140aa2a22f8ea5b78cde977e7cd5e31"} Nov 28 06:57:58 crc kubenswrapper[4922]: I1128 06:57:58.470440 4922 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-controller-manager/controller-manager-6c59b458cf-64w5v" Nov 28 06:57:58 crc kubenswrapper[4922]: I1128 06:57:58.472239 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6cdcb8b5d6-kcnnv" event={"ID":"9e536b56-424b-4041-9cb5-63c5edb6bd99","Type":"ContainerStarted","Data":"c824e0d3ca95486feb30581033528fe2fd342e53dc90c13fb3c522b924c03206"} Nov 28 06:57:58 crc kubenswrapper[4922]: I1128 06:57:58.472283 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6cdcb8b5d6-kcnnv" event={"ID":"9e536b56-424b-4041-9cb5-63c5edb6bd99","Type":"ContainerStarted","Data":"9d8a34f97a8488e23a49a9e37669830e6d530eb5857bad96ff7879fa5d9861f7"} Nov 28 06:57:58 crc kubenswrapper[4922]: I1128 06:57:58.472419 4922 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-route-controller-manager/route-controller-manager-6cdcb8b5d6-kcnnv" Nov 28 06:57:58 crc kubenswrapper[4922]: I1128 06:57:58.477342 4922 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-route-controller-manager/route-controller-manager-6cdcb8b5d6-kcnnv" Nov 28 06:57:58 crc kubenswrapper[4922]: I1128 06:57:58.486197 4922 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager/controller-manager-6c59b458cf-64w5v" podStartSLOduration=3.486174988 podStartE2EDuration="3.486174988s" podCreationTimestamp="2025-11-28 06:57:55 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 06:57:58.484876283 +0000 UTC m=+323.405271865" watchObservedRunningTime="2025-11-28 06:57:58.486174988 +0000 UTC m=+323.406570570" Nov 28 06:57:58 crc kubenswrapper[4922]: I1128 06:57:58.488787 4922 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-controller-manager/controller-manager-6c59b458cf-64w5v" Nov 28 06:57:58 crc kubenswrapper[4922]: I1128 06:57:58.529959 4922 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-route-controller-manager/route-controller-manager-6cdcb8b5d6-kcnnv" 
podStartSLOduration=3.529943776 podStartE2EDuration="3.529943776s" podCreationTimestamp="2025-11-28 06:57:55 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 06:57:58.513320967 +0000 UTC m=+323.433716559" watchObservedRunningTime="2025-11-28 06:57:58.529943776 +0000 UTC m=+323.450339358" Nov 28 06:57:59 crc kubenswrapper[4922]: I1128 06:57:59.408921 4922 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="706cad39-0e87-44d4-9879-1328daa634f4" path="/var/lib/kubelet/pods/706cad39-0e87-44d4-9879-1328daa634f4/volumes" Nov 28 06:57:59 crc kubenswrapper[4922]: I1128 06:57:59.410303 4922 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ed6effb9-e602-41e9-acd4-273251da4a07" path="/var/lib/kubelet/pods/ed6effb9-e602-41e9-acd4-273251da4a07/volumes" Nov 28 06:58:18 crc kubenswrapper[4922]: I1128 06:58:18.269366 4922 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-image-registry/image-registry-66df7c8f76-gn5p2"] Nov 28 06:58:18 crc kubenswrapper[4922]: I1128 06:58:18.271088 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-66df7c8f76-gn5p2" Nov 28 06:58:18 crc kubenswrapper[4922]: I1128 06:58:18.293423 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/image-registry-66df7c8f76-gn5p2"] Nov 28 06:58:18 crc kubenswrapper[4922]: I1128 06:58:18.383622 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-66df7c8f76-gn5p2\" (UID: \"77f7229e-3f4b-4440-aec7-7159077f25b5\") " pod="openshift-image-registry/image-registry-66df7c8f76-gn5p2" Nov 28 06:58:18 crc kubenswrapper[4922]: I1128 06:58:18.383708 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/77f7229e-3f4b-4440-aec7-7159077f25b5-registry-tls\") pod \"image-registry-66df7c8f76-gn5p2\" (UID: \"77f7229e-3f4b-4440-aec7-7159077f25b5\") " pod="openshift-image-registry/image-registry-66df7c8f76-gn5p2" Nov 28 06:58:18 crc kubenswrapper[4922]: I1128 06:58:18.383737 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/77f7229e-3f4b-4440-aec7-7159077f25b5-bound-sa-token\") pod \"image-registry-66df7c8f76-gn5p2\" (UID: \"77f7229e-3f4b-4440-aec7-7159077f25b5\") " pod="openshift-image-registry/image-registry-66df7c8f76-gn5p2" Nov 28 06:58:18 crc kubenswrapper[4922]: I1128 06:58:18.383788 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/77f7229e-3f4b-4440-aec7-7159077f25b5-trusted-ca\") pod \"image-registry-66df7c8f76-gn5p2\" (UID: \"77f7229e-3f4b-4440-aec7-7159077f25b5\") " pod="openshift-image-registry/image-registry-66df7c8f76-gn5p2" Nov 28 06:58:18 crc kubenswrapper[4922]: I1128 06:58:18.383870 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/77f7229e-3f4b-4440-aec7-7159077f25b5-installation-pull-secrets\") pod \"image-registry-66df7c8f76-gn5p2\" (UID: 
\"77f7229e-3f4b-4440-aec7-7159077f25b5\") " pod="openshift-image-registry/image-registry-66df7c8f76-gn5p2" Nov 28 06:58:18 crc kubenswrapper[4922]: I1128 06:58:18.383922 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/77f7229e-3f4b-4440-aec7-7159077f25b5-registry-certificates\") pod \"image-registry-66df7c8f76-gn5p2\" (UID: \"77f7229e-3f4b-4440-aec7-7159077f25b5\") " pod="openshift-image-registry/image-registry-66df7c8f76-gn5p2" Nov 28 06:58:18 crc kubenswrapper[4922]: I1128 06:58:18.384016 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xn4fz\" (UniqueName: \"kubernetes.io/projected/77f7229e-3f4b-4440-aec7-7159077f25b5-kube-api-access-xn4fz\") pod \"image-registry-66df7c8f76-gn5p2\" (UID: \"77f7229e-3f4b-4440-aec7-7159077f25b5\") " pod="openshift-image-registry/image-registry-66df7c8f76-gn5p2" Nov 28 06:58:18 crc kubenswrapper[4922]: I1128 06:58:18.384065 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/77f7229e-3f4b-4440-aec7-7159077f25b5-ca-trust-extracted\") pod \"image-registry-66df7c8f76-gn5p2\" (UID: \"77f7229e-3f4b-4440-aec7-7159077f25b5\") " pod="openshift-image-registry/image-registry-66df7c8f76-gn5p2" Nov 28 06:58:18 crc kubenswrapper[4922]: I1128 06:58:18.409299 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-66df7c8f76-gn5p2\" (UID: \"77f7229e-3f4b-4440-aec7-7159077f25b5\") " pod="openshift-image-registry/image-registry-66df7c8f76-gn5p2" Nov 28 06:58:18 crc kubenswrapper[4922]: I1128 06:58:18.485885 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/77f7229e-3f4b-4440-aec7-7159077f25b5-installation-pull-secrets\") pod \"image-registry-66df7c8f76-gn5p2\" (UID: \"77f7229e-3f4b-4440-aec7-7159077f25b5\") " pod="openshift-image-registry/image-registry-66df7c8f76-gn5p2" Nov 28 06:58:18 crc kubenswrapper[4922]: I1128 06:58:18.485958 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/77f7229e-3f4b-4440-aec7-7159077f25b5-registry-certificates\") pod \"image-registry-66df7c8f76-gn5p2\" (UID: \"77f7229e-3f4b-4440-aec7-7159077f25b5\") " pod="openshift-image-registry/image-registry-66df7c8f76-gn5p2" Nov 28 06:58:18 crc kubenswrapper[4922]: I1128 06:58:18.486031 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xn4fz\" (UniqueName: \"kubernetes.io/projected/77f7229e-3f4b-4440-aec7-7159077f25b5-kube-api-access-xn4fz\") pod \"image-registry-66df7c8f76-gn5p2\" (UID: \"77f7229e-3f4b-4440-aec7-7159077f25b5\") " pod="openshift-image-registry/image-registry-66df7c8f76-gn5p2" Nov 28 06:58:18 crc kubenswrapper[4922]: I1128 06:58:18.486058 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/77f7229e-3f4b-4440-aec7-7159077f25b5-ca-trust-extracted\") pod \"image-registry-66df7c8f76-gn5p2\" (UID: \"77f7229e-3f4b-4440-aec7-7159077f25b5\") " 
pod="openshift-image-registry/image-registry-66df7c8f76-gn5p2" Nov 28 06:58:18 crc kubenswrapper[4922]: I1128 06:58:18.486131 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/77f7229e-3f4b-4440-aec7-7159077f25b5-registry-tls\") pod \"image-registry-66df7c8f76-gn5p2\" (UID: \"77f7229e-3f4b-4440-aec7-7159077f25b5\") " pod="openshift-image-registry/image-registry-66df7c8f76-gn5p2" Nov 28 06:58:18 crc kubenswrapper[4922]: I1128 06:58:18.486155 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/77f7229e-3f4b-4440-aec7-7159077f25b5-bound-sa-token\") pod \"image-registry-66df7c8f76-gn5p2\" (UID: \"77f7229e-3f4b-4440-aec7-7159077f25b5\") " pod="openshift-image-registry/image-registry-66df7c8f76-gn5p2" Nov 28 06:58:18 crc kubenswrapper[4922]: I1128 06:58:18.486216 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/77f7229e-3f4b-4440-aec7-7159077f25b5-trusted-ca\") pod \"image-registry-66df7c8f76-gn5p2\" (UID: \"77f7229e-3f4b-4440-aec7-7159077f25b5\") " pod="openshift-image-registry/image-registry-66df7c8f76-gn5p2" Nov 28 06:58:18 crc kubenswrapper[4922]: I1128 06:58:18.487826 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/77f7229e-3f4b-4440-aec7-7159077f25b5-trusted-ca\") pod \"image-registry-66df7c8f76-gn5p2\" (UID: \"77f7229e-3f4b-4440-aec7-7159077f25b5\") " pod="openshift-image-registry/image-registry-66df7c8f76-gn5p2" Nov 28 06:58:18 crc kubenswrapper[4922]: I1128 06:58:18.488465 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/77f7229e-3f4b-4440-aec7-7159077f25b5-registry-certificates\") pod \"image-registry-66df7c8f76-gn5p2\" (UID: \"77f7229e-3f4b-4440-aec7-7159077f25b5\") " pod="openshift-image-registry/image-registry-66df7c8f76-gn5p2" Nov 28 06:58:18 crc kubenswrapper[4922]: I1128 06:58:18.492084 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/77f7229e-3f4b-4440-aec7-7159077f25b5-ca-trust-extracted\") pod \"image-registry-66df7c8f76-gn5p2\" (UID: \"77f7229e-3f4b-4440-aec7-7159077f25b5\") " pod="openshift-image-registry/image-registry-66df7c8f76-gn5p2" Nov 28 06:58:18 crc kubenswrapper[4922]: I1128 06:58:18.493993 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/77f7229e-3f4b-4440-aec7-7159077f25b5-registry-tls\") pod \"image-registry-66df7c8f76-gn5p2\" (UID: \"77f7229e-3f4b-4440-aec7-7159077f25b5\") " pod="openshift-image-registry/image-registry-66df7c8f76-gn5p2" Nov 28 06:58:18 crc kubenswrapper[4922]: I1128 06:58:18.497860 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/77f7229e-3f4b-4440-aec7-7159077f25b5-installation-pull-secrets\") pod \"image-registry-66df7c8f76-gn5p2\" (UID: \"77f7229e-3f4b-4440-aec7-7159077f25b5\") " pod="openshift-image-registry/image-registry-66df7c8f76-gn5p2" Nov 28 06:58:18 crc kubenswrapper[4922]: I1128 06:58:18.510726 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: 
\"kubernetes.io/projected/77f7229e-3f4b-4440-aec7-7159077f25b5-bound-sa-token\") pod \"image-registry-66df7c8f76-gn5p2\" (UID: \"77f7229e-3f4b-4440-aec7-7159077f25b5\") " pod="openshift-image-registry/image-registry-66df7c8f76-gn5p2" Nov 28 06:58:18 crc kubenswrapper[4922]: I1128 06:58:18.515338 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xn4fz\" (UniqueName: \"kubernetes.io/projected/77f7229e-3f4b-4440-aec7-7159077f25b5-kube-api-access-xn4fz\") pod \"image-registry-66df7c8f76-gn5p2\" (UID: \"77f7229e-3f4b-4440-aec7-7159077f25b5\") " pod="openshift-image-registry/image-registry-66df7c8f76-gn5p2" Nov 28 06:58:18 crc kubenswrapper[4922]: I1128 06:58:18.589093 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-66df7c8f76-gn5p2" Nov 28 06:58:19 crc kubenswrapper[4922]: I1128 06:58:19.060309 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/image-registry-66df7c8f76-gn5p2"] Nov 28 06:58:19 crc kubenswrapper[4922]: W1128 06:58:19.065599 4922 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod77f7229e_3f4b_4440_aec7_7159077f25b5.slice/crio-bf20313955f26562911f174a41ea53aff66a033b7499813f92fe47f9e34cb7f7 WatchSource:0}: Error finding container bf20313955f26562911f174a41ea53aff66a033b7499813f92fe47f9e34cb7f7: Status 404 returned error can't find the container with id bf20313955f26562911f174a41ea53aff66a033b7499813f92fe47f9e34cb7f7 Nov 28 06:58:19 crc kubenswrapper[4922]: I1128 06:58:19.600126 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-66df7c8f76-gn5p2" event={"ID":"77f7229e-3f4b-4440-aec7-7159077f25b5","Type":"ContainerStarted","Data":"bf20313955f26562911f174a41ea53aff66a033b7499813f92fe47f9e34cb7f7"} Nov 28 06:58:22 crc kubenswrapper[4922]: I1128 06:58:22.619149 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-66df7c8f76-gn5p2" event={"ID":"77f7229e-3f4b-4440-aec7-7159077f25b5","Type":"ContainerStarted","Data":"84f723837b6ff71027f2daae48b996e85a2744e5c52859b01ec6f6a9e10fd762"} Nov 28 06:58:22 crc kubenswrapper[4922]: I1128 06:58:22.619598 4922 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-image-registry/image-registry-66df7c8f76-gn5p2" Nov 28 06:58:22 crc kubenswrapper[4922]: I1128 06:58:22.637615 4922 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-image-registry/image-registry-66df7c8f76-gn5p2" podStartSLOduration=4.637593101 podStartE2EDuration="4.637593101s" podCreationTimestamp="2025-11-28 06:58:18 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 06:58:22.636955553 +0000 UTC m=+347.557351145" watchObservedRunningTime="2025-11-28 06:58:22.637593101 +0000 UTC m=+347.557988693" Nov 28 06:58:27 crc kubenswrapper[4922]: I1128 06:58:27.311530 4922 patch_prober.go:28] interesting pod/machine-config-daemon-h8wk6 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 06:58:27 crc kubenswrapper[4922]: I1128 06:58:27.311819 4922 prober.go:107] "Probe failed" probeType="Liveness" 
pod="openshift-machine-config-operator/machine-config-daemon-h8wk6" podUID="0498340a-5e95-42bf-a0a6-8ac89a6b8858" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 06:58:38 crc kubenswrapper[4922]: I1128 06:58:38.597750 4922 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-image-registry/image-registry-66df7c8f76-gn5p2" Nov 28 06:58:38 crc kubenswrapper[4922]: I1128 06:58:38.672141 4922 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-sckww"] Nov 28 06:58:50 crc kubenswrapper[4922]: I1128 06:58:50.039747 4922 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-lppzs"] Nov 28 06:58:50 crc kubenswrapper[4922]: I1128 06:58:50.042811 4922 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-lppzs" podUID="b82398ae-40c9-40dc-8775-65f999dac1a8" containerName="registry-server" containerID="cri-o://00cce55595643414e3d382a026e612b8ad4b7f70d61f97fdc1fcffcebb8a5dec" gracePeriod=30 Nov 28 06:58:50 crc kubenswrapper[4922]: I1128 06:58:50.049323 4922 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-fww6r"] Nov 28 06:58:50 crc kubenswrapper[4922]: I1128 06:58:50.049531 4922 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-fww6r" podUID="23771385-c219-4713-9c79-d4802b2f13a7" containerName="registry-server" containerID="cri-o://c5a8d7b0fb31fc9d18d2bdb730d3bfe14caba46ed5ff09c391a38b891b198c5c" gracePeriod=30 Nov 28 06:58:50 crc kubenswrapper[4922]: I1128 06:58:50.067315 4922 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-2t2xx"] Nov 28 06:58:50 crc kubenswrapper[4922]: I1128 06:58:50.067557 4922 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/marketplace-operator-79b997595-2t2xx" podUID="7a50e5be-5b15-472d-a504-3dc449b474e6" containerName="marketplace-operator" containerID="cri-o://53b3c10cd9c830b69858609b75d7345194dddd72a220a695c2be6192cbb8830f" gracePeriod=30 Nov 28 06:58:50 crc kubenswrapper[4922]: I1128 06:58:50.080693 4922 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-d9mhp"] Nov 28 06:58:50 crc kubenswrapper[4922]: I1128 06:58:50.080916 4922 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-d9mhp" podUID="5254d7c5-8faa-4ede-a82a-210426648d02" containerName="registry-server" containerID="cri-o://3de61e4dac8f0f6cf1a48b5ffcbba88aea90eeb2a0dc6d232ffbaf12e47a81ec" gracePeriod=30 Nov 28 06:58:50 crc kubenswrapper[4922]: I1128 06:58:50.094097 4922 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-qgkkf"] Nov 28 06:58:50 crc kubenswrapper[4922]: I1128 06:58:50.094579 4922 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-qgkkf" podUID="915bf055-c217-4565-a245-8901b61def3e" containerName="registry-server" containerID="cri-o://18de1d9bac96c96f8fd0b2c9fd217290396b81e7ba0451ee7b749ea61bf21c36" gracePeriod=30 Nov 28 06:58:50 crc kubenswrapper[4922]: I1128 06:58:50.099758 4922 kubelet.go:2421] "SyncLoop ADD" source="api" 
pods=["openshift-marketplace/marketplace-operator-79b997595-vpzh2"] Nov 28 06:58:50 crc kubenswrapper[4922]: I1128 06:58:50.105139 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-vpzh2"] Nov 28 06:58:50 crc kubenswrapper[4922]: I1128 06:58:50.105254 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-vpzh2" Nov 28 06:58:50 crc kubenswrapper[4922]: I1128 06:58:50.153100 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ccj2q\" (UniqueName: \"kubernetes.io/projected/75a072db-ed75-4f86-8fef-e5c9de393433-kube-api-access-ccj2q\") pod \"marketplace-operator-79b997595-vpzh2\" (UID: \"75a072db-ed75-4f86-8fef-e5c9de393433\") " pod="openshift-marketplace/marketplace-operator-79b997595-vpzh2" Nov 28 06:58:50 crc kubenswrapper[4922]: I1128 06:58:50.153178 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/75a072db-ed75-4f86-8fef-e5c9de393433-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-vpzh2\" (UID: \"75a072db-ed75-4f86-8fef-e5c9de393433\") " pod="openshift-marketplace/marketplace-operator-79b997595-vpzh2" Nov 28 06:58:50 crc kubenswrapper[4922]: I1128 06:58:50.153241 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/75a072db-ed75-4f86-8fef-e5c9de393433-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-vpzh2\" (UID: \"75a072db-ed75-4f86-8fef-e5c9de393433\") " pod="openshift-marketplace/marketplace-operator-79b997595-vpzh2" Nov 28 06:58:50 crc kubenswrapper[4922]: I1128 06:58:50.256343 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/75a072db-ed75-4f86-8fef-e5c9de393433-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-vpzh2\" (UID: \"75a072db-ed75-4f86-8fef-e5c9de393433\") " pod="openshift-marketplace/marketplace-operator-79b997595-vpzh2" Nov 28 06:58:50 crc kubenswrapper[4922]: I1128 06:58:50.256413 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ccj2q\" (UniqueName: \"kubernetes.io/projected/75a072db-ed75-4f86-8fef-e5c9de393433-kube-api-access-ccj2q\") pod \"marketplace-operator-79b997595-vpzh2\" (UID: \"75a072db-ed75-4f86-8fef-e5c9de393433\") " pod="openshift-marketplace/marketplace-operator-79b997595-vpzh2" Nov 28 06:58:50 crc kubenswrapper[4922]: I1128 06:58:50.256490 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/75a072db-ed75-4f86-8fef-e5c9de393433-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-vpzh2\" (UID: \"75a072db-ed75-4f86-8fef-e5c9de393433\") " pod="openshift-marketplace/marketplace-operator-79b997595-vpzh2" Nov 28 06:58:50 crc kubenswrapper[4922]: I1128 06:58:50.257750 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/75a072db-ed75-4f86-8fef-e5c9de393433-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-vpzh2\" (UID: \"75a072db-ed75-4f86-8fef-e5c9de393433\") " pod="openshift-marketplace/marketplace-operator-79b997595-vpzh2" Nov 
28 06:58:50 crc kubenswrapper[4922]: I1128 06:58:50.271010 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/75a072db-ed75-4f86-8fef-e5c9de393433-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-vpzh2\" (UID: \"75a072db-ed75-4f86-8fef-e5c9de393433\") " pod="openshift-marketplace/marketplace-operator-79b997595-vpzh2" Nov 28 06:58:50 crc kubenswrapper[4922]: I1128 06:58:50.273661 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ccj2q\" (UniqueName: \"kubernetes.io/projected/75a072db-ed75-4f86-8fef-e5c9de393433-kube-api-access-ccj2q\") pod \"marketplace-operator-79b997595-vpzh2\" (UID: \"75a072db-ed75-4f86-8fef-e5c9de393433\") " pod="openshift-marketplace/marketplace-operator-79b997595-vpzh2" Nov 28 06:58:50 crc kubenswrapper[4922]: E1128 06:58:50.326170 4922 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 18de1d9bac96c96f8fd0b2c9fd217290396b81e7ba0451ee7b749ea61bf21c36 is running failed: container process not found" containerID="18de1d9bac96c96f8fd0b2c9fd217290396b81e7ba0451ee7b749ea61bf21c36" cmd=["grpc_health_probe","-addr=:50051"] Nov 28 06:58:50 crc kubenswrapper[4922]: E1128 06:58:50.326658 4922 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 18de1d9bac96c96f8fd0b2c9fd217290396b81e7ba0451ee7b749ea61bf21c36 is running failed: container process not found" containerID="18de1d9bac96c96f8fd0b2c9fd217290396b81e7ba0451ee7b749ea61bf21c36" cmd=["grpc_health_probe","-addr=:50051"] Nov 28 06:58:50 crc kubenswrapper[4922]: E1128 06:58:50.326993 4922 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 18de1d9bac96c96f8fd0b2c9fd217290396b81e7ba0451ee7b749ea61bf21c36 is running failed: container process not found" containerID="18de1d9bac96c96f8fd0b2c9fd217290396b81e7ba0451ee7b749ea61bf21c36" cmd=["grpc_health_probe","-addr=:50051"] Nov 28 06:58:50 crc kubenswrapper[4922]: E1128 06:58:50.327052 4922 prober.go:104] "Probe errored" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 18de1d9bac96c96f8fd0b2c9fd217290396b81e7ba0451ee7b749ea61bf21c36 is running failed: container process not found" probeType="Readiness" pod="openshift-marketplace/redhat-operators-qgkkf" podUID="915bf055-c217-4565-a245-8901b61def3e" containerName="registry-server" Nov 28 06:58:50 crc kubenswrapper[4922]: I1128 06:58:50.553853 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-vpzh2" Nov 28 06:58:50 crc kubenswrapper[4922]: I1128 06:58:50.562287 4922 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-lppzs" Nov 28 06:58:50 crc kubenswrapper[4922]: I1128 06:58:50.571197 4922 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-qgkkf" Nov 28 06:58:50 crc kubenswrapper[4922]: I1128 06:58:50.585268 4922 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-d9mhp" Nov 28 06:58:50 crc kubenswrapper[4922]: I1128 06:58:50.589474 4922 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-2t2xx" Nov 28 06:58:50 crc kubenswrapper[4922]: I1128 06:58:50.602868 4922 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-fww6r" Nov 28 06:58:50 crc kubenswrapper[4922]: I1128 06:58:50.668510 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5254d7c5-8faa-4ede-a82a-210426648d02-utilities\") pod \"5254d7c5-8faa-4ede-a82a-210426648d02\" (UID: \"5254d7c5-8faa-4ede-a82a-210426648d02\") " Nov 28 06:58:50 crc kubenswrapper[4922]: I1128 06:58:50.668597 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/7a50e5be-5b15-472d-a504-3dc449b474e6-marketplace-operator-metrics\") pod \"7a50e5be-5b15-472d-a504-3dc449b474e6\" (UID: \"7a50e5be-5b15-472d-a504-3dc449b474e6\") " Nov 28 06:58:50 crc kubenswrapper[4922]: I1128 06:58:50.668626 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/23771385-c219-4713-9c79-d4802b2f13a7-catalog-content\") pod \"23771385-c219-4713-9c79-d4802b2f13a7\" (UID: \"23771385-c219-4713-9c79-d4802b2f13a7\") " Nov 28 06:58:50 crc kubenswrapper[4922]: I1128 06:58:50.668654 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b82398ae-40c9-40dc-8775-65f999dac1a8-utilities\") pod \"b82398ae-40c9-40dc-8775-65f999dac1a8\" (UID: \"b82398ae-40c9-40dc-8775-65f999dac1a8\") " Nov 28 06:58:50 crc kubenswrapper[4922]: I1128 06:58:50.668679 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-l27pt\" (UniqueName: \"kubernetes.io/projected/5254d7c5-8faa-4ede-a82a-210426648d02-kube-api-access-l27pt\") pod \"5254d7c5-8faa-4ede-a82a-210426648d02\" (UID: \"5254d7c5-8faa-4ede-a82a-210426648d02\") " Nov 28 06:58:50 crc kubenswrapper[4922]: I1128 06:58:50.670065 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/23771385-c219-4713-9c79-d4802b2f13a7-utilities\") pod \"23771385-c219-4713-9c79-d4802b2f13a7\" (UID: \"23771385-c219-4713-9c79-d4802b2f13a7\") " Nov 28 06:58:50 crc kubenswrapper[4922]: I1128 06:58:50.670241 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7w28p\" (UniqueName: \"kubernetes.io/projected/23771385-c219-4713-9c79-d4802b2f13a7-kube-api-access-7w28p\") pod \"23771385-c219-4713-9c79-d4802b2f13a7\" (UID: \"23771385-c219-4713-9c79-d4802b2f13a7\") " Nov 28 06:58:50 crc kubenswrapper[4922]: I1128 06:58:50.670323 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7h294\" (UniqueName: \"kubernetes.io/projected/915bf055-c217-4565-a245-8901b61def3e-kube-api-access-7h294\") pod \"915bf055-c217-4565-a245-8901b61def3e\" (UID: \"915bf055-c217-4565-a245-8901b61def3e\") " Nov 28 06:58:50 crc kubenswrapper[4922]: I1128 06:58:50.670356 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xc26q\" (UniqueName: 
\"kubernetes.io/projected/b82398ae-40c9-40dc-8775-65f999dac1a8-kube-api-access-xc26q\") pod \"b82398ae-40c9-40dc-8775-65f999dac1a8\" (UID: \"b82398ae-40c9-40dc-8775-65f999dac1a8\") " Nov 28 06:58:50 crc kubenswrapper[4922]: I1128 06:58:50.670392 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/7a50e5be-5b15-472d-a504-3dc449b474e6-marketplace-trusted-ca\") pod \"7a50e5be-5b15-472d-a504-3dc449b474e6\" (UID: \"7a50e5be-5b15-472d-a504-3dc449b474e6\") " Nov 28 06:58:50 crc kubenswrapper[4922]: I1128 06:58:50.670423 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/915bf055-c217-4565-a245-8901b61def3e-utilities\") pod \"915bf055-c217-4565-a245-8901b61def3e\" (UID: \"915bf055-c217-4565-a245-8901b61def3e\") " Nov 28 06:58:50 crc kubenswrapper[4922]: I1128 06:58:50.670456 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/915bf055-c217-4565-a245-8901b61def3e-catalog-content\") pod \"915bf055-c217-4565-a245-8901b61def3e\" (UID: \"915bf055-c217-4565-a245-8901b61def3e\") " Nov 28 06:58:50 crc kubenswrapper[4922]: I1128 06:58:50.670454 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b82398ae-40c9-40dc-8775-65f999dac1a8-utilities" (OuterVolumeSpecName: "utilities") pod "b82398ae-40c9-40dc-8775-65f999dac1a8" (UID: "b82398ae-40c9-40dc-8775-65f999dac1a8"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 06:58:50 crc kubenswrapper[4922]: I1128 06:58:50.670548 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5254d7c5-8faa-4ede-a82a-210426648d02-catalog-content\") pod \"5254d7c5-8faa-4ede-a82a-210426648d02\" (UID: \"5254d7c5-8faa-4ede-a82a-210426648d02\") " Nov 28 06:58:50 crc kubenswrapper[4922]: I1128 06:58:50.670871 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5254d7c5-8faa-4ede-a82a-210426648d02-utilities" (OuterVolumeSpecName: "utilities") pod "5254d7c5-8faa-4ede-a82a-210426648d02" (UID: "5254d7c5-8faa-4ede-a82a-210426648d02"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 06:58:50 crc kubenswrapper[4922]: I1128 06:58:50.670894 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b82398ae-40c9-40dc-8775-65f999dac1a8-catalog-content\") pod \"b82398ae-40c9-40dc-8775-65f999dac1a8\" (UID: \"b82398ae-40c9-40dc-8775-65f999dac1a8\") " Nov 28 06:58:50 crc kubenswrapper[4922]: I1128 06:58:50.672483 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/23771385-c219-4713-9c79-d4802b2f13a7-utilities" (OuterVolumeSpecName: "utilities") pod "23771385-c219-4713-9c79-d4802b2f13a7" (UID: "23771385-c219-4713-9c79-d4802b2f13a7"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 06:58:50 crc kubenswrapper[4922]: I1128 06:58:50.673620 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7a50e5be-5b15-472d-a504-3dc449b474e6-marketplace-trusted-ca" (OuterVolumeSpecName: "marketplace-trusted-ca") pod "7a50e5be-5b15-472d-a504-3dc449b474e6" (UID: "7a50e5be-5b15-472d-a504-3dc449b474e6"). InnerVolumeSpecName "marketplace-trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 06:58:50 crc kubenswrapper[4922]: I1128 06:58:50.674353 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5254d7c5-8faa-4ede-a82a-210426648d02-kube-api-access-l27pt" (OuterVolumeSpecName: "kube-api-access-l27pt") pod "5254d7c5-8faa-4ede-a82a-210426648d02" (UID: "5254d7c5-8faa-4ede-a82a-210426648d02"). InnerVolumeSpecName "kube-api-access-l27pt". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 06:58:50 crc kubenswrapper[4922]: I1128 06:58:50.675852 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2r9qq\" (UniqueName: \"kubernetes.io/projected/7a50e5be-5b15-472d-a504-3dc449b474e6-kube-api-access-2r9qq\") pod \"7a50e5be-5b15-472d-a504-3dc449b474e6\" (UID: \"7a50e5be-5b15-472d-a504-3dc449b474e6\") " Nov 28 06:58:50 crc kubenswrapper[4922]: I1128 06:58:50.676724 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b82398ae-40c9-40dc-8775-65f999dac1a8-kube-api-access-xc26q" (OuterVolumeSpecName: "kube-api-access-xc26q") pod "b82398ae-40c9-40dc-8775-65f999dac1a8" (UID: "b82398ae-40c9-40dc-8775-65f999dac1a8"). InnerVolumeSpecName "kube-api-access-xc26q". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 06:58:50 crc kubenswrapper[4922]: I1128 06:58:50.677211 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7a50e5be-5b15-472d-a504-3dc449b474e6-kube-api-access-2r9qq" (OuterVolumeSpecName: "kube-api-access-2r9qq") pod "7a50e5be-5b15-472d-a504-3dc449b474e6" (UID: "7a50e5be-5b15-472d-a504-3dc449b474e6"). InnerVolumeSpecName "kube-api-access-2r9qq". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 06:58:50 crc kubenswrapper[4922]: I1128 06:58:50.680272 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/915bf055-c217-4565-a245-8901b61def3e-utilities" (OuterVolumeSpecName: "utilities") pod "915bf055-c217-4565-a245-8901b61def3e" (UID: "915bf055-c217-4565-a245-8901b61def3e"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 06:58:50 crc kubenswrapper[4922]: I1128 06:58:50.684524 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/23771385-c219-4713-9c79-d4802b2f13a7-kube-api-access-7w28p" (OuterVolumeSpecName: "kube-api-access-7w28p") pod "23771385-c219-4713-9c79-d4802b2f13a7" (UID: "23771385-c219-4713-9c79-d4802b2f13a7"). InnerVolumeSpecName "kube-api-access-7w28p". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 06:58:50 crc kubenswrapper[4922]: I1128 06:58:50.684826 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/915bf055-c217-4565-a245-8901b61def3e-kube-api-access-7h294" (OuterVolumeSpecName: "kube-api-access-7h294") pod "915bf055-c217-4565-a245-8901b61def3e" (UID: "915bf055-c217-4565-a245-8901b61def3e"). 
InnerVolumeSpecName "kube-api-access-7h294". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 06:58:50 crc kubenswrapper[4922]: I1128 06:58:50.686495 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7a50e5be-5b15-472d-a504-3dc449b474e6-marketplace-operator-metrics" (OuterVolumeSpecName: "marketplace-operator-metrics") pod "7a50e5be-5b15-472d-a504-3dc449b474e6" (UID: "7a50e5be-5b15-472d-a504-3dc449b474e6"). InnerVolumeSpecName "marketplace-operator-metrics". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 06:58:50 crc kubenswrapper[4922]: I1128 06:58:50.688838 4922 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b82398ae-40c9-40dc-8775-65f999dac1a8-utilities\") on node \"crc\" DevicePath \"\"" Nov 28 06:58:50 crc kubenswrapper[4922]: I1128 06:58:50.688877 4922 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-l27pt\" (UniqueName: \"kubernetes.io/projected/5254d7c5-8faa-4ede-a82a-210426648d02-kube-api-access-l27pt\") on node \"crc\" DevicePath \"\"" Nov 28 06:58:50 crc kubenswrapper[4922]: I1128 06:58:50.688902 4922 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/23771385-c219-4713-9c79-d4802b2f13a7-utilities\") on node \"crc\" DevicePath \"\"" Nov 28 06:58:50 crc kubenswrapper[4922]: I1128 06:58:50.688918 4922 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7w28p\" (UniqueName: \"kubernetes.io/projected/23771385-c219-4713-9c79-d4802b2f13a7-kube-api-access-7w28p\") on node \"crc\" DevicePath \"\"" Nov 28 06:58:50 crc kubenswrapper[4922]: I1128 06:58:50.688933 4922 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7h294\" (UniqueName: \"kubernetes.io/projected/915bf055-c217-4565-a245-8901b61def3e-kube-api-access-7h294\") on node \"crc\" DevicePath \"\"" Nov 28 06:58:50 crc kubenswrapper[4922]: I1128 06:58:50.688947 4922 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xc26q\" (UniqueName: \"kubernetes.io/projected/b82398ae-40c9-40dc-8775-65f999dac1a8-kube-api-access-xc26q\") on node \"crc\" DevicePath \"\"" Nov 28 06:58:50 crc kubenswrapper[4922]: I1128 06:58:50.688968 4922 reconciler_common.go:293] "Volume detached for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/7a50e5be-5b15-472d-a504-3dc449b474e6-marketplace-trusted-ca\") on node \"crc\" DevicePath \"\"" Nov 28 06:58:50 crc kubenswrapper[4922]: I1128 06:58:50.688982 4922 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/915bf055-c217-4565-a245-8901b61def3e-utilities\") on node \"crc\" DevicePath \"\"" Nov 28 06:58:50 crc kubenswrapper[4922]: I1128 06:58:50.688995 4922 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2r9qq\" (UniqueName: \"kubernetes.io/projected/7a50e5be-5b15-472d-a504-3dc449b474e6-kube-api-access-2r9qq\") on node \"crc\" DevicePath \"\"" Nov 28 06:58:50 crc kubenswrapper[4922]: I1128 06:58:50.689008 4922 reconciler_common.go:293] "Volume detached for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/7a50e5be-5b15-472d-a504-3dc449b474e6-marketplace-operator-metrics\") on node \"crc\" DevicePath \"\"" Nov 28 06:58:50 crc kubenswrapper[4922]: I1128 06:58:50.689026 4922 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: 
\"kubernetes.io/empty-dir/5254d7c5-8faa-4ede-a82a-210426648d02-utilities\") on node \"crc\" DevicePath \"\"" Nov 28 06:58:50 crc kubenswrapper[4922]: I1128 06:58:50.697371 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5254d7c5-8faa-4ede-a82a-210426648d02-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "5254d7c5-8faa-4ede-a82a-210426648d02" (UID: "5254d7c5-8faa-4ede-a82a-210426648d02"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 06:58:50 crc kubenswrapper[4922]: I1128 06:58:50.759644 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/23771385-c219-4713-9c79-d4802b2f13a7-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "23771385-c219-4713-9c79-d4802b2f13a7" (UID: "23771385-c219-4713-9c79-d4802b2f13a7"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 06:58:50 crc kubenswrapper[4922]: I1128 06:58:50.769493 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b82398ae-40c9-40dc-8775-65f999dac1a8-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "b82398ae-40c9-40dc-8775-65f999dac1a8" (UID: "b82398ae-40c9-40dc-8775-65f999dac1a8"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 06:58:50 crc kubenswrapper[4922]: I1128 06:58:50.792895 4922 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5254d7c5-8faa-4ede-a82a-210426648d02-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 28 06:58:50 crc kubenswrapper[4922]: I1128 06:58:50.792924 4922 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b82398ae-40c9-40dc-8775-65f999dac1a8-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 28 06:58:50 crc kubenswrapper[4922]: I1128 06:58:50.792933 4922 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/23771385-c219-4713-9c79-d4802b2f13a7-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 28 06:58:50 crc kubenswrapper[4922]: I1128 06:58:50.799924 4922 generic.go:334] "Generic (PLEG): container finished" podID="23771385-c219-4713-9c79-d4802b2f13a7" containerID="c5a8d7b0fb31fc9d18d2bdb730d3bfe14caba46ed5ff09c391a38b891b198c5c" exitCode=0 Nov 28 06:58:50 crc kubenswrapper[4922]: I1128 06:58:50.799996 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-fww6r" event={"ID":"23771385-c219-4713-9c79-d4802b2f13a7","Type":"ContainerDied","Data":"c5a8d7b0fb31fc9d18d2bdb730d3bfe14caba46ed5ff09c391a38b891b198c5c"} Nov 28 06:58:50 crc kubenswrapper[4922]: I1128 06:58:50.800025 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-fww6r" event={"ID":"23771385-c219-4713-9c79-d4802b2f13a7","Type":"ContainerDied","Data":"2b9ac76de04dedea80e36a70a92cc55dfb61c33cf3ff3369662ad059d4e433e1"} Nov 28 06:58:50 crc kubenswrapper[4922]: I1128 06:58:50.800043 4922 scope.go:117] "RemoveContainer" containerID="c5a8d7b0fb31fc9d18d2bdb730d3bfe14caba46ed5ff09c391a38b891b198c5c" Nov 28 06:58:50 crc kubenswrapper[4922]: I1128 06:58:50.800162 4922 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-fww6r" Nov 28 06:58:50 crc kubenswrapper[4922]: I1128 06:58:50.804384 4922 generic.go:334] "Generic (PLEG): container finished" podID="b82398ae-40c9-40dc-8775-65f999dac1a8" containerID="00cce55595643414e3d382a026e612b8ad4b7f70d61f97fdc1fcffcebb8a5dec" exitCode=0 Nov 28 06:58:50 crc kubenswrapper[4922]: I1128 06:58:50.804442 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-lppzs" event={"ID":"b82398ae-40c9-40dc-8775-65f999dac1a8","Type":"ContainerDied","Data":"00cce55595643414e3d382a026e612b8ad4b7f70d61f97fdc1fcffcebb8a5dec"} Nov 28 06:58:50 crc kubenswrapper[4922]: I1128 06:58:50.804465 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-lppzs" event={"ID":"b82398ae-40c9-40dc-8775-65f999dac1a8","Type":"ContainerDied","Data":"dfc73622d6241d40d6f03ac756d8c62d47f09fab94e72fb742028afafd600817"} Nov 28 06:58:50 crc kubenswrapper[4922]: I1128 06:58:50.804532 4922 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-lppzs" Nov 28 06:58:50 crc kubenswrapper[4922]: I1128 06:58:50.809675 4922 generic.go:334] "Generic (PLEG): container finished" podID="915bf055-c217-4565-a245-8901b61def3e" containerID="18de1d9bac96c96f8fd0b2c9fd217290396b81e7ba0451ee7b749ea61bf21c36" exitCode=0 Nov 28 06:58:50 crc kubenswrapper[4922]: I1128 06:58:50.809717 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-qgkkf" event={"ID":"915bf055-c217-4565-a245-8901b61def3e","Type":"ContainerDied","Data":"18de1d9bac96c96f8fd0b2c9fd217290396b81e7ba0451ee7b749ea61bf21c36"} Nov 28 06:58:50 crc kubenswrapper[4922]: I1128 06:58:50.809752 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-qgkkf" event={"ID":"915bf055-c217-4565-a245-8901b61def3e","Type":"ContainerDied","Data":"e665c05be32adc1767950893da756e642519d3474d3f7fbbe0904ed099f93d97"} Nov 28 06:58:50 crc kubenswrapper[4922]: I1128 06:58:50.809752 4922 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-qgkkf" Nov 28 06:58:50 crc kubenswrapper[4922]: I1128 06:58:50.812987 4922 generic.go:334] "Generic (PLEG): container finished" podID="7a50e5be-5b15-472d-a504-3dc449b474e6" containerID="53b3c10cd9c830b69858609b75d7345194dddd72a220a695c2be6192cbb8830f" exitCode=0 Nov 28 06:58:50 crc kubenswrapper[4922]: I1128 06:58:50.813046 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-2t2xx" event={"ID":"7a50e5be-5b15-472d-a504-3dc449b474e6","Type":"ContainerDied","Data":"53b3c10cd9c830b69858609b75d7345194dddd72a220a695c2be6192cbb8830f"} Nov 28 06:58:50 crc kubenswrapper[4922]: I1128 06:58:50.813070 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-2t2xx" event={"ID":"7a50e5be-5b15-472d-a504-3dc449b474e6","Type":"ContainerDied","Data":"07bc42b9e7fb2ec96b70f7916bdd2a626738382e3bc611dca594d5ac1bce6955"} Nov 28 06:58:50 crc kubenswrapper[4922]: I1128 06:58:50.813136 4922 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-2t2xx" Nov 28 06:58:50 crc kubenswrapper[4922]: I1128 06:58:50.816733 4922 scope.go:117] "RemoveContainer" containerID="52982adf6070d76ced68c8514a1f287f3792a9e2855ae675d6fef971f75a616d" Nov 28 06:58:50 crc kubenswrapper[4922]: I1128 06:58:50.818279 4922 generic.go:334] "Generic (PLEG): container finished" podID="5254d7c5-8faa-4ede-a82a-210426648d02" containerID="3de61e4dac8f0f6cf1a48b5ffcbba88aea90eeb2a0dc6d232ffbaf12e47a81ec" exitCode=0 Nov 28 06:58:50 crc kubenswrapper[4922]: I1128 06:58:50.818313 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-d9mhp" event={"ID":"5254d7c5-8faa-4ede-a82a-210426648d02","Type":"ContainerDied","Data":"3de61e4dac8f0f6cf1a48b5ffcbba88aea90eeb2a0dc6d232ffbaf12e47a81ec"} Nov 28 06:58:50 crc kubenswrapper[4922]: I1128 06:58:50.818337 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-d9mhp" event={"ID":"5254d7c5-8faa-4ede-a82a-210426648d02","Type":"ContainerDied","Data":"d653cc0c4757606d6c20b92dfcea5b844f77cde5814e443596ccc4c95323fd49"} Nov 28 06:58:50 crc kubenswrapper[4922]: I1128 06:58:50.818412 4922 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-d9mhp" Nov 28 06:58:50 crc kubenswrapper[4922]: I1128 06:58:50.824530 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/915bf055-c217-4565-a245-8901b61def3e-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "915bf055-c217-4565-a245-8901b61def3e" (UID: "915bf055-c217-4565-a245-8901b61def3e"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 06:58:50 crc kubenswrapper[4922]: I1128 06:58:50.834525 4922 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-lppzs"] Nov 28 06:58:50 crc kubenswrapper[4922]: I1128 06:58:50.846944 4922 scope.go:117] "RemoveContainer" containerID="6cf810f2b631921f4029d4e2f5663d3f2b03345ffd37912d8caec436972dd7c9" Nov 28 06:58:50 crc kubenswrapper[4922]: I1128 06:58:50.856545 4922 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-lppzs"] Nov 28 06:58:50 crc kubenswrapper[4922]: I1128 06:58:50.864094 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-vpzh2"] Nov 28 06:58:50 crc kubenswrapper[4922]: I1128 06:58:50.868550 4922 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-fww6r"] Nov 28 06:58:50 crc kubenswrapper[4922]: I1128 06:58:50.873325 4922 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-fww6r"] Nov 28 06:58:50 crc kubenswrapper[4922]: I1128 06:58:50.878311 4922 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-2t2xx"] Nov 28 06:58:50 crc kubenswrapper[4922]: I1128 06:58:50.881728 4922 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-2t2xx"] Nov 28 06:58:50 crc kubenswrapper[4922]: I1128 06:58:50.884824 4922 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-d9mhp"] Nov 28 06:58:50 crc kubenswrapper[4922]: I1128 06:58:50.886146 4922 scope.go:117] "RemoveContainer" 
containerID="c5a8d7b0fb31fc9d18d2bdb730d3bfe14caba46ed5ff09c391a38b891b198c5c" Nov 28 06:58:50 crc kubenswrapper[4922]: E1128 06:58:50.886525 4922 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c5a8d7b0fb31fc9d18d2bdb730d3bfe14caba46ed5ff09c391a38b891b198c5c\": container with ID starting with c5a8d7b0fb31fc9d18d2bdb730d3bfe14caba46ed5ff09c391a38b891b198c5c not found: ID does not exist" containerID="c5a8d7b0fb31fc9d18d2bdb730d3bfe14caba46ed5ff09c391a38b891b198c5c" Nov 28 06:58:50 crc kubenswrapper[4922]: I1128 06:58:50.886559 4922 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c5a8d7b0fb31fc9d18d2bdb730d3bfe14caba46ed5ff09c391a38b891b198c5c"} err="failed to get container status \"c5a8d7b0fb31fc9d18d2bdb730d3bfe14caba46ed5ff09c391a38b891b198c5c\": rpc error: code = NotFound desc = could not find container \"c5a8d7b0fb31fc9d18d2bdb730d3bfe14caba46ed5ff09c391a38b891b198c5c\": container with ID starting with c5a8d7b0fb31fc9d18d2bdb730d3bfe14caba46ed5ff09c391a38b891b198c5c not found: ID does not exist" Nov 28 06:58:50 crc kubenswrapper[4922]: I1128 06:58:50.886582 4922 scope.go:117] "RemoveContainer" containerID="52982adf6070d76ced68c8514a1f287f3792a9e2855ae675d6fef971f75a616d" Nov 28 06:58:50 crc kubenswrapper[4922]: E1128 06:58:50.887360 4922 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"52982adf6070d76ced68c8514a1f287f3792a9e2855ae675d6fef971f75a616d\": container with ID starting with 52982adf6070d76ced68c8514a1f287f3792a9e2855ae675d6fef971f75a616d not found: ID does not exist" containerID="52982adf6070d76ced68c8514a1f287f3792a9e2855ae675d6fef971f75a616d" Nov 28 06:58:50 crc kubenswrapper[4922]: I1128 06:58:50.887391 4922 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"52982adf6070d76ced68c8514a1f287f3792a9e2855ae675d6fef971f75a616d"} err="failed to get container status \"52982adf6070d76ced68c8514a1f287f3792a9e2855ae675d6fef971f75a616d\": rpc error: code = NotFound desc = could not find container \"52982adf6070d76ced68c8514a1f287f3792a9e2855ae675d6fef971f75a616d\": container with ID starting with 52982adf6070d76ced68c8514a1f287f3792a9e2855ae675d6fef971f75a616d not found: ID does not exist" Nov 28 06:58:50 crc kubenswrapper[4922]: I1128 06:58:50.887411 4922 scope.go:117] "RemoveContainer" containerID="6cf810f2b631921f4029d4e2f5663d3f2b03345ffd37912d8caec436972dd7c9" Nov 28 06:58:50 crc kubenswrapper[4922]: E1128 06:58:50.887677 4922 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"6cf810f2b631921f4029d4e2f5663d3f2b03345ffd37912d8caec436972dd7c9\": container with ID starting with 6cf810f2b631921f4029d4e2f5663d3f2b03345ffd37912d8caec436972dd7c9 not found: ID does not exist" containerID="6cf810f2b631921f4029d4e2f5663d3f2b03345ffd37912d8caec436972dd7c9" Nov 28 06:58:50 crc kubenswrapper[4922]: I1128 06:58:50.887699 4922 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6cf810f2b631921f4029d4e2f5663d3f2b03345ffd37912d8caec436972dd7c9"} err="failed to get container status \"6cf810f2b631921f4029d4e2f5663d3f2b03345ffd37912d8caec436972dd7c9\": rpc error: code = NotFound desc = could not find container \"6cf810f2b631921f4029d4e2f5663d3f2b03345ffd37912d8caec436972dd7c9\": container with ID starting with 
6cf810f2b631921f4029d4e2f5663d3f2b03345ffd37912d8caec436972dd7c9 not found: ID does not exist" Nov 28 06:58:50 crc kubenswrapper[4922]: I1128 06:58:50.887716 4922 scope.go:117] "RemoveContainer" containerID="00cce55595643414e3d382a026e612b8ad4b7f70d61f97fdc1fcffcebb8a5dec" Nov 28 06:58:50 crc kubenswrapper[4922]: I1128 06:58:50.889489 4922 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-d9mhp"] Nov 28 06:58:50 crc kubenswrapper[4922]: I1128 06:58:50.893719 4922 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/915bf055-c217-4565-a245-8901b61def3e-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 28 06:58:50 crc kubenswrapper[4922]: I1128 06:58:50.898515 4922 scope.go:117] "RemoveContainer" containerID="50c3b7c8717c8d22f8af7ba9b96ea7f955e258adddc5dd2da0e18634a84e6cfd" Nov 28 06:58:50 crc kubenswrapper[4922]: I1128 06:58:50.911941 4922 scope.go:117] "RemoveContainer" containerID="cb9a8a751161d86a14b75aaa003db1ee05eb074bb91cccf7c959719524886dbd" Nov 28 06:58:50 crc kubenswrapper[4922]: I1128 06:58:50.936211 4922 scope.go:117] "RemoveContainer" containerID="00cce55595643414e3d382a026e612b8ad4b7f70d61f97fdc1fcffcebb8a5dec" Nov 28 06:58:50 crc kubenswrapper[4922]: E1128 06:58:50.936715 4922 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"00cce55595643414e3d382a026e612b8ad4b7f70d61f97fdc1fcffcebb8a5dec\": container with ID starting with 00cce55595643414e3d382a026e612b8ad4b7f70d61f97fdc1fcffcebb8a5dec not found: ID does not exist" containerID="00cce55595643414e3d382a026e612b8ad4b7f70d61f97fdc1fcffcebb8a5dec" Nov 28 06:58:50 crc kubenswrapper[4922]: I1128 06:58:50.936783 4922 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"00cce55595643414e3d382a026e612b8ad4b7f70d61f97fdc1fcffcebb8a5dec"} err="failed to get container status \"00cce55595643414e3d382a026e612b8ad4b7f70d61f97fdc1fcffcebb8a5dec\": rpc error: code = NotFound desc = could not find container \"00cce55595643414e3d382a026e612b8ad4b7f70d61f97fdc1fcffcebb8a5dec\": container with ID starting with 00cce55595643414e3d382a026e612b8ad4b7f70d61f97fdc1fcffcebb8a5dec not found: ID does not exist" Nov 28 06:58:50 crc kubenswrapper[4922]: I1128 06:58:50.936814 4922 scope.go:117] "RemoveContainer" containerID="50c3b7c8717c8d22f8af7ba9b96ea7f955e258adddc5dd2da0e18634a84e6cfd" Nov 28 06:58:50 crc kubenswrapper[4922]: E1128 06:58:50.937716 4922 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"50c3b7c8717c8d22f8af7ba9b96ea7f955e258adddc5dd2da0e18634a84e6cfd\": container with ID starting with 50c3b7c8717c8d22f8af7ba9b96ea7f955e258adddc5dd2da0e18634a84e6cfd not found: ID does not exist" containerID="50c3b7c8717c8d22f8af7ba9b96ea7f955e258adddc5dd2da0e18634a84e6cfd" Nov 28 06:58:50 crc kubenswrapper[4922]: I1128 06:58:50.937743 4922 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"50c3b7c8717c8d22f8af7ba9b96ea7f955e258adddc5dd2da0e18634a84e6cfd"} err="failed to get container status \"50c3b7c8717c8d22f8af7ba9b96ea7f955e258adddc5dd2da0e18634a84e6cfd\": rpc error: code = NotFound desc = could not find container \"50c3b7c8717c8d22f8af7ba9b96ea7f955e258adddc5dd2da0e18634a84e6cfd\": container with ID starting with 50c3b7c8717c8d22f8af7ba9b96ea7f955e258adddc5dd2da0e18634a84e6cfd not found: ID does not exist" Nov 28 
06:58:50 crc kubenswrapper[4922]: I1128 06:58:50.937765 4922 scope.go:117] "RemoveContainer" containerID="cb9a8a751161d86a14b75aaa003db1ee05eb074bb91cccf7c959719524886dbd" Nov 28 06:58:50 crc kubenswrapper[4922]: E1128 06:58:50.938652 4922 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"cb9a8a751161d86a14b75aaa003db1ee05eb074bb91cccf7c959719524886dbd\": container with ID starting with cb9a8a751161d86a14b75aaa003db1ee05eb074bb91cccf7c959719524886dbd not found: ID does not exist" containerID="cb9a8a751161d86a14b75aaa003db1ee05eb074bb91cccf7c959719524886dbd" Nov 28 06:58:50 crc kubenswrapper[4922]: I1128 06:58:50.938672 4922 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"cb9a8a751161d86a14b75aaa003db1ee05eb074bb91cccf7c959719524886dbd"} err="failed to get container status \"cb9a8a751161d86a14b75aaa003db1ee05eb074bb91cccf7c959719524886dbd\": rpc error: code = NotFound desc = could not find container \"cb9a8a751161d86a14b75aaa003db1ee05eb074bb91cccf7c959719524886dbd\": container with ID starting with cb9a8a751161d86a14b75aaa003db1ee05eb074bb91cccf7c959719524886dbd not found: ID does not exist" Nov 28 06:58:50 crc kubenswrapper[4922]: I1128 06:58:50.938685 4922 scope.go:117] "RemoveContainer" containerID="18de1d9bac96c96f8fd0b2c9fd217290396b81e7ba0451ee7b749ea61bf21c36" Nov 28 06:58:50 crc kubenswrapper[4922]: I1128 06:58:50.961007 4922 scope.go:117] "RemoveContainer" containerID="83ef5c97f9f6c6985007ae73e692902438365d7e53e0021ac45cf17a83ab430b" Nov 28 06:58:50 crc kubenswrapper[4922]: I1128 06:58:50.982986 4922 scope.go:117] "RemoveContainer" containerID="6c13fe9d4161a143dd9a2d740a66b64d1ca55c775f390eda39e3ec90b8d49db5" Nov 28 06:58:50 crc kubenswrapper[4922]: I1128 06:58:50.996166 4922 scope.go:117] "RemoveContainer" containerID="18de1d9bac96c96f8fd0b2c9fd217290396b81e7ba0451ee7b749ea61bf21c36" Nov 28 06:58:50 crc kubenswrapper[4922]: E1128 06:58:50.997005 4922 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"18de1d9bac96c96f8fd0b2c9fd217290396b81e7ba0451ee7b749ea61bf21c36\": container with ID starting with 18de1d9bac96c96f8fd0b2c9fd217290396b81e7ba0451ee7b749ea61bf21c36 not found: ID does not exist" containerID="18de1d9bac96c96f8fd0b2c9fd217290396b81e7ba0451ee7b749ea61bf21c36" Nov 28 06:58:50 crc kubenswrapper[4922]: I1128 06:58:50.997033 4922 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"18de1d9bac96c96f8fd0b2c9fd217290396b81e7ba0451ee7b749ea61bf21c36"} err="failed to get container status \"18de1d9bac96c96f8fd0b2c9fd217290396b81e7ba0451ee7b749ea61bf21c36\": rpc error: code = NotFound desc = could not find container \"18de1d9bac96c96f8fd0b2c9fd217290396b81e7ba0451ee7b749ea61bf21c36\": container with ID starting with 18de1d9bac96c96f8fd0b2c9fd217290396b81e7ba0451ee7b749ea61bf21c36 not found: ID does not exist" Nov 28 06:58:50 crc kubenswrapper[4922]: I1128 06:58:50.997057 4922 scope.go:117] "RemoveContainer" containerID="83ef5c97f9f6c6985007ae73e692902438365d7e53e0021ac45cf17a83ab430b" Nov 28 06:58:50 crc kubenswrapper[4922]: E1128 06:58:50.997511 4922 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"83ef5c97f9f6c6985007ae73e692902438365d7e53e0021ac45cf17a83ab430b\": container with ID starting with 83ef5c97f9f6c6985007ae73e692902438365d7e53e0021ac45cf17a83ab430b not found: ID 
does not exist" containerID="83ef5c97f9f6c6985007ae73e692902438365d7e53e0021ac45cf17a83ab430b" Nov 28 06:58:50 crc kubenswrapper[4922]: I1128 06:58:50.997550 4922 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"83ef5c97f9f6c6985007ae73e692902438365d7e53e0021ac45cf17a83ab430b"} err="failed to get container status \"83ef5c97f9f6c6985007ae73e692902438365d7e53e0021ac45cf17a83ab430b\": rpc error: code = NotFound desc = could not find container \"83ef5c97f9f6c6985007ae73e692902438365d7e53e0021ac45cf17a83ab430b\": container with ID starting with 83ef5c97f9f6c6985007ae73e692902438365d7e53e0021ac45cf17a83ab430b not found: ID does not exist" Nov 28 06:58:50 crc kubenswrapper[4922]: I1128 06:58:50.997564 4922 scope.go:117] "RemoveContainer" containerID="6c13fe9d4161a143dd9a2d740a66b64d1ca55c775f390eda39e3ec90b8d49db5" Nov 28 06:58:50 crc kubenswrapper[4922]: E1128 06:58:50.997931 4922 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"6c13fe9d4161a143dd9a2d740a66b64d1ca55c775f390eda39e3ec90b8d49db5\": container with ID starting with 6c13fe9d4161a143dd9a2d740a66b64d1ca55c775f390eda39e3ec90b8d49db5 not found: ID does not exist" containerID="6c13fe9d4161a143dd9a2d740a66b64d1ca55c775f390eda39e3ec90b8d49db5" Nov 28 06:58:50 crc kubenswrapper[4922]: I1128 06:58:50.997985 4922 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6c13fe9d4161a143dd9a2d740a66b64d1ca55c775f390eda39e3ec90b8d49db5"} err="failed to get container status \"6c13fe9d4161a143dd9a2d740a66b64d1ca55c775f390eda39e3ec90b8d49db5\": rpc error: code = NotFound desc = could not find container \"6c13fe9d4161a143dd9a2d740a66b64d1ca55c775f390eda39e3ec90b8d49db5\": container with ID starting with 6c13fe9d4161a143dd9a2d740a66b64d1ca55c775f390eda39e3ec90b8d49db5 not found: ID does not exist" Nov 28 06:58:50 crc kubenswrapper[4922]: I1128 06:58:50.998018 4922 scope.go:117] "RemoveContainer" containerID="53b3c10cd9c830b69858609b75d7345194dddd72a220a695c2be6192cbb8830f" Nov 28 06:58:51 crc kubenswrapper[4922]: I1128 06:58:51.017508 4922 scope.go:117] "RemoveContainer" containerID="e093cfc0ee7293d8fb0407b9c3558dcfcf2444e36316d5066799b6b52f54fd1c" Nov 28 06:58:51 crc kubenswrapper[4922]: I1128 06:58:51.033266 4922 scope.go:117] "RemoveContainer" containerID="53b3c10cd9c830b69858609b75d7345194dddd72a220a695c2be6192cbb8830f" Nov 28 06:58:51 crc kubenswrapper[4922]: E1128 06:58:51.033710 4922 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"53b3c10cd9c830b69858609b75d7345194dddd72a220a695c2be6192cbb8830f\": container with ID starting with 53b3c10cd9c830b69858609b75d7345194dddd72a220a695c2be6192cbb8830f not found: ID does not exist" containerID="53b3c10cd9c830b69858609b75d7345194dddd72a220a695c2be6192cbb8830f" Nov 28 06:58:51 crc kubenswrapper[4922]: I1128 06:58:51.033771 4922 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"53b3c10cd9c830b69858609b75d7345194dddd72a220a695c2be6192cbb8830f"} err="failed to get container status \"53b3c10cd9c830b69858609b75d7345194dddd72a220a695c2be6192cbb8830f\": rpc error: code = NotFound desc = could not find container \"53b3c10cd9c830b69858609b75d7345194dddd72a220a695c2be6192cbb8830f\": container with ID starting with 53b3c10cd9c830b69858609b75d7345194dddd72a220a695c2be6192cbb8830f not found: ID does not exist" Nov 28 06:58:51 crc 
kubenswrapper[4922]: I1128 06:58:51.033801 4922 scope.go:117] "RemoveContainer" containerID="e093cfc0ee7293d8fb0407b9c3558dcfcf2444e36316d5066799b6b52f54fd1c" Nov 28 06:58:51 crc kubenswrapper[4922]: E1128 06:58:51.034149 4922 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e093cfc0ee7293d8fb0407b9c3558dcfcf2444e36316d5066799b6b52f54fd1c\": container with ID starting with e093cfc0ee7293d8fb0407b9c3558dcfcf2444e36316d5066799b6b52f54fd1c not found: ID does not exist" containerID="e093cfc0ee7293d8fb0407b9c3558dcfcf2444e36316d5066799b6b52f54fd1c" Nov 28 06:58:51 crc kubenswrapper[4922]: I1128 06:58:51.034188 4922 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e093cfc0ee7293d8fb0407b9c3558dcfcf2444e36316d5066799b6b52f54fd1c"} err="failed to get container status \"e093cfc0ee7293d8fb0407b9c3558dcfcf2444e36316d5066799b6b52f54fd1c\": rpc error: code = NotFound desc = could not find container \"e093cfc0ee7293d8fb0407b9c3558dcfcf2444e36316d5066799b6b52f54fd1c\": container with ID starting with e093cfc0ee7293d8fb0407b9c3558dcfcf2444e36316d5066799b6b52f54fd1c not found: ID does not exist" Nov 28 06:58:51 crc kubenswrapper[4922]: I1128 06:58:51.034201 4922 scope.go:117] "RemoveContainer" containerID="3de61e4dac8f0f6cf1a48b5ffcbba88aea90eeb2a0dc6d232ffbaf12e47a81ec" Nov 28 06:58:51 crc kubenswrapper[4922]: I1128 06:58:51.048630 4922 scope.go:117] "RemoveContainer" containerID="ed8805f962e8cef4ddc0262ccc6ac79c68354dd8329936e3339d81de3c85a564" Nov 28 06:58:51 crc kubenswrapper[4922]: I1128 06:58:51.061675 4922 scope.go:117] "RemoveContainer" containerID="2b5d43a376f6a82f5408ac3196abad5931d58a91aa5f9bbdfcb33436a7966902" Nov 28 06:58:51 crc kubenswrapper[4922]: I1128 06:58:51.075437 4922 scope.go:117] "RemoveContainer" containerID="3de61e4dac8f0f6cf1a48b5ffcbba88aea90eeb2a0dc6d232ffbaf12e47a81ec" Nov 28 06:58:51 crc kubenswrapper[4922]: E1128 06:58:51.076097 4922 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3de61e4dac8f0f6cf1a48b5ffcbba88aea90eeb2a0dc6d232ffbaf12e47a81ec\": container with ID starting with 3de61e4dac8f0f6cf1a48b5ffcbba88aea90eeb2a0dc6d232ffbaf12e47a81ec not found: ID does not exist" containerID="3de61e4dac8f0f6cf1a48b5ffcbba88aea90eeb2a0dc6d232ffbaf12e47a81ec" Nov 28 06:58:51 crc kubenswrapper[4922]: I1128 06:58:51.076170 4922 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3de61e4dac8f0f6cf1a48b5ffcbba88aea90eeb2a0dc6d232ffbaf12e47a81ec"} err="failed to get container status \"3de61e4dac8f0f6cf1a48b5ffcbba88aea90eeb2a0dc6d232ffbaf12e47a81ec\": rpc error: code = NotFound desc = could not find container \"3de61e4dac8f0f6cf1a48b5ffcbba88aea90eeb2a0dc6d232ffbaf12e47a81ec\": container with ID starting with 3de61e4dac8f0f6cf1a48b5ffcbba88aea90eeb2a0dc6d232ffbaf12e47a81ec not found: ID does not exist" Nov 28 06:58:51 crc kubenswrapper[4922]: I1128 06:58:51.076201 4922 scope.go:117] "RemoveContainer" containerID="ed8805f962e8cef4ddc0262ccc6ac79c68354dd8329936e3339d81de3c85a564" Nov 28 06:58:51 crc kubenswrapper[4922]: E1128 06:58:51.076734 4922 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ed8805f962e8cef4ddc0262ccc6ac79c68354dd8329936e3339d81de3c85a564\": container with ID starting with ed8805f962e8cef4ddc0262ccc6ac79c68354dd8329936e3339d81de3c85a564 not found: ID does not 
exist" containerID="ed8805f962e8cef4ddc0262ccc6ac79c68354dd8329936e3339d81de3c85a564" Nov 28 06:58:51 crc kubenswrapper[4922]: I1128 06:58:51.076763 4922 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ed8805f962e8cef4ddc0262ccc6ac79c68354dd8329936e3339d81de3c85a564"} err="failed to get container status \"ed8805f962e8cef4ddc0262ccc6ac79c68354dd8329936e3339d81de3c85a564\": rpc error: code = NotFound desc = could not find container \"ed8805f962e8cef4ddc0262ccc6ac79c68354dd8329936e3339d81de3c85a564\": container with ID starting with ed8805f962e8cef4ddc0262ccc6ac79c68354dd8329936e3339d81de3c85a564 not found: ID does not exist" Nov 28 06:58:51 crc kubenswrapper[4922]: I1128 06:58:51.076784 4922 scope.go:117] "RemoveContainer" containerID="2b5d43a376f6a82f5408ac3196abad5931d58a91aa5f9bbdfcb33436a7966902" Nov 28 06:58:51 crc kubenswrapper[4922]: E1128 06:58:51.077138 4922 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"2b5d43a376f6a82f5408ac3196abad5931d58a91aa5f9bbdfcb33436a7966902\": container with ID starting with 2b5d43a376f6a82f5408ac3196abad5931d58a91aa5f9bbdfcb33436a7966902 not found: ID does not exist" containerID="2b5d43a376f6a82f5408ac3196abad5931d58a91aa5f9bbdfcb33436a7966902" Nov 28 06:58:51 crc kubenswrapper[4922]: I1128 06:58:51.077211 4922 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2b5d43a376f6a82f5408ac3196abad5931d58a91aa5f9bbdfcb33436a7966902"} err="failed to get container status \"2b5d43a376f6a82f5408ac3196abad5931d58a91aa5f9bbdfcb33436a7966902\": rpc error: code = NotFound desc = could not find container \"2b5d43a376f6a82f5408ac3196abad5931d58a91aa5f9bbdfcb33436a7966902\": container with ID starting with 2b5d43a376f6a82f5408ac3196abad5931d58a91aa5f9bbdfcb33436a7966902 not found: ID does not exist" Nov 28 06:58:51 crc kubenswrapper[4922]: I1128 06:58:51.145950 4922 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-qgkkf"] Nov 28 06:58:51 crc kubenswrapper[4922]: I1128 06:58:51.148940 4922 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-qgkkf"] Nov 28 06:58:51 crc kubenswrapper[4922]: I1128 06:58:51.408308 4922 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="23771385-c219-4713-9c79-d4802b2f13a7" path="/var/lib/kubelet/pods/23771385-c219-4713-9c79-d4802b2f13a7/volumes" Nov 28 06:58:51 crc kubenswrapper[4922]: I1128 06:58:51.409103 4922 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5254d7c5-8faa-4ede-a82a-210426648d02" path="/var/lib/kubelet/pods/5254d7c5-8faa-4ede-a82a-210426648d02/volumes" Nov 28 06:58:51 crc kubenswrapper[4922]: I1128 06:58:51.409928 4922 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7a50e5be-5b15-472d-a504-3dc449b474e6" path="/var/lib/kubelet/pods/7a50e5be-5b15-472d-a504-3dc449b474e6/volumes" Nov 28 06:58:51 crc kubenswrapper[4922]: I1128 06:58:51.411211 4922 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="915bf055-c217-4565-a245-8901b61def3e" path="/var/lib/kubelet/pods/915bf055-c217-4565-a245-8901b61def3e/volumes" Nov 28 06:58:51 crc kubenswrapper[4922]: I1128 06:58:51.411978 4922 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b82398ae-40c9-40dc-8775-65f999dac1a8" path="/var/lib/kubelet/pods/b82398ae-40c9-40dc-8775-65f999dac1a8/volumes" Nov 28 06:58:51 crc kubenswrapper[4922]: I1128 
06:58:51.833270 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-vpzh2" event={"ID":"75a072db-ed75-4f86-8fef-e5c9de393433","Type":"ContainerStarted","Data":"912938717cc269cfdd3b77d423d26ec290cbb0869bd2d62b2ac80aa573d447e7"} Nov 28 06:58:51 crc kubenswrapper[4922]: I1128 06:58:51.833696 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-vpzh2" event={"ID":"75a072db-ed75-4f86-8fef-e5c9de393433","Type":"ContainerStarted","Data":"b8214bdf4d38f4477906bcb9c3e644c59f90744c9d1bf94e2a0453cffbe2d786"} Nov 28 06:58:51 crc kubenswrapper[4922]: I1128 06:58:51.834361 4922 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/marketplace-operator-79b997595-vpzh2" Nov 28 06:58:51 crc kubenswrapper[4922]: I1128 06:58:51.840736 4922 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/marketplace-operator-79b997595-vpzh2" Nov 28 06:58:51 crc kubenswrapper[4922]: I1128 06:58:51.863054 4922 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-rtjdf"] Nov 28 06:58:51 crc kubenswrapper[4922]: E1128 06:58:51.863563 4922 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="915bf055-c217-4565-a245-8901b61def3e" containerName="extract-utilities" Nov 28 06:58:51 crc kubenswrapper[4922]: I1128 06:58:51.863609 4922 state_mem.go:107] "Deleted CPUSet assignment" podUID="915bf055-c217-4565-a245-8901b61def3e" containerName="extract-utilities" Nov 28 06:58:51 crc kubenswrapper[4922]: E1128 06:58:51.863632 4922 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b82398ae-40c9-40dc-8775-65f999dac1a8" containerName="extract-utilities" Nov 28 06:58:51 crc kubenswrapper[4922]: I1128 06:58:51.863651 4922 state_mem.go:107] "Deleted CPUSet assignment" podUID="b82398ae-40c9-40dc-8775-65f999dac1a8" containerName="extract-utilities" Nov 28 06:58:51 crc kubenswrapper[4922]: E1128 06:58:51.863676 4922 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7a50e5be-5b15-472d-a504-3dc449b474e6" containerName="marketplace-operator" Nov 28 06:58:51 crc kubenswrapper[4922]: I1128 06:58:51.863692 4922 state_mem.go:107] "Deleted CPUSet assignment" podUID="7a50e5be-5b15-472d-a504-3dc449b474e6" containerName="marketplace-operator" Nov 28 06:58:51 crc kubenswrapper[4922]: E1128 06:58:51.863714 4922 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5254d7c5-8faa-4ede-a82a-210426648d02" containerName="extract-utilities" Nov 28 06:58:51 crc kubenswrapper[4922]: I1128 06:58:51.863730 4922 state_mem.go:107] "Deleted CPUSet assignment" podUID="5254d7c5-8faa-4ede-a82a-210426648d02" containerName="extract-utilities" Nov 28 06:58:51 crc kubenswrapper[4922]: E1128 06:58:51.863750 4922 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="915bf055-c217-4565-a245-8901b61def3e" containerName="extract-content" Nov 28 06:58:51 crc kubenswrapper[4922]: I1128 06:58:51.863766 4922 state_mem.go:107] "Deleted CPUSet assignment" podUID="915bf055-c217-4565-a245-8901b61def3e" containerName="extract-content" Nov 28 06:58:51 crc kubenswrapper[4922]: E1128 06:58:51.863793 4922 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="23771385-c219-4713-9c79-d4802b2f13a7" containerName="extract-utilities" Nov 28 06:58:51 crc kubenswrapper[4922]: I1128 06:58:51.863809 4922 state_mem.go:107] "Deleted CPUSet assignment" 
podUID="23771385-c219-4713-9c79-d4802b2f13a7" containerName="extract-utilities" Nov 28 06:58:51 crc kubenswrapper[4922]: E1128 06:58:51.863843 4922 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b82398ae-40c9-40dc-8775-65f999dac1a8" containerName="registry-server" Nov 28 06:58:51 crc kubenswrapper[4922]: I1128 06:58:51.863860 4922 state_mem.go:107] "Deleted CPUSet assignment" podUID="b82398ae-40c9-40dc-8775-65f999dac1a8" containerName="registry-server" Nov 28 06:58:51 crc kubenswrapper[4922]: E1128 06:58:51.863883 4922 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7a50e5be-5b15-472d-a504-3dc449b474e6" containerName="marketplace-operator" Nov 28 06:58:51 crc kubenswrapper[4922]: I1128 06:58:51.863900 4922 state_mem.go:107] "Deleted CPUSet assignment" podUID="7a50e5be-5b15-472d-a504-3dc449b474e6" containerName="marketplace-operator" Nov 28 06:58:51 crc kubenswrapper[4922]: E1128 06:58:51.863927 4922 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b82398ae-40c9-40dc-8775-65f999dac1a8" containerName="extract-content" Nov 28 06:58:51 crc kubenswrapper[4922]: I1128 06:58:51.863963 4922 state_mem.go:107] "Deleted CPUSet assignment" podUID="b82398ae-40c9-40dc-8775-65f999dac1a8" containerName="extract-content" Nov 28 06:58:51 crc kubenswrapper[4922]: E1128 06:58:51.864012 4922 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5254d7c5-8faa-4ede-a82a-210426648d02" containerName="registry-server" Nov 28 06:58:51 crc kubenswrapper[4922]: I1128 06:58:51.864028 4922 state_mem.go:107] "Deleted CPUSet assignment" podUID="5254d7c5-8faa-4ede-a82a-210426648d02" containerName="registry-server" Nov 28 06:58:51 crc kubenswrapper[4922]: E1128 06:58:51.864052 4922 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5254d7c5-8faa-4ede-a82a-210426648d02" containerName="extract-content" Nov 28 06:58:51 crc kubenswrapper[4922]: I1128 06:58:51.864067 4922 state_mem.go:107] "Deleted CPUSet assignment" podUID="5254d7c5-8faa-4ede-a82a-210426648d02" containerName="extract-content" Nov 28 06:58:51 crc kubenswrapper[4922]: E1128 06:58:51.864085 4922 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="915bf055-c217-4565-a245-8901b61def3e" containerName="registry-server" Nov 28 06:58:51 crc kubenswrapper[4922]: I1128 06:58:51.864100 4922 state_mem.go:107] "Deleted CPUSet assignment" podUID="915bf055-c217-4565-a245-8901b61def3e" containerName="registry-server" Nov 28 06:58:51 crc kubenswrapper[4922]: E1128 06:58:51.864117 4922 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="23771385-c219-4713-9c79-d4802b2f13a7" containerName="extract-content" Nov 28 06:58:51 crc kubenswrapper[4922]: I1128 06:58:51.864130 4922 state_mem.go:107] "Deleted CPUSet assignment" podUID="23771385-c219-4713-9c79-d4802b2f13a7" containerName="extract-content" Nov 28 06:58:51 crc kubenswrapper[4922]: E1128 06:58:51.864148 4922 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="23771385-c219-4713-9c79-d4802b2f13a7" containerName="registry-server" Nov 28 06:58:51 crc kubenswrapper[4922]: I1128 06:58:51.864163 4922 state_mem.go:107] "Deleted CPUSet assignment" podUID="23771385-c219-4713-9c79-d4802b2f13a7" containerName="registry-server" Nov 28 06:58:51 crc kubenswrapper[4922]: I1128 06:58:51.864432 4922 memory_manager.go:354] "RemoveStaleState removing state" podUID="23771385-c219-4713-9c79-d4802b2f13a7" containerName="registry-server" Nov 28 06:58:51 crc kubenswrapper[4922]: I1128 06:58:51.864464 4922 
memory_manager.go:354] "RemoveStaleState removing state" podUID="b82398ae-40c9-40dc-8775-65f999dac1a8" containerName="registry-server" Nov 28 06:58:51 crc kubenswrapper[4922]: I1128 06:58:51.864488 4922 memory_manager.go:354] "RemoveStaleState removing state" podUID="7a50e5be-5b15-472d-a504-3dc449b474e6" containerName="marketplace-operator" Nov 28 06:58:51 crc kubenswrapper[4922]: I1128 06:58:51.864511 4922 memory_manager.go:354] "RemoveStaleState removing state" podUID="915bf055-c217-4565-a245-8901b61def3e" containerName="registry-server" Nov 28 06:58:51 crc kubenswrapper[4922]: I1128 06:58:51.864533 4922 memory_manager.go:354] "RemoveStaleState removing state" podUID="7a50e5be-5b15-472d-a504-3dc449b474e6" containerName="marketplace-operator" Nov 28 06:58:51 crc kubenswrapper[4922]: I1128 06:58:51.864549 4922 memory_manager.go:354] "RemoveStaleState removing state" podUID="5254d7c5-8faa-4ede-a82a-210426648d02" containerName="registry-server" Nov 28 06:58:51 crc kubenswrapper[4922]: I1128 06:58:51.866158 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-rtjdf" Nov 28 06:58:51 crc kubenswrapper[4922]: I1128 06:58:51.867193 4922 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/marketplace-operator-79b997595-vpzh2" podStartSLOduration=1.86701048 podStartE2EDuration="1.86701048s" podCreationTimestamp="2025-11-28 06:58:50 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 06:58:51.859681676 +0000 UTC m=+376.780077308" watchObservedRunningTime="2025-11-28 06:58:51.86701048 +0000 UTC m=+376.787406082" Nov 28 06:58:51 crc kubenswrapper[4922]: I1128 06:58:51.870280 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-marketplace-dockercfg-x2ctb" Nov 28 06:58:51 crc kubenswrapper[4922]: I1128 06:58:51.875786 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-rtjdf"] Nov 28 06:58:51 crc kubenswrapper[4922]: I1128 06:58:51.905450 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c6c2bb80-f2be-424e-92ce-4e3b1e9ce558-catalog-content\") pod \"redhat-marketplace-rtjdf\" (UID: \"c6c2bb80-f2be-424e-92ce-4e3b1e9ce558\") " pod="openshift-marketplace/redhat-marketplace-rtjdf" Nov 28 06:58:51 crc kubenswrapper[4922]: I1128 06:58:51.905498 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c6c2bb80-f2be-424e-92ce-4e3b1e9ce558-utilities\") pod \"redhat-marketplace-rtjdf\" (UID: \"c6c2bb80-f2be-424e-92ce-4e3b1e9ce558\") " pod="openshift-marketplace/redhat-marketplace-rtjdf" Nov 28 06:58:51 crc kubenswrapper[4922]: I1128 06:58:51.905593 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5p5pm\" (UniqueName: \"kubernetes.io/projected/c6c2bb80-f2be-424e-92ce-4e3b1e9ce558-kube-api-access-5p5pm\") pod \"redhat-marketplace-rtjdf\" (UID: \"c6c2bb80-f2be-424e-92ce-4e3b1e9ce558\") " pod="openshift-marketplace/redhat-marketplace-rtjdf" Nov 28 06:58:52 crc kubenswrapper[4922]: I1128 06:58:52.006287 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: 
\"kubernetes.io/empty-dir/c6c2bb80-f2be-424e-92ce-4e3b1e9ce558-catalog-content\") pod \"redhat-marketplace-rtjdf\" (UID: \"c6c2bb80-f2be-424e-92ce-4e3b1e9ce558\") " pod="openshift-marketplace/redhat-marketplace-rtjdf" Nov 28 06:58:52 crc kubenswrapper[4922]: I1128 06:58:52.006342 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c6c2bb80-f2be-424e-92ce-4e3b1e9ce558-utilities\") pod \"redhat-marketplace-rtjdf\" (UID: \"c6c2bb80-f2be-424e-92ce-4e3b1e9ce558\") " pod="openshift-marketplace/redhat-marketplace-rtjdf" Nov 28 06:58:52 crc kubenswrapper[4922]: I1128 06:58:52.006394 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5p5pm\" (UniqueName: \"kubernetes.io/projected/c6c2bb80-f2be-424e-92ce-4e3b1e9ce558-kube-api-access-5p5pm\") pod \"redhat-marketplace-rtjdf\" (UID: \"c6c2bb80-f2be-424e-92ce-4e3b1e9ce558\") " pod="openshift-marketplace/redhat-marketplace-rtjdf" Nov 28 06:58:52 crc kubenswrapper[4922]: I1128 06:58:52.006870 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c6c2bb80-f2be-424e-92ce-4e3b1e9ce558-catalog-content\") pod \"redhat-marketplace-rtjdf\" (UID: \"c6c2bb80-f2be-424e-92ce-4e3b1e9ce558\") " pod="openshift-marketplace/redhat-marketplace-rtjdf" Nov 28 06:58:52 crc kubenswrapper[4922]: I1128 06:58:52.006877 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c6c2bb80-f2be-424e-92ce-4e3b1e9ce558-utilities\") pod \"redhat-marketplace-rtjdf\" (UID: \"c6c2bb80-f2be-424e-92ce-4e3b1e9ce558\") " pod="openshift-marketplace/redhat-marketplace-rtjdf" Nov 28 06:58:52 crc kubenswrapper[4922]: I1128 06:58:52.028874 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5p5pm\" (UniqueName: \"kubernetes.io/projected/c6c2bb80-f2be-424e-92ce-4e3b1e9ce558-kube-api-access-5p5pm\") pod \"redhat-marketplace-rtjdf\" (UID: \"c6c2bb80-f2be-424e-92ce-4e3b1e9ce558\") " pod="openshift-marketplace/redhat-marketplace-rtjdf" Nov 28 06:58:52 crc kubenswrapper[4922]: I1128 06:58:52.197046 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-rtjdf" Nov 28 06:58:52 crc kubenswrapper[4922]: I1128 06:58:52.391013 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-rtjdf"] Nov 28 06:58:52 crc kubenswrapper[4922]: I1128 06:58:52.851646 4922 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-nxv9j"] Nov 28 06:58:52 crc kubenswrapper[4922]: I1128 06:58:52.853271 4922 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-nxv9j" Nov 28 06:58:52 crc kubenswrapper[4922]: I1128 06:58:52.857189 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-operators-dockercfg-ct8rh" Nov 28 06:58:52 crc kubenswrapper[4922]: I1128 06:58:52.871077 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-nxv9j"] Nov 28 06:58:52 crc kubenswrapper[4922]: I1128 06:58:52.875067 4922 generic.go:334] "Generic (PLEG): container finished" podID="c6c2bb80-f2be-424e-92ce-4e3b1e9ce558" containerID="468e402df2d3a96cc3dafc24f8c65d3d666da54f68c041e00f0781bf5c65f70d" exitCode=0 Nov 28 06:58:52 crc kubenswrapper[4922]: I1128 06:58:52.875897 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-rtjdf" event={"ID":"c6c2bb80-f2be-424e-92ce-4e3b1e9ce558","Type":"ContainerDied","Data":"468e402df2d3a96cc3dafc24f8c65d3d666da54f68c041e00f0781bf5c65f70d"} Nov 28 06:58:52 crc kubenswrapper[4922]: I1128 06:58:52.875947 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-rtjdf" event={"ID":"c6c2bb80-f2be-424e-92ce-4e3b1e9ce558","Type":"ContainerStarted","Data":"8d57afb737b36b130615ccce6050459e3b2e6771a83afdce37752edfee814efc"} Nov 28 06:58:52 crc kubenswrapper[4922]: I1128 06:58:52.919021 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a3162404-2375-4cd0-8d8d-73520e721f53-utilities\") pod \"redhat-operators-nxv9j\" (UID: \"a3162404-2375-4cd0-8d8d-73520e721f53\") " pod="openshift-marketplace/redhat-operators-nxv9j" Nov 28 06:58:52 crc kubenswrapper[4922]: I1128 06:58:52.919121 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-82b62\" (UniqueName: \"kubernetes.io/projected/a3162404-2375-4cd0-8d8d-73520e721f53-kube-api-access-82b62\") pod \"redhat-operators-nxv9j\" (UID: \"a3162404-2375-4cd0-8d8d-73520e721f53\") " pod="openshift-marketplace/redhat-operators-nxv9j" Nov 28 06:58:52 crc kubenswrapper[4922]: I1128 06:58:52.919533 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a3162404-2375-4cd0-8d8d-73520e721f53-catalog-content\") pod \"redhat-operators-nxv9j\" (UID: \"a3162404-2375-4cd0-8d8d-73520e721f53\") " pod="openshift-marketplace/redhat-operators-nxv9j" Nov 28 06:58:53 crc kubenswrapper[4922]: I1128 06:58:53.020714 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-82b62\" (UniqueName: \"kubernetes.io/projected/a3162404-2375-4cd0-8d8d-73520e721f53-kube-api-access-82b62\") pod \"redhat-operators-nxv9j\" (UID: \"a3162404-2375-4cd0-8d8d-73520e721f53\") " pod="openshift-marketplace/redhat-operators-nxv9j" Nov 28 06:58:53 crc kubenswrapper[4922]: I1128 06:58:53.020809 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a3162404-2375-4cd0-8d8d-73520e721f53-catalog-content\") pod \"redhat-operators-nxv9j\" (UID: \"a3162404-2375-4cd0-8d8d-73520e721f53\") " pod="openshift-marketplace/redhat-operators-nxv9j" Nov 28 06:58:53 crc kubenswrapper[4922]: I1128 06:58:53.020853 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: 
\"kubernetes.io/empty-dir/a3162404-2375-4cd0-8d8d-73520e721f53-utilities\") pod \"redhat-operators-nxv9j\" (UID: \"a3162404-2375-4cd0-8d8d-73520e721f53\") " pod="openshift-marketplace/redhat-operators-nxv9j" Nov 28 06:58:53 crc kubenswrapper[4922]: I1128 06:58:53.021424 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a3162404-2375-4cd0-8d8d-73520e721f53-utilities\") pod \"redhat-operators-nxv9j\" (UID: \"a3162404-2375-4cd0-8d8d-73520e721f53\") " pod="openshift-marketplace/redhat-operators-nxv9j" Nov 28 06:58:53 crc kubenswrapper[4922]: I1128 06:58:53.021652 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a3162404-2375-4cd0-8d8d-73520e721f53-catalog-content\") pod \"redhat-operators-nxv9j\" (UID: \"a3162404-2375-4cd0-8d8d-73520e721f53\") " pod="openshift-marketplace/redhat-operators-nxv9j" Nov 28 06:58:53 crc kubenswrapper[4922]: I1128 06:58:53.053624 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-82b62\" (UniqueName: \"kubernetes.io/projected/a3162404-2375-4cd0-8d8d-73520e721f53-kube-api-access-82b62\") pod \"redhat-operators-nxv9j\" (UID: \"a3162404-2375-4cd0-8d8d-73520e721f53\") " pod="openshift-marketplace/redhat-operators-nxv9j" Nov 28 06:58:53 crc kubenswrapper[4922]: I1128 06:58:53.195648 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-nxv9j" Nov 28 06:58:53 crc kubenswrapper[4922]: I1128 06:58:53.450553 4922 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-6c59b458cf-64w5v"] Nov 28 06:58:53 crc kubenswrapper[4922]: I1128 06:58:53.451122 4922 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-controller-manager/controller-manager-6c59b458cf-64w5v" podUID="5686f5d7-4c34-459c-addc-f4a9335238d2" containerName="controller-manager" containerID="cri-o://8783dcf357914bed469f9108dbe6e45affd4e86c285e240b65312f111a532b89" gracePeriod=30 Nov 28 06:58:53 crc kubenswrapper[4922]: I1128 06:58:53.465777 4922 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6cdcb8b5d6-kcnnv"] Nov 28 06:58:53 crc kubenswrapper[4922]: I1128 06:58:53.466175 4922 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-route-controller-manager/route-controller-manager-6cdcb8b5d6-kcnnv" podUID="9e536b56-424b-4041-9cb5-63c5edb6bd99" containerName="route-controller-manager" containerID="cri-o://c824e0d3ca95486feb30581033528fe2fd342e53dc90c13fb3c522b924c03206" gracePeriod=30 Nov 28 06:58:53 crc kubenswrapper[4922]: I1128 06:58:53.642447 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-nxv9j"] Nov 28 06:58:53 crc kubenswrapper[4922]: W1128 06:58:53.649108 4922 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-poda3162404_2375_4cd0_8d8d_73520e721f53.slice/crio-4891569b073b8bf4e22bf827588ad9182703a555df8d8a88ad89b4565364ac23 WatchSource:0}: Error finding container 4891569b073b8bf4e22bf827588ad9182703a555df8d8a88ad89b4565364ac23: Status 404 returned error can't find the container with id 4891569b073b8bf4e22bf827588ad9182703a555df8d8a88ad89b4565364ac23 Nov 28 06:58:53 crc kubenswrapper[4922]: I1128 06:58:53.881863 4922 generic.go:334] "Generic (PLEG): 
container finished" podID="9e536b56-424b-4041-9cb5-63c5edb6bd99" containerID="c824e0d3ca95486feb30581033528fe2fd342e53dc90c13fb3c522b924c03206" exitCode=0 Nov 28 06:58:53 crc kubenswrapper[4922]: I1128 06:58:53.881946 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6cdcb8b5d6-kcnnv" event={"ID":"9e536b56-424b-4041-9cb5-63c5edb6bd99","Type":"ContainerDied","Data":"c824e0d3ca95486feb30581033528fe2fd342e53dc90c13fb3c522b924c03206"} Nov 28 06:58:53 crc kubenswrapper[4922]: I1128 06:58:53.882267 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6cdcb8b5d6-kcnnv" event={"ID":"9e536b56-424b-4041-9cb5-63c5edb6bd99","Type":"ContainerDied","Data":"9d8a34f97a8488e23a49a9e37669830e6d530eb5857bad96ff7879fa5d9861f7"} Nov 28 06:58:53 crc kubenswrapper[4922]: I1128 06:58:53.882281 4922 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="9d8a34f97a8488e23a49a9e37669830e6d530eb5857bad96ff7879fa5d9861f7" Nov 28 06:58:53 crc kubenswrapper[4922]: I1128 06:58:53.883723 4922 generic.go:334] "Generic (PLEG): container finished" podID="a3162404-2375-4cd0-8d8d-73520e721f53" containerID="b3af2a0e4402a5718f44fad4ba0338b1a7153217537887451beb7d5c2c1ec32d" exitCode=0 Nov 28 06:58:53 crc kubenswrapper[4922]: I1128 06:58:53.883805 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-nxv9j" event={"ID":"a3162404-2375-4cd0-8d8d-73520e721f53","Type":"ContainerDied","Data":"b3af2a0e4402a5718f44fad4ba0338b1a7153217537887451beb7d5c2c1ec32d"} Nov 28 06:58:53 crc kubenswrapper[4922]: I1128 06:58:53.883822 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-nxv9j" event={"ID":"a3162404-2375-4cd0-8d8d-73520e721f53","Type":"ContainerStarted","Data":"4891569b073b8bf4e22bf827588ad9182703a555df8d8a88ad89b4565364ac23"} Nov 28 06:58:53 crc kubenswrapper[4922]: I1128 06:58:53.885817 4922 generic.go:334] "Generic (PLEG): container finished" podID="5686f5d7-4c34-459c-addc-f4a9335238d2" containerID="8783dcf357914bed469f9108dbe6e45affd4e86c285e240b65312f111a532b89" exitCode=0 Nov 28 06:58:53 crc kubenswrapper[4922]: I1128 06:58:53.885875 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-6c59b458cf-64w5v" event={"ID":"5686f5d7-4c34-459c-addc-f4a9335238d2","Type":"ContainerDied","Data":"8783dcf357914bed469f9108dbe6e45affd4e86c285e240b65312f111a532b89"} Nov 28 06:58:53 crc kubenswrapper[4922]: I1128 06:58:53.885898 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-6c59b458cf-64w5v" event={"ID":"5686f5d7-4c34-459c-addc-f4a9335238d2","Type":"ContainerDied","Data":"67d1ed3a3dcfc2f32cfbc3213790c47bb140aa2a22f8ea5b78cde977e7cd5e31"} Nov 28 06:58:53 crc kubenswrapper[4922]: I1128 06:58:53.885908 4922 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="67d1ed3a3dcfc2f32cfbc3213790c47bb140aa2a22f8ea5b78cde977e7cd5e31" Nov 28 06:58:53 crc kubenswrapper[4922]: I1128 06:58:53.895413 4922 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6cdcb8b5d6-kcnnv" Nov 28 06:58:53 crc kubenswrapper[4922]: I1128 06:58:53.912106 4922 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager/controller-manager-6c59b458cf-64w5v" Nov 28 06:58:53 crc kubenswrapper[4922]: I1128 06:58:53.931879 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/9e536b56-424b-4041-9cb5-63c5edb6bd99-client-ca\") pod \"9e536b56-424b-4041-9cb5-63c5edb6bd99\" (UID: \"9e536b56-424b-4041-9cb5-63c5edb6bd99\") " Nov 28 06:58:53 crc kubenswrapper[4922]: I1128 06:58:53.931948 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9e536b56-424b-4041-9cb5-63c5edb6bd99-config\") pod \"9e536b56-424b-4041-9cb5-63c5edb6bd99\" (UID: \"9e536b56-424b-4041-9cb5-63c5edb6bd99\") " Nov 28 06:58:53 crc kubenswrapper[4922]: I1128 06:58:53.932065 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9e536b56-424b-4041-9cb5-63c5edb6bd99-serving-cert\") pod \"9e536b56-424b-4041-9cb5-63c5edb6bd99\" (UID: \"9e536b56-424b-4041-9cb5-63c5edb6bd99\") " Nov 28 06:58:53 crc kubenswrapper[4922]: I1128 06:58:53.932125 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6l6fs\" (UniqueName: \"kubernetes.io/projected/9e536b56-424b-4041-9cb5-63c5edb6bd99-kube-api-access-6l6fs\") pod \"9e536b56-424b-4041-9cb5-63c5edb6bd99\" (UID: \"9e536b56-424b-4041-9cb5-63c5edb6bd99\") " Nov 28 06:58:53 crc kubenswrapper[4922]: I1128 06:58:53.932801 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9e536b56-424b-4041-9cb5-63c5edb6bd99-client-ca" (OuterVolumeSpecName: "client-ca") pod "9e536b56-424b-4041-9cb5-63c5edb6bd99" (UID: "9e536b56-424b-4041-9cb5-63c5edb6bd99"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 06:58:53 crc kubenswrapper[4922]: I1128 06:58:53.932831 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9e536b56-424b-4041-9cb5-63c5edb6bd99-config" (OuterVolumeSpecName: "config") pod "9e536b56-424b-4041-9cb5-63c5edb6bd99" (UID: "9e536b56-424b-4041-9cb5-63c5edb6bd99"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 06:58:53 crc kubenswrapper[4922]: I1128 06:58:53.938376 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9e536b56-424b-4041-9cb5-63c5edb6bd99-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "9e536b56-424b-4041-9cb5-63c5edb6bd99" (UID: "9e536b56-424b-4041-9cb5-63c5edb6bd99"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 06:58:53 crc kubenswrapper[4922]: I1128 06:58:53.938391 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9e536b56-424b-4041-9cb5-63c5edb6bd99-kube-api-access-6l6fs" (OuterVolumeSpecName: "kube-api-access-6l6fs") pod "9e536b56-424b-4041-9cb5-63c5edb6bd99" (UID: "9e536b56-424b-4041-9cb5-63c5edb6bd99"). InnerVolumeSpecName "kube-api-access-6l6fs". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 06:58:54 crc kubenswrapper[4922]: I1128 06:58:54.033596 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/5686f5d7-4c34-459c-addc-f4a9335238d2-client-ca\") pod \"5686f5d7-4c34-459c-addc-f4a9335238d2\" (UID: \"5686f5d7-4c34-459c-addc-f4a9335238d2\") " Nov 28 06:58:54 crc kubenswrapper[4922]: I1128 06:58:54.033652 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5686f5d7-4c34-459c-addc-f4a9335238d2-config\") pod \"5686f5d7-4c34-459c-addc-f4a9335238d2\" (UID: \"5686f5d7-4c34-459c-addc-f4a9335238d2\") " Nov 28 06:58:54 crc kubenswrapper[4922]: I1128 06:58:54.033675 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cft2r\" (UniqueName: \"kubernetes.io/projected/5686f5d7-4c34-459c-addc-f4a9335238d2-kube-api-access-cft2r\") pod \"5686f5d7-4c34-459c-addc-f4a9335238d2\" (UID: \"5686f5d7-4c34-459c-addc-f4a9335238d2\") " Nov 28 06:58:54 crc kubenswrapper[4922]: I1128 06:58:54.033719 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/5686f5d7-4c34-459c-addc-f4a9335238d2-serving-cert\") pod \"5686f5d7-4c34-459c-addc-f4a9335238d2\" (UID: \"5686f5d7-4c34-459c-addc-f4a9335238d2\") " Nov 28 06:58:54 crc kubenswrapper[4922]: I1128 06:58:54.033765 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/5686f5d7-4c34-459c-addc-f4a9335238d2-proxy-ca-bundles\") pod \"5686f5d7-4c34-459c-addc-f4a9335238d2\" (UID: \"5686f5d7-4c34-459c-addc-f4a9335238d2\") " Nov 28 06:58:54 crc kubenswrapper[4922]: I1128 06:58:54.034027 4922 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/9e536b56-424b-4041-9cb5-63c5edb6bd99-client-ca\") on node \"crc\" DevicePath \"\"" Nov 28 06:58:54 crc kubenswrapper[4922]: I1128 06:58:54.034041 4922 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9e536b56-424b-4041-9cb5-63c5edb6bd99-config\") on node \"crc\" DevicePath \"\"" Nov 28 06:58:54 crc kubenswrapper[4922]: I1128 06:58:54.034050 4922 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9e536b56-424b-4041-9cb5-63c5edb6bd99-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 28 06:58:54 crc kubenswrapper[4922]: I1128 06:58:54.034061 4922 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6l6fs\" (UniqueName: \"kubernetes.io/projected/9e536b56-424b-4041-9cb5-63c5edb6bd99-kube-api-access-6l6fs\") on node \"crc\" DevicePath \"\"" Nov 28 06:58:54 crc kubenswrapper[4922]: I1128 06:58:54.034819 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5686f5d7-4c34-459c-addc-f4a9335238d2-proxy-ca-bundles" (OuterVolumeSpecName: "proxy-ca-bundles") pod "5686f5d7-4c34-459c-addc-f4a9335238d2" (UID: "5686f5d7-4c34-459c-addc-f4a9335238d2"). InnerVolumeSpecName "proxy-ca-bundles". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 06:58:54 crc kubenswrapper[4922]: I1128 06:58:54.035183 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5686f5d7-4c34-459c-addc-f4a9335238d2-client-ca" (OuterVolumeSpecName: "client-ca") pod "5686f5d7-4c34-459c-addc-f4a9335238d2" (UID: "5686f5d7-4c34-459c-addc-f4a9335238d2"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 06:58:54 crc kubenswrapper[4922]: I1128 06:58:54.035428 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5686f5d7-4c34-459c-addc-f4a9335238d2-config" (OuterVolumeSpecName: "config") pod "5686f5d7-4c34-459c-addc-f4a9335238d2" (UID: "5686f5d7-4c34-459c-addc-f4a9335238d2"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 06:58:54 crc kubenswrapper[4922]: I1128 06:58:54.037461 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5686f5d7-4c34-459c-addc-f4a9335238d2-kube-api-access-cft2r" (OuterVolumeSpecName: "kube-api-access-cft2r") pod "5686f5d7-4c34-459c-addc-f4a9335238d2" (UID: "5686f5d7-4c34-459c-addc-f4a9335238d2"). InnerVolumeSpecName "kube-api-access-cft2r". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 06:58:54 crc kubenswrapper[4922]: I1128 06:58:54.039560 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5686f5d7-4c34-459c-addc-f4a9335238d2-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "5686f5d7-4c34-459c-addc-f4a9335238d2" (UID: "5686f5d7-4c34-459c-addc-f4a9335238d2"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 06:58:54 crc kubenswrapper[4922]: I1128 06:58:54.135751 4922 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/5686f5d7-4c34-459c-addc-f4a9335238d2-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 28 06:58:54 crc kubenswrapper[4922]: I1128 06:58:54.135789 4922 reconciler_common.go:293] "Volume detached for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/5686f5d7-4c34-459c-addc-f4a9335238d2-proxy-ca-bundles\") on node \"crc\" DevicePath \"\"" Nov 28 06:58:54 crc kubenswrapper[4922]: I1128 06:58:54.135802 4922 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/5686f5d7-4c34-459c-addc-f4a9335238d2-client-ca\") on node \"crc\" DevicePath \"\"" Nov 28 06:58:54 crc kubenswrapper[4922]: I1128 06:58:54.135811 4922 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5686f5d7-4c34-459c-addc-f4a9335238d2-config\") on node \"crc\" DevicePath \"\"" Nov 28 06:58:54 crc kubenswrapper[4922]: I1128 06:58:54.135821 4922 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cft2r\" (UniqueName: \"kubernetes.io/projected/5686f5d7-4c34-459c-addc-f4a9335238d2-kube-api-access-cft2r\") on node \"crc\" DevicePath \"\"" Nov 28 06:58:54 crc kubenswrapper[4922]: I1128 06:58:54.248818 4922 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-g5rdz"] Nov 28 06:58:54 crc kubenswrapper[4922]: E1128 06:58:54.249019 4922 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9e536b56-424b-4041-9cb5-63c5edb6bd99" containerName="route-controller-manager" Nov 28 06:58:54 crc kubenswrapper[4922]: I1128 
06:58:54.249032 4922 state_mem.go:107] "Deleted CPUSet assignment" podUID="9e536b56-424b-4041-9cb5-63c5edb6bd99" containerName="route-controller-manager" Nov 28 06:58:54 crc kubenswrapper[4922]: E1128 06:58:54.249039 4922 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5686f5d7-4c34-459c-addc-f4a9335238d2" containerName="controller-manager" Nov 28 06:58:54 crc kubenswrapper[4922]: I1128 06:58:54.249045 4922 state_mem.go:107] "Deleted CPUSet assignment" podUID="5686f5d7-4c34-459c-addc-f4a9335238d2" containerName="controller-manager" Nov 28 06:58:54 crc kubenswrapper[4922]: I1128 06:58:54.249150 4922 memory_manager.go:354] "RemoveStaleState removing state" podUID="9e536b56-424b-4041-9cb5-63c5edb6bd99" containerName="route-controller-manager" Nov 28 06:58:54 crc kubenswrapper[4922]: I1128 06:58:54.249165 4922 memory_manager.go:354] "RemoveStaleState removing state" podUID="5686f5d7-4c34-459c-addc-f4a9335238d2" containerName="controller-manager" Nov 28 06:58:54 crc kubenswrapper[4922]: I1128 06:58:54.249776 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-g5rdz" Nov 28 06:58:54 crc kubenswrapper[4922]: I1128 06:58:54.251995 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"certified-operators-dockercfg-4rs5g" Nov 28 06:58:54 crc kubenswrapper[4922]: I1128 06:58:54.256759 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-g5rdz"] Nov 28 06:58:54 crc kubenswrapper[4922]: I1128 06:58:54.338308 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/80be7cf9-c24f-4f7d-a6df-49ba99b04994-utilities\") pod \"certified-operators-g5rdz\" (UID: \"80be7cf9-c24f-4f7d-a6df-49ba99b04994\") " pod="openshift-marketplace/certified-operators-g5rdz" Nov 28 06:58:54 crc kubenswrapper[4922]: I1128 06:58:54.338476 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/80be7cf9-c24f-4f7d-a6df-49ba99b04994-catalog-content\") pod \"certified-operators-g5rdz\" (UID: \"80be7cf9-c24f-4f7d-a6df-49ba99b04994\") " pod="openshift-marketplace/certified-operators-g5rdz" Nov 28 06:58:54 crc kubenswrapper[4922]: I1128 06:58:54.338534 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fn72h\" (UniqueName: \"kubernetes.io/projected/80be7cf9-c24f-4f7d-a6df-49ba99b04994-kube-api-access-fn72h\") pod \"certified-operators-g5rdz\" (UID: \"80be7cf9-c24f-4f7d-a6df-49ba99b04994\") " pod="openshift-marketplace/certified-operators-g5rdz" Nov 28 06:58:54 crc kubenswrapper[4922]: I1128 06:58:54.439940 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/80be7cf9-c24f-4f7d-a6df-49ba99b04994-catalog-content\") pod \"certified-operators-g5rdz\" (UID: \"80be7cf9-c24f-4f7d-a6df-49ba99b04994\") " pod="openshift-marketplace/certified-operators-g5rdz" Nov 28 06:58:54 crc kubenswrapper[4922]: I1128 06:58:54.439999 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fn72h\" (UniqueName: \"kubernetes.io/projected/80be7cf9-c24f-4f7d-a6df-49ba99b04994-kube-api-access-fn72h\") pod \"certified-operators-g5rdz\" (UID: \"80be7cf9-c24f-4f7d-a6df-49ba99b04994\") " 
pod="openshift-marketplace/certified-operators-g5rdz" Nov 28 06:58:54 crc kubenswrapper[4922]: I1128 06:58:54.440042 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/80be7cf9-c24f-4f7d-a6df-49ba99b04994-utilities\") pod \"certified-operators-g5rdz\" (UID: \"80be7cf9-c24f-4f7d-a6df-49ba99b04994\") " pod="openshift-marketplace/certified-operators-g5rdz" Nov 28 06:58:54 crc kubenswrapper[4922]: I1128 06:58:54.440419 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/80be7cf9-c24f-4f7d-a6df-49ba99b04994-catalog-content\") pod \"certified-operators-g5rdz\" (UID: \"80be7cf9-c24f-4f7d-a6df-49ba99b04994\") " pod="openshift-marketplace/certified-operators-g5rdz" Nov 28 06:58:54 crc kubenswrapper[4922]: I1128 06:58:54.440505 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/80be7cf9-c24f-4f7d-a6df-49ba99b04994-utilities\") pod \"certified-operators-g5rdz\" (UID: \"80be7cf9-c24f-4f7d-a6df-49ba99b04994\") " pod="openshift-marketplace/certified-operators-g5rdz" Nov 28 06:58:54 crc kubenswrapper[4922]: I1128 06:58:54.462403 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fn72h\" (UniqueName: \"kubernetes.io/projected/80be7cf9-c24f-4f7d-a6df-49ba99b04994-kube-api-access-fn72h\") pod \"certified-operators-g5rdz\" (UID: \"80be7cf9-c24f-4f7d-a6df-49ba99b04994\") " pod="openshift-marketplace/certified-operators-g5rdz" Nov 28 06:58:54 crc kubenswrapper[4922]: I1128 06:58:54.568053 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-g5rdz" Nov 28 06:58:54 crc kubenswrapper[4922]: I1128 06:58:54.800127 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-g5rdz"] Nov 28 06:58:54 crc kubenswrapper[4922]: W1128 06:58:54.818260 4922 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod80be7cf9_c24f_4f7d_a6df_49ba99b04994.slice/crio-12685c58210713da5ec041d2f7fbef7d2d2fb6fff6f8103e57985c413d7b958b WatchSource:0}: Error finding container 12685c58210713da5ec041d2f7fbef7d2d2fb6fff6f8103e57985c413d7b958b: Status 404 returned error can't find the container with id 12685c58210713da5ec041d2f7fbef7d2d2fb6fff6f8103e57985c413d7b958b Nov 28 06:58:54 crc kubenswrapper[4922]: I1128 06:58:54.904519 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-g5rdz" event={"ID":"80be7cf9-c24f-4f7d-a6df-49ba99b04994","Type":"ContainerStarted","Data":"12685c58210713da5ec041d2f7fbef7d2d2fb6fff6f8103e57985c413d7b958b"} Nov 28 06:58:54 crc kubenswrapper[4922]: I1128 06:58:54.909582 4922 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6cdcb8b5d6-kcnnv" Nov 28 06:58:54 crc kubenswrapper[4922]: I1128 06:58:54.917479 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-nxv9j" event={"ID":"a3162404-2375-4cd0-8d8d-73520e721f53","Type":"ContainerStarted","Data":"f0fd4051c0dc781623a9e416a52929417b047b832f9385b3a3ca2c825ec1a3a6"} Nov 28 06:58:54 crc kubenswrapper[4922]: I1128 06:58:54.917557 4922 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager/controller-manager-6c59b458cf-64w5v" Nov 28 06:58:54 crc kubenswrapper[4922]: I1128 06:58:54.963069 4922 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-6c59b458cf-64w5v"] Nov 28 06:58:54 crc kubenswrapper[4922]: I1128 06:58:54.968338 4922 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-controller-manager/controller-manager-6c59b458cf-64w5v"] Nov 28 06:58:54 crc kubenswrapper[4922]: I1128 06:58:54.971120 4922 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6cdcb8b5d6-kcnnv"] Nov 28 06:58:54 crc kubenswrapper[4922]: I1128 06:58:54.973973 4922 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6cdcb8b5d6-kcnnv"] Nov 28 06:58:55 crc kubenswrapper[4922]: I1128 06:58:55.252634 4922 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-gwrfc"] Nov 28 06:58:55 crc kubenswrapper[4922]: I1128 06:58:55.253522 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-gwrfc" Nov 28 06:58:55 crc kubenswrapper[4922]: I1128 06:58:55.257065 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"community-operators-dockercfg-dmngl" Nov 28 06:58:55 crc kubenswrapper[4922]: I1128 06:58:55.263089 4922 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-route-controller-manager/route-controller-manager-d6b9cbd54-lc44l"] Nov 28 06:58:55 crc kubenswrapper[4922]: I1128 06:58:55.264294 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-d6b9cbd54-lc44l" Nov 28 06:58:55 crc kubenswrapper[4922]: I1128 06:58:55.266643 4922 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager/controller-manager-7d474cd5cb-q8c47"] Nov 28 06:58:55 crc kubenswrapper[4922]: I1128 06:58:55.267265 4922 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager/controller-manager-7d474cd5cb-q8c47" Nov 28 06:58:55 crc kubenswrapper[4922]: I1128 06:58:55.269324 4922 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"kube-root-ca.crt" Nov 28 06:58:55 crc kubenswrapper[4922]: I1128 06:58:55.269544 4922 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"config" Nov 28 06:58:55 crc kubenswrapper[4922]: I1128 06:58:55.269551 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"serving-cert" Nov 28 06:58:55 crc kubenswrapper[4922]: I1128 06:58:55.269656 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"route-controller-manager-sa-dockercfg-h2zr2" Nov 28 06:58:55 crc kubenswrapper[4922]: I1128 06:58:55.269689 4922 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"client-ca" Nov 28 06:58:55 crc kubenswrapper[4922]: I1128 06:58:55.269803 4922 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"openshift-service-ca.crt" Nov 28 06:58:55 crc kubenswrapper[4922]: I1128 06:58:55.271738 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"serving-cert" Nov 28 06:58:55 crc kubenswrapper[4922]: I1128 06:58:55.271801 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"openshift-controller-manager-sa-dockercfg-msq4c" Nov 28 06:58:55 crc kubenswrapper[4922]: I1128 06:58:55.272290 4922 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"kube-root-ca.crt" Nov 28 06:58:55 crc kubenswrapper[4922]: I1128 06:58:55.272368 4922 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"client-ca" Nov 28 06:58:55 crc kubenswrapper[4922]: I1128 06:58:55.273899 4922 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-service-ca.crt" Nov 28 06:58:55 crc kubenswrapper[4922]: I1128 06:58:55.280708 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-d6b9cbd54-lc44l"] Nov 28 06:58:55 crc kubenswrapper[4922]: I1128 06:58:55.280728 4922 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"config" Nov 28 06:58:55 crc kubenswrapper[4922]: I1128 06:58:55.282043 4922 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-global-ca" Nov 28 06:58:55 crc kubenswrapper[4922]: I1128 06:58:55.291488 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-7d474cd5cb-q8c47"] Nov 28 06:58:55 crc kubenswrapper[4922]: I1128 06:58:55.311539 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-gwrfc"] Nov 28 06:58:55 crc kubenswrapper[4922]: I1128 06:58:55.367405 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dhwbv\" (UniqueName: \"kubernetes.io/projected/bb310745-a2e5-4a45-8896-fdefedd61450-kube-api-access-dhwbv\") pod \"route-controller-manager-d6b9cbd54-lc44l\" (UID: \"bb310745-a2e5-4a45-8896-fdefedd61450\") " 
pod="openshift-route-controller-manager/route-controller-manager-d6b9cbd54-lc44l" Nov 28 06:58:55 crc kubenswrapper[4922]: I1128 06:58:55.367463 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a991fe40-8b80-453f-ade4-588ea459cf0b-utilities\") pod \"community-operators-gwrfc\" (UID: \"a991fe40-8b80-453f-ade4-588ea459cf0b\") " pod="openshift-marketplace/community-operators-gwrfc" Nov 28 06:58:55 crc kubenswrapper[4922]: I1128 06:58:55.367493 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/bb310745-a2e5-4a45-8896-fdefedd61450-config\") pod \"route-controller-manager-d6b9cbd54-lc44l\" (UID: \"bb310745-a2e5-4a45-8896-fdefedd61450\") " pod="openshift-route-controller-manager/route-controller-manager-d6b9cbd54-lc44l" Nov 28 06:58:55 crc kubenswrapper[4922]: I1128 06:58:55.367537 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/d62f44f3-7e41-49ec-ba55-da53a4215546-client-ca\") pod \"controller-manager-7d474cd5cb-q8c47\" (UID: \"d62f44f3-7e41-49ec-ba55-da53a4215546\") " pod="openshift-controller-manager/controller-manager-7d474cd5cb-q8c47" Nov 28 06:58:55 crc kubenswrapper[4922]: I1128 06:58:55.367611 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/d62f44f3-7e41-49ec-ba55-da53a4215546-serving-cert\") pod \"controller-manager-7d474cd5cb-q8c47\" (UID: \"d62f44f3-7e41-49ec-ba55-da53a4215546\") " pod="openshift-controller-manager/controller-manager-7d474cd5cb-q8c47" Nov 28 06:58:55 crc kubenswrapper[4922]: I1128 06:58:55.367634 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-22kcq\" (UniqueName: \"kubernetes.io/projected/a991fe40-8b80-453f-ade4-588ea459cf0b-kube-api-access-22kcq\") pod \"community-operators-gwrfc\" (UID: \"a991fe40-8b80-453f-ade4-588ea459cf0b\") " pod="openshift-marketplace/community-operators-gwrfc" Nov 28 06:58:55 crc kubenswrapper[4922]: I1128 06:58:55.367657 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a991fe40-8b80-453f-ade4-588ea459cf0b-catalog-content\") pod \"community-operators-gwrfc\" (UID: \"a991fe40-8b80-453f-ade4-588ea459cf0b\") " pod="openshift-marketplace/community-operators-gwrfc" Nov 28 06:58:55 crc kubenswrapper[4922]: I1128 06:58:55.367680 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fxbhv\" (UniqueName: \"kubernetes.io/projected/d62f44f3-7e41-49ec-ba55-da53a4215546-kube-api-access-fxbhv\") pod \"controller-manager-7d474cd5cb-q8c47\" (UID: \"d62f44f3-7e41-49ec-ba55-da53a4215546\") " pod="openshift-controller-manager/controller-manager-7d474cd5cb-q8c47" Nov 28 06:58:55 crc kubenswrapper[4922]: I1128 06:58:55.367699 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d62f44f3-7e41-49ec-ba55-da53a4215546-config\") pod \"controller-manager-7d474cd5cb-q8c47\" (UID: \"d62f44f3-7e41-49ec-ba55-da53a4215546\") " pod="openshift-controller-manager/controller-manager-7d474cd5cb-q8c47" Nov 28 06:58:55 crc 
kubenswrapper[4922]: I1128 06:58:55.367882 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/bb310745-a2e5-4a45-8896-fdefedd61450-client-ca\") pod \"route-controller-manager-d6b9cbd54-lc44l\" (UID: \"bb310745-a2e5-4a45-8896-fdefedd61450\") " pod="openshift-route-controller-manager/route-controller-manager-d6b9cbd54-lc44l" Nov 28 06:58:55 crc kubenswrapper[4922]: I1128 06:58:55.367984 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/d62f44f3-7e41-49ec-ba55-da53a4215546-proxy-ca-bundles\") pod \"controller-manager-7d474cd5cb-q8c47\" (UID: \"d62f44f3-7e41-49ec-ba55-da53a4215546\") " pod="openshift-controller-manager/controller-manager-7d474cd5cb-q8c47" Nov 28 06:58:55 crc kubenswrapper[4922]: I1128 06:58:55.368034 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/bb310745-a2e5-4a45-8896-fdefedd61450-serving-cert\") pod \"route-controller-manager-d6b9cbd54-lc44l\" (UID: \"bb310745-a2e5-4a45-8896-fdefedd61450\") " pod="openshift-route-controller-manager/route-controller-manager-d6b9cbd54-lc44l" Nov 28 06:58:55 crc kubenswrapper[4922]: I1128 06:58:55.405404 4922 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5686f5d7-4c34-459c-addc-f4a9335238d2" path="/var/lib/kubelet/pods/5686f5d7-4c34-459c-addc-f4a9335238d2/volumes" Nov 28 06:58:55 crc kubenswrapper[4922]: I1128 06:58:55.406099 4922 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9e536b56-424b-4041-9cb5-63c5edb6bd99" path="/var/lib/kubelet/pods/9e536b56-424b-4041-9cb5-63c5edb6bd99/volumes" Nov 28 06:58:55 crc kubenswrapper[4922]: I1128 06:58:55.469853 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d62f44f3-7e41-49ec-ba55-da53a4215546-config\") pod \"controller-manager-7d474cd5cb-q8c47\" (UID: \"d62f44f3-7e41-49ec-ba55-da53a4215546\") " pod="openshift-controller-manager/controller-manager-7d474cd5cb-q8c47" Nov 28 06:58:55 crc kubenswrapper[4922]: I1128 06:58:55.469907 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/bb310745-a2e5-4a45-8896-fdefedd61450-client-ca\") pod \"route-controller-manager-d6b9cbd54-lc44l\" (UID: \"bb310745-a2e5-4a45-8896-fdefedd61450\") " pod="openshift-route-controller-manager/route-controller-manager-d6b9cbd54-lc44l" Nov 28 06:58:55 crc kubenswrapper[4922]: I1128 06:58:55.469947 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/d62f44f3-7e41-49ec-ba55-da53a4215546-proxy-ca-bundles\") pod \"controller-manager-7d474cd5cb-q8c47\" (UID: \"d62f44f3-7e41-49ec-ba55-da53a4215546\") " pod="openshift-controller-manager/controller-manager-7d474cd5cb-q8c47" Nov 28 06:58:55 crc kubenswrapper[4922]: I1128 06:58:55.469989 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/bb310745-a2e5-4a45-8896-fdefedd61450-serving-cert\") pod \"route-controller-manager-d6b9cbd54-lc44l\" (UID: \"bb310745-a2e5-4a45-8896-fdefedd61450\") " pod="openshift-route-controller-manager/route-controller-manager-d6b9cbd54-lc44l" Nov 28 06:58:55 crc kubenswrapper[4922]: 
I1128 06:58:55.470029 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dhwbv\" (UniqueName: \"kubernetes.io/projected/bb310745-a2e5-4a45-8896-fdefedd61450-kube-api-access-dhwbv\") pod \"route-controller-manager-d6b9cbd54-lc44l\" (UID: \"bb310745-a2e5-4a45-8896-fdefedd61450\") " pod="openshift-route-controller-manager/route-controller-manager-d6b9cbd54-lc44l" Nov 28 06:58:55 crc kubenswrapper[4922]: I1128 06:58:55.470103 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a991fe40-8b80-453f-ade4-588ea459cf0b-utilities\") pod \"community-operators-gwrfc\" (UID: \"a991fe40-8b80-453f-ade4-588ea459cf0b\") " pod="openshift-marketplace/community-operators-gwrfc" Nov 28 06:58:55 crc kubenswrapper[4922]: I1128 06:58:55.470129 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/bb310745-a2e5-4a45-8896-fdefedd61450-config\") pod \"route-controller-manager-d6b9cbd54-lc44l\" (UID: \"bb310745-a2e5-4a45-8896-fdefedd61450\") " pod="openshift-route-controller-manager/route-controller-manager-d6b9cbd54-lc44l" Nov 28 06:58:55 crc kubenswrapper[4922]: I1128 06:58:55.470153 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/d62f44f3-7e41-49ec-ba55-da53a4215546-client-ca\") pod \"controller-manager-7d474cd5cb-q8c47\" (UID: \"d62f44f3-7e41-49ec-ba55-da53a4215546\") " pod="openshift-controller-manager/controller-manager-7d474cd5cb-q8c47" Nov 28 06:58:55 crc kubenswrapper[4922]: I1128 06:58:55.470207 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/d62f44f3-7e41-49ec-ba55-da53a4215546-serving-cert\") pod \"controller-manager-7d474cd5cb-q8c47\" (UID: \"d62f44f3-7e41-49ec-ba55-da53a4215546\") " pod="openshift-controller-manager/controller-manager-7d474cd5cb-q8c47" Nov 28 06:58:55 crc kubenswrapper[4922]: I1128 06:58:55.470251 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-22kcq\" (UniqueName: \"kubernetes.io/projected/a991fe40-8b80-453f-ade4-588ea459cf0b-kube-api-access-22kcq\") pod \"community-operators-gwrfc\" (UID: \"a991fe40-8b80-453f-ade4-588ea459cf0b\") " pod="openshift-marketplace/community-operators-gwrfc" Nov 28 06:58:55 crc kubenswrapper[4922]: I1128 06:58:55.470272 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a991fe40-8b80-453f-ade4-588ea459cf0b-catalog-content\") pod \"community-operators-gwrfc\" (UID: \"a991fe40-8b80-453f-ade4-588ea459cf0b\") " pod="openshift-marketplace/community-operators-gwrfc" Nov 28 06:58:55 crc kubenswrapper[4922]: I1128 06:58:55.470289 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fxbhv\" (UniqueName: \"kubernetes.io/projected/d62f44f3-7e41-49ec-ba55-da53a4215546-kube-api-access-fxbhv\") pod \"controller-manager-7d474cd5cb-q8c47\" (UID: \"d62f44f3-7e41-49ec-ba55-da53a4215546\") " pod="openshift-controller-manager/controller-manager-7d474cd5cb-q8c47" Nov 28 06:58:55 crc kubenswrapper[4922]: I1128 06:58:55.471256 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/bb310745-a2e5-4a45-8896-fdefedd61450-client-ca\") pod 
\"route-controller-manager-d6b9cbd54-lc44l\" (UID: \"bb310745-a2e5-4a45-8896-fdefedd61450\") " pod="openshift-route-controller-manager/route-controller-manager-d6b9cbd54-lc44l" Nov 28 06:58:55 crc kubenswrapper[4922]: I1128 06:58:55.471337 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d62f44f3-7e41-49ec-ba55-da53a4215546-config\") pod \"controller-manager-7d474cd5cb-q8c47\" (UID: \"d62f44f3-7e41-49ec-ba55-da53a4215546\") " pod="openshift-controller-manager/controller-manager-7d474cd5cb-q8c47" Nov 28 06:58:55 crc kubenswrapper[4922]: I1128 06:58:55.471463 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/d62f44f3-7e41-49ec-ba55-da53a4215546-proxy-ca-bundles\") pod \"controller-manager-7d474cd5cb-q8c47\" (UID: \"d62f44f3-7e41-49ec-ba55-da53a4215546\") " pod="openshift-controller-manager/controller-manager-7d474cd5cb-q8c47" Nov 28 06:58:55 crc kubenswrapper[4922]: I1128 06:58:55.472398 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a991fe40-8b80-453f-ade4-588ea459cf0b-catalog-content\") pod \"community-operators-gwrfc\" (UID: \"a991fe40-8b80-453f-ade4-588ea459cf0b\") " pod="openshift-marketplace/community-operators-gwrfc" Nov 28 06:58:55 crc kubenswrapper[4922]: I1128 06:58:55.472627 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/d62f44f3-7e41-49ec-ba55-da53a4215546-client-ca\") pod \"controller-manager-7d474cd5cb-q8c47\" (UID: \"d62f44f3-7e41-49ec-ba55-da53a4215546\") " pod="openshift-controller-manager/controller-manager-7d474cd5cb-q8c47" Nov 28 06:58:55 crc kubenswrapper[4922]: I1128 06:58:55.472768 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a991fe40-8b80-453f-ade4-588ea459cf0b-utilities\") pod \"community-operators-gwrfc\" (UID: \"a991fe40-8b80-453f-ade4-588ea459cf0b\") " pod="openshift-marketplace/community-operators-gwrfc" Nov 28 06:58:55 crc kubenswrapper[4922]: I1128 06:58:55.478133 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/bb310745-a2e5-4a45-8896-fdefedd61450-config\") pod \"route-controller-manager-d6b9cbd54-lc44l\" (UID: \"bb310745-a2e5-4a45-8896-fdefedd61450\") " pod="openshift-route-controller-manager/route-controller-manager-d6b9cbd54-lc44l" Nov 28 06:58:55 crc kubenswrapper[4922]: I1128 06:58:55.485940 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/bb310745-a2e5-4a45-8896-fdefedd61450-serving-cert\") pod \"route-controller-manager-d6b9cbd54-lc44l\" (UID: \"bb310745-a2e5-4a45-8896-fdefedd61450\") " pod="openshift-route-controller-manager/route-controller-manager-d6b9cbd54-lc44l" Nov 28 06:58:55 crc kubenswrapper[4922]: I1128 06:58:55.485989 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/d62f44f3-7e41-49ec-ba55-da53a4215546-serving-cert\") pod \"controller-manager-7d474cd5cb-q8c47\" (UID: \"d62f44f3-7e41-49ec-ba55-da53a4215546\") " pod="openshift-controller-manager/controller-manager-7d474cd5cb-q8c47" Nov 28 06:58:55 crc kubenswrapper[4922]: I1128 06:58:55.487750 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"kube-api-access-fxbhv\" (UniqueName: \"kubernetes.io/projected/d62f44f3-7e41-49ec-ba55-da53a4215546-kube-api-access-fxbhv\") pod \"controller-manager-7d474cd5cb-q8c47\" (UID: \"d62f44f3-7e41-49ec-ba55-da53a4215546\") " pod="openshift-controller-manager/controller-manager-7d474cd5cb-q8c47" Nov 28 06:58:55 crc kubenswrapper[4922]: I1128 06:58:55.488789 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-22kcq\" (UniqueName: \"kubernetes.io/projected/a991fe40-8b80-453f-ade4-588ea459cf0b-kube-api-access-22kcq\") pod \"community-operators-gwrfc\" (UID: \"a991fe40-8b80-453f-ade4-588ea459cf0b\") " pod="openshift-marketplace/community-operators-gwrfc" Nov 28 06:58:55 crc kubenswrapper[4922]: I1128 06:58:55.494576 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dhwbv\" (UniqueName: \"kubernetes.io/projected/bb310745-a2e5-4a45-8896-fdefedd61450-kube-api-access-dhwbv\") pod \"route-controller-manager-d6b9cbd54-lc44l\" (UID: \"bb310745-a2e5-4a45-8896-fdefedd61450\") " pod="openshift-route-controller-manager/route-controller-manager-d6b9cbd54-lc44l" Nov 28 06:58:55 crc kubenswrapper[4922]: I1128 06:58:55.578726 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-gwrfc" Nov 28 06:58:55 crc kubenswrapper[4922]: I1128 06:58:55.650569 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-d6b9cbd54-lc44l" Nov 28 06:58:55 crc kubenswrapper[4922]: I1128 06:58:55.657828 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-7d474cd5cb-q8c47" Nov 28 06:58:55 crc kubenswrapper[4922]: I1128 06:58:55.928407 4922 generic.go:334] "Generic (PLEG): container finished" podID="a3162404-2375-4cd0-8d8d-73520e721f53" containerID="f0fd4051c0dc781623a9e416a52929417b047b832f9385b3a3ca2c825ec1a3a6" exitCode=0 Nov 28 06:58:55 crc kubenswrapper[4922]: I1128 06:58:55.928473 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-nxv9j" event={"ID":"a3162404-2375-4cd0-8d8d-73520e721f53","Type":"ContainerDied","Data":"f0fd4051c0dc781623a9e416a52929417b047b832f9385b3a3ca2c825ec1a3a6"} Nov 28 06:58:55 crc kubenswrapper[4922]: I1128 06:58:55.931811 4922 generic.go:334] "Generic (PLEG): container finished" podID="80be7cf9-c24f-4f7d-a6df-49ba99b04994" containerID="be29d0bcdc3dd2189518195061282b03b81391a0780744f230e84566b63c7e88" exitCode=0 Nov 28 06:58:55 crc kubenswrapper[4922]: I1128 06:58:55.931964 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-g5rdz" event={"ID":"80be7cf9-c24f-4f7d-a6df-49ba99b04994","Type":"ContainerDied","Data":"be29d0bcdc3dd2189518195061282b03b81391a0780744f230e84566b63c7e88"} Nov 28 06:58:56 crc kubenswrapper[4922]: I1128 06:58:56.640865 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-d6b9cbd54-lc44l"] Nov 28 06:58:56 crc kubenswrapper[4922]: I1128 06:58:56.872427 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-gwrfc"] Nov 28 06:58:56 crc kubenswrapper[4922]: I1128 06:58:56.904438 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-7d474cd5cb-q8c47"] Nov 28 06:58:56 crc kubenswrapper[4922]: W1128 06:58:56.914256 4922 
manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podd62f44f3_7e41_49ec_ba55_da53a4215546.slice/crio-0268e5405aec3786f9422e1165d5a654e12c4bcdd1ac2301b84e13382ef26167 WatchSource:0}: Error finding container 0268e5405aec3786f9422e1165d5a654e12c4bcdd1ac2301b84e13382ef26167: Status 404 returned error can't find the container with id 0268e5405aec3786f9422e1165d5a654e12c4bcdd1ac2301b84e13382ef26167 Nov 28 06:58:56 crc kubenswrapper[4922]: I1128 06:58:56.950815 4922 generic.go:334] "Generic (PLEG): container finished" podID="c6c2bb80-f2be-424e-92ce-4e3b1e9ce558" containerID="f6063c71adcb622342b23bef656ae222d826d387650a89bda712147f51716f00" exitCode=0 Nov 28 06:58:56 crc kubenswrapper[4922]: I1128 06:58:56.950884 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-rtjdf" event={"ID":"c6c2bb80-f2be-424e-92ce-4e3b1e9ce558","Type":"ContainerDied","Data":"f6063c71adcb622342b23bef656ae222d826d387650a89bda712147f51716f00"} Nov 28 06:58:56 crc kubenswrapper[4922]: I1128 06:58:56.954196 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-7d474cd5cb-q8c47" event={"ID":"d62f44f3-7e41-49ec-ba55-da53a4215546","Type":"ContainerStarted","Data":"0268e5405aec3786f9422e1165d5a654e12c4bcdd1ac2301b84e13382ef26167"} Nov 28 06:58:56 crc kubenswrapper[4922]: I1128 06:58:56.959348 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-gwrfc" event={"ID":"a991fe40-8b80-453f-ade4-588ea459cf0b","Type":"ContainerStarted","Data":"b6305a4ac13a644dd4bbbbf1a8382cf693a7593eede0fcc87b707ec6cd0f640b"} Nov 28 06:58:56 crc kubenswrapper[4922]: I1128 06:58:56.970105 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-d6b9cbd54-lc44l" event={"ID":"bb310745-a2e5-4a45-8896-fdefedd61450","Type":"ContainerStarted","Data":"b531e785e3b7702ac2435c19756bdb7298e4f4484a3c4cfa862f75386b4e21ab"} Nov 28 06:58:56 crc kubenswrapper[4922]: I1128 06:58:56.970261 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-d6b9cbd54-lc44l" event={"ID":"bb310745-a2e5-4a45-8896-fdefedd61450","Type":"ContainerStarted","Data":"455b0d13e3ec5a5c1cbe4f1cef18795d0e426faa397e622a47fafd2ac6d45fa4"} Nov 28 06:58:56 crc kubenswrapper[4922]: I1128 06:58:56.972672 4922 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-route-controller-manager/route-controller-manager-d6b9cbd54-lc44l" Nov 28 06:58:56 crc kubenswrapper[4922]: I1128 06:58:56.994325 4922 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-route-controller-manager/route-controller-manager-d6b9cbd54-lc44l" podStartSLOduration=3.994306529 podStartE2EDuration="3.994306529s" podCreationTimestamp="2025-11-28 06:58:53 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 06:58:56.994106613 +0000 UTC m=+381.914502195" watchObservedRunningTime="2025-11-28 06:58:56.994306529 +0000 UTC m=+381.914702131" Nov 28 06:58:57 crc kubenswrapper[4922]: I1128 06:58:57.304756 4922 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-route-controller-manager/route-controller-manager-d6b9cbd54-lc44l" Nov 28 06:58:57 crc kubenswrapper[4922]: I1128 06:58:57.312392 4922 patch_prober.go:28] interesting 
pod/machine-config-daemon-h8wk6 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 06:58:57 crc kubenswrapper[4922]: I1128 06:58:57.312452 4922 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-h8wk6" podUID="0498340a-5e95-42bf-a0a6-8ac89a6b8858" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 06:58:57 crc kubenswrapper[4922]: I1128 06:58:57.977584 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-rtjdf" event={"ID":"c6c2bb80-f2be-424e-92ce-4e3b1e9ce558","Type":"ContainerStarted","Data":"ccec522adc595f3e65c372bc946a0a60d60b020807290fd3d4692643df56290d"} Nov 28 06:58:57 crc kubenswrapper[4922]: I1128 06:58:57.979248 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-7d474cd5cb-q8c47" event={"ID":"d62f44f3-7e41-49ec-ba55-da53a4215546","Type":"ContainerStarted","Data":"fe3b59e080bb783306fe70a340e8a4f1d09e9678e68e78c778f42736c1bc223f"} Nov 28 06:58:57 crc kubenswrapper[4922]: I1128 06:58:57.979464 4922 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-controller-manager/controller-manager-7d474cd5cb-q8c47" Nov 28 06:58:57 crc kubenswrapper[4922]: I1128 06:58:57.983057 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-nxv9j" event={"ID":"a3162404-2375-4cd0-8d8d-73520e721f53","Type":"ContainerStarted","Data":"6d29b6a0383121a1b1cf592db3a387520c3a127f531aeb0d350297d5a9817066"} Nov 28 06:58:57 crc kubenswrapper[4922]: I1128 06:58:57.984296 4922 generic.go:334] "Generic (PLEG): container finished" podID="a991fe40-8b80-453f-ade4-588ea459cf0b" containerID="b498a0a1830022030e4c4e6f27c1d6108714d8f49f69e084576e954021a2e880" exitCode=0 Nov 28 06:58:57 crc kubenswrapper[4922]: I1128 06:58:57.984407 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-gwrfc" event={"ID":"a991fe40-8b80-453f-ade4-588ea459cf0b","Type":"ContainerDied","Data":"b498a0a1830022030e4c4e6f27c1d6108714d8f49f69e084576e954021a2e880"} Nov 28 06:58:57 crc kubenswrapper[4922]: I1128 06:58:57.984754 4922 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-controller-manager/controller-manager-7d474cd5cb-q8c47" Nov 28 06:58:57 crc kubenswrapper[4922]: I1128 06:58:57.986587 4922 generic.go:334] "Generic (PLEG): container finished" podID="80be7cf9-c24f-4f7d-a6df-49ba99b04994" containerID="040154d0e4352af9ca51b9e01aabcb960c8b2fe6709ab7cce265811e0e81d3bc" exitCode=0 Nov 28 06:58:57 crc kubenswrapper[4922]: I1128 06:58:57.986724 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-g5rdz" event={"ID":"80be7cf9-c24f-4f7d-a6df-49ba99b04994","Type":"ContainerDied","Data":"040154d0e4352af9ca51b9e01aabcb960c8b2fe6709ab7cce265811e0e81d3bc"} Nov 28 06:58:58 crc kubenswrapper[4922]: I1128 06:58:58.003213 4922 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager/controller-manager-7d474cd5cb-q8c47" podStartSLOduration=5.00319478 podStartE2EDuration="5.00319478s" podCreationTimestamp="2025-11-28 06:58:53 +0000 UTC" firstStartedPulling="0001-01-01 
00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 06:58:57.999706212 +0000 UTC m=+382.920101814" watchObservedRunningTime="2025-11-28 06:58:58.00319478 +0000 UTC m=+382.923590372" Nov 28 06:58:58 crc kubenswrapper[4922]: I1128 06:58:58.024315 4922 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-rtjdf" podStartSLOduration=2.392277935 podStartE2EDuration="7.024292099s" podCreationTimestamp="2025-11-28 06:58:51 +0000 UTC" firstStartedPulling="2025-11-28 06:58:52.877720112 +0000 UTC m=+377.798115734" lastFinishedPulling="2025-11-28 06:58:57.509734316 +0000 UTC m=+382.430129898" observedRunningTime="2025-11-28 06:58:58.020695789 +0000 UTC m=+382.941091371" watchObservedRunningTime="2025-11-28 06:58:58.024292099 +0000 UTC m=+382.944687681" Nov 28 06:58:58 crc kubenswrapper[4922]: I1128 06:58:58.083191 4922 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-nxv9j" podStartSLOduration=3.052258703 podStartE2EDuration="6.083170524s" podCreationTimestamp="2025-11-28 06:58:52 +0000 UTC" firstStartedPulling="2025-11-28 06:58:53.884939436 +0000 UTC m=+378.805335018" lastFinishedPulling="2025-11-28 06:58:56.915851257 +0000 UTC m=+381.836246839" observedRunningTime="2025-11-28 06:58:58.058739671 +0000 UTC m=+382.979135253" watchObservedRunningTime="2025-11-28 06:58:58.083170524 +0000 UTC m=+383.003566106" Nov 28 06:58:58 crc kubenswrapper[4922]: I1128 06:58:58.993686 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-gwrfc" event={"ID":"a991fe40-8b80-453f-ade4-588ea459cf0b","Type":"ContainerStarted","Data":"40115a1b5024b9baad5bf808a69cc95940eefa7d560c3ef0e30a7e48666db0d7"} Nov 28 06:58:58 crc kubenswrapper[4922]: I1128 06:58:58.998825 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-g5rdz" event={"ID":"80be7cf9-c24f-4f7d-a6df-49ba99b04994","Type":"ContainerStarted","Data":"7e3fbd4b488b6a8ffb43a44efccb625650dacafa7c13a520e9be6fbebc0183f8"} Nov 28 06:58:59 crc kubenswrapper[4922]: I1128 06:58:59.033416 4922 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-g5rdz" podStartSLOduration=2.75233164 podStartE2EDuration="5.033398616s" podCreationTimestamp="2025-11-28 06:58:54 +0000 UTC" firstStartedPulling="2025-11-28 06:58:56.373309253 +0000 UTC m=+381.293704835" lastFinishedPulling="2025-11-28 06:58:58.654376229 +0000 UTC m=+383.574771811" observedRunningTime="2025-11-28 06:58:59.030527106 +0000 UTC m=+383.950922698" watchObservedRunningTime="2025-11-28 06:58:59.033398616 +0000 UTC m=+383.953794198" Nov 28 06:59:00 crc kubenswrapper[4922]: I1128 06:59:00.005772 4922 generic.go:334] "Generic (PLEG): container finished" podID="a991fe40-8b80-453f-ade4-588ea459cf0b" containerID="40115a1b5024b9baad5bf808a69cc95940eefa7d560c3ef0e30a7e48666db0d7" exitCode=0 Nov 28 06:59:00 crc kubenswrapper[4922]: I1128 06:59:00.005841 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-gwrfc" event={"ID":"a991fe40-8b80-453f-ade4-588ea459cf0b","Type":"ContainerDied","Data":"40115a1b5024b9baad5bf808a69cc95940eefa7d560c3ef0e30a7e48666db0d7"} Nov 28 06:59:02 crc kubenswrapper[4922]: I1128 06:59:02.018865 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-gwrfc" 
event={"ID":"a991fe40-8b80-453f-ade4-588ea459cf0b","Type":"ContainerStarted","Data":"083082528dc4ebe92fdd8e13375a72b0033977a2a8ee0e225a9e3f9e4b02dc2a"} Nov 28 06:59:02 crc kubenswrapper[4922]: I1128 06:59:02.037151 4922 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-gwrfc" podStartSLOduration=4.07467436 podStartE2EDuration="7.037133129s" podCreationTimestamp="2025-11-28 06:58:55 +0000 UTC" firstStartedPulling="2025-11-28 06:58:57.987310096 +0000 UTC m=+382.907705668" lastFinishedPulling="2025-11-28 06:59:00.949768855 +0000 UTC m=+385.870164437" observedRunningTime="2025-11-28 06:59:02.035702579 +0000 UTC m=+386.956098171" watchObservedRunningTime="2025-11-28 06:59:02.037133129 +0000 UTC m=+386.957528721" Nov 28 06:59:02 crc kubenswrapper[4922]: I1128 06:59:02.198049 4922 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-rtjdf" Nov 28 06:59:02 crc kubenswrapper[4922]: I1128 06:59:02.198344 4922 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-rtjdf" Nov 28 06:59:02 crc kubenswrapper[4922]: I1128 06:59:02.257021 4922 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-rtjdf" Nov 28 06:59:03 crc kubenswrapper[4922]: I1128 06:59:03.076510 4922 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-rtjdf" Nov 28 06:59:03 crc kubenswrapper[4922]: I1128 06:59:03.196774 4922 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-nxv9j" Nov 28 06:59:03 crc kubenswrapper[4922]: I1128 06:59:03.197075 4922 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-nxv9j" Nov 28 06:59:03 crc kubenswrapper[4922]: I1128 06:59:03.235656 4922 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-nxv9j" Nov 28 06:59:03 crc kubenswrapper[4922]: I1128 06:59:03.734292 4922 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-image-registry/image-registry-697d97f7c8-sckww" podUID="dd20079d-34c4-4ea6-920f-420a1d6bb863" containerName="registry" containerID="cri-o://0306f5f1cf36667e60609f05cb41f1eed0c4d0aa5a4fee9cdeab898377c19b9c" gracePeriod=30 Nov 28 06:59:04 crc kubenswrapper[4922]: I1128 06:59:04.032806 4922 generic.go:334] "Generic (PLEG): container finished" podID="dd20079d-34c4-4ea6-920f-420a1d6bb863" containerID="0306f5f1cf36667e60609f05cb41f1eed0c4d0aa5a4fee9cdeab898377c19b9c" exitCode=0 Nov 28 06:59:04 crc kubenswrapper[4922]: I1128 06:59:04.032932 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-697d97f7c8-sckww" event={"ID":"dd20079d-34c4-4ea6-920f-420a1d6bb863","Type":"ContainerDied","Data":"0306f5f1cf36667e60609f05cb41f1eed0c4d0aa5a4fee9cdeab898377c19b9c"} Nov 28 06:59:04 crc kubenswrapper[4922]: I1128 06:59:04.087018 4922 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-nxv9j" Nov 28 06:59:04 crc kubenswrapper[4922]: I1128 06:59:04.262285 4922 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/image-registry-697d97f7c8-sckww" Nov 28 06:59:04 crc kubenswrapper[4922]: I1128 06:59:04.296783 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-storage\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"dd20079d-34c4-4ea6-920f-420a1d6bb863\" (UID: \"dd20079d-34c4-4ea6-920f-420a1d6bb863\") " Nov 28 06:59:04 crc kubenswrapper[4922]: I1128 06:59:04.296828 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/dd20079d-34c4-4ea6-920f-420a1d6bb863-registry-tls\") pod \"dd20079d-34c4-4ea6-920f-420a1d6bb863\" (UID: \"dd20079d-34c4-4ea6-920f-420a1d6bb863\") " Nov 28 06:59:04 crc kubenswrapper[4922]: I1128 06:59:04.296856 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/dd20079d-34c4-4ea6-920f-420a1d6bb863-registry-certificates\") pod \"dd20079d-34c4-4ea6-920f-420a1d6bb863\" (UID: \"dd20079d-34c4-4ea6-920f-420a1d6bb863\") " Nov 28 06:59:04 crc kubenswrapper[4922]: I1128 06:59:04.296921 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/dd20079d-34c4-4ea6-920f-420a1d6bb863-ca-trust-extracted\") pod \"dd20079d-34c4-4ea6-920f-420a1d6bb863\" (UID: \"dd20079d-34c4-4ea6-920f-420a1d6bb863\") " Nov 28 06:59:04 crc kubenswrapper[4922]: I1128 06:59:04.297862 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/dd20079d-34c4-4ea6-920f-420a1d6bb863-registry-certificates" (OuterVolumeSpecName: "registry-certificates") pod "dd20079d-34c4-4ea6-920f-420a1d6bb863" (UID: "dd20079d-34c4-4ea6-920f-420a1d6bb863"). InnerVolumeSpecName "registry-certificates". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 06:59:04 crc kubenswrapper[4922]: I1128 06:59:04.297938 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/dd20079d-34c4-4ea6-920f-420a1d6bb863-installation-pull-secrets\") pod \"dd20079d-34c4-4ea6-920f-420a1d6bb863\" (UID: \"dd20079d-34c4-4ea6-920f-420a1d6bb863\") " Nov 28 06:59:04 crc kubenswrapper[4922]: I1128 06:59:04.298093 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/dd20079d-34c4-4ea6-920f-420a1d6bb863-trusted-ca\") pod \"dd20079d-34c4-4ea6-920f-420a1d6bb863\" (UID: \"dd20079d-34c4-4ea6-920f-420a1d6bb863\") " Nov 28 06:59:04 crc kubenswrapper[4922]: I1128 06:59:04.298121 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/dd20079d-34c4-4ea6-920f-420a1d6bb863-bound-sa-token\") pod \"dd20079d-34c4-4ea6-920f-420a1d6bb863\" (UID: \"dd20079d-34c4-4ea6-920f-420a1d6bb863\") " Nov 28 06:59:04 crc kubenswrapper[4922]: I1128 06:59:04.298185 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-s6l7n\" (UniqueName: \"kubernetes.io/projected/dd20079d-34c4-4ea6-920f-420a1d6bb863-kube-api-access-s6l7n\") pod \"dd20079d-34c4-4ea6-920f-420a1d6bb863\" (UID: \"dd20079d-34c4-4ea6-920f-420a1d6bb863\") " Nov 28 06:59:04 crc kubenswrapper[4922]: I1128 06:59:04.298556 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/dd20079d-34c4-4ea6-920f-420a1d6bb863-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "dd20079d-34c4-4ea6-920f-420a1d6bb863" (UID: "dd20079d-34c4-4ea6-920f-420a1d6bb863"). InnerVolumeSpecName "trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 06:59:04 crc kubenswrapper[4922]: I1128 06:59:04.298935 4922 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/dd20079d-34c4-4ea6-920f-420a1d6bb863-trusted-ca\") on node \"crc\" DevicePath \"\"" Nov 28 06:59:04 crc kubenswrapper[4922]: I1128 06:59:04.298959 4922 reconciler_common.go:293] "Volume detached for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/dd20079d-34c4-4ea6-920f-420a1d6bb863-registry-certificates\") on node \"crc\" DevicePath \"\"" Nov 28 06:59:04 crc kubenswrapper[4922]: I1128 06:59:04.303327 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/dd20079d-34c4-4ea6-920f-420a1d6bb863-installation-pull-secrets" (OuterVolumeSpecName: "installation-pull-secrets") pod "dd20079d-34c4-4ea6-920f-420a1d6bb863" (UID: "dd20079d-34c4-4ea6-920f-420a1d6bb863"). InnerVolumeSpecName "installation-pull-secrets". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 06:59:04 crc kubenswrapper[4922]: I1128 06:59:04.303485 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/dd20079d-34c4-4ea6-920f-420a1d6bb863-registry-tls" (OuterVolumeSpecName: "registry-tls") pod "dd20079d-34c4-4ea6-920f-420a1d6bb863" (UID: "dd20079d-34c4-4ea6-920f-420a1d6bb863"). InnerVolumeSpecName "registry-tls". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 06:59:04 crc kubenswrapper[4922]: I1128 06:59:04.303648 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/dd20079d-34c4-4ea6-920f-420a1d6bb863-bound-sa-token" (OuterVolumeSpecName: "bound-sa-token") pod "dd20079d-34c4-4ea6-920f-420a1d6bb863" (UID: "dd20079d-34c4-4ea6-920f-420a1d6bb863"). InnerVolumeSpecName "bound-sa-token". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 06:59:04 crc kubenswrapper[4922]: I1128 06:59:04.307823 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/dd20079d-34c4-4ea6-920f-420a1d6bb863-kube-api-access-s6l7n" (OuterVolumeSpecName: "kube-api-access-s6l7n") pod "dd20079d-34c4-4ea6-920f-420a1d6bb863" (UID: "dd20079d-34c4-4ea6-920f-420a1d6bb863"). InnerVolumeSpecName "kube-api-access-s6l7n". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 06:59:04 crc kubenswrapper[4922]: I1128 06:59:04.308090 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (OuterVolumeSpecName: "registry-storage") pod "dd20079d-34c4-4ea6-920f-420a1d6bb863" (UID: "dd20079d-34c4-4ea6-920f-420a1d6bb863"). InnerVolumeSpecName "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8". PluginName "kubernetes.io/csi", VolumeGidValue "" Nov 28 06:59:04 crc kubenswrapper[4922]: I1128 06:59:04.311415 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/dd20079d-34c4-4ea6-920f-420a1d6bb863-ca-trust-extracted" (OuterVolumeSpecName: "ca-trust-extracted") pod "dd20079d-34c4-4ea6-920f-420a1d6bb863" (UID: "dd20079d-34c4-4ea6-920f-420a1d6bb863"). InnerVolumeSpecName "ca-trust-extracted". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 06:59:04 crc kubenswrapper[4922]: I1128 06:59:04.400378 4922 reconciler_common.go:293] "Volume detached for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/dd20079d-34c4-4ea6-920f-420a1d6bb863-bound-sa-token\") on node \"crc\" DevicePath \"\"" Nov 28 06:59:04 crc kubenswrapper[4922]: I1128 06:59:04.400412 4922 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-s6l7n\" (UniqueName: \"kubernetes.io/projected/dd20079d-34c4-4ea6-920f-420a1d6bb863-kube-api-access-s6l7n\") on node \"crc\" DevicePath \"\"" Nov 28 06:59:04 crc kubenswrapper[4922]: I1128 06:59:04.400427 4922 reconciler_common.go:293] "Volume detached for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/dd20079d-34c4-4ea6-920f-420a1d6bb863-registry-tls\") on node \"crc\" DevicePath \"\"" Nov 28 06:59:04 crc kubenswrapper[4922]: I1128 06:59:04.400437 4922 reconciler_common.go:293] "Volume detached for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/dd20079d-34c4-4ea6-920f-420a1d6bb863-ca-trust-extracted\") on node \"crc\" DevicePath \"\"" Nov 28 06:59:04 crc kubenswrapper[4922]: I1128 06:59:04.400446 4922 reconciler_common.go:293] "Volume detached for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/dd20079d-34c4-4ea6-920f-420a1d6bb863-installation-pull-secrets\") on node \"crc\" DevicePath \"\"" Nov 28 06:59:04 crc kubenswrapper[4922]: I1128 06:59:04.568473 4922 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-g5rdz" Nov 28 06:59:04 crc kubenswrapper[4922]: I1128 06:59:04.568537 4922 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-g5rdz" Nov 28 06:59:04 crc kubenswrapper[4922]: I1128 06:59:04.627874 4922 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-g5rdz" Nov 28 06:59:05 crc kubenswrapper[4922]: I1128 06:59:05.046387 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-697d97f7c8-sckww" event={"ID":"dd20079d-34c4-4ea6-920f-420a1d6bb863","Type":"ContainerDied","Data":"91b1a67cc569943354c6ac89ad71588609e5aef1403b1d93aee31aca338fe5df"} Nov 28 06:59:05 crc kubenswrapper[4922]: I1128 06:59:05.046463 4922 scope.go:117] "RemoveContainer" containerID="0306f5f1cf36667e60609f05cb41f1eed0c4d0aa5a4fee9cdeab898377c19b9c" Nov 28 06:59:05 crc kubenswrapper[4922]: I1128 06:59:05.046715 4922 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/image-registry-697d97f7c8-sckww" Nov 28 06:59:05 crc kubenswrapper[4922]: I1128 06:59:05.097156 4922 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-sckww"] Nov 28 06:59:05 crc kubenswrapper[4922]: I1128 06:59:05.102943 4922 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-sckww"] Nov 28 06:59:05 crc kubenswrapper[4922]: I1128 06:59:05.127736 4922 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-g5rdz" Nov 28 06:59:05 crc kubenswrapper[4922]: I1128 06:59:05.406926 4922 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="dd20079d-34c4-4ea6-920f-420a1d6bb863" path="/var/lib/kubelet/pods/dd20079d-34c4-4ea6-920f-420a1d6bb863/volumes" Nov 28 06:59:05 crc kubenswrapper[4922]: I1128 06:59:05.579058 4922 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-gwrfc" Nov 28 06:59:05 crc kubenswrapper[4922]: I1128 06:59:05.579161 4922 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-gwrfc" Nov 28 06:59:05 crc kubenswrapper[4922]: I1128 06:59:05.630521 4922 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-gwrfc" Nov 28 06:59:06 crc kubenswrapper[4922]: I1128 06:59:06.097899 4922 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-gwrfc" Nov 28 06:59:27 crc kubenswrapper[4922]: I1128 06:59:27.311958 4922 patch_prober.go:28] interesting pod/machine-config-daemon-h8wk6 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 06:59:27 crc kubenswrapper[4922]: I1128 06:59:27.312590 4922 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-h8wk6" podUID="0498340a-5e95-42bf-a0a6-8ac89a6b8858" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 06:59:27 crc kubenswrapper[4922]: I1128 06:59:27.312653 4922 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-h8wk6" Nov 28 06:59:27 crc kubenswrapper[4922]: I1128 06:59:27.313735 4922 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"faa2e15e685a62da719acf12fe75a2f590f6b07faa5cd9c8e0a536878c38e595"} pod="openshift-machine-config-operator/machine-config-daemon-h8wk6" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 28 06:59:27 crc kubenswrapper[4922]: I1128 06:59:27.313861 4922 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-h8wk6" podUID="0498340a-5e95-42bf-a0a6-8ac89a6b8858" containerName="machine-config-daemon" containerID="cri-o://faa2e15e685a62da719acf12fe75a2f590f6b07faa5cd9c8e0a536878c38e595" gracePeriod=600 Nov 28 06:59:28 crc kubenswrapper[4922]: I1128 06:59:28.201198 4922 generic.go:334] "Generic (PLEG): container 
finished" podID="0498340a-5e95-42bf-a0a6-8ac89a6b8858" containerID="faa2e15e685a62da719acf12fe75a2f590f6b07faa5cd9c8e0a536878c38e595" exitCode=0 Nov 28 06:59:28 crc kubenswrapper[4922]: I1128 06:59:28.201343 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-h8wk6" event={"ID":"0498340a-5e95-42bf-a0a6-8ac89a6b8858","Type":"ContainerDied","Data":"faa2e15e685a62da719acf12fe75a2f590f6b07faa5cd9c8e0a536878c38e595"} Nov 28 06:59:28 crc kubenswrapper[4922]: I1128 06:59:28.201637 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-h8wk6" event={"ID":"0498340a-5e95-42bf-a0a6-8ac89a6b8858","Type":"ContainerStarted","Data":"e2cc3a05c61bc0a99d69c8dde17ac05a9604299976427f43298e281dd4e8335e"} Nov 28 06:59:28 crc kubenswrapper[4922]: I1128 06:59:28.201672 4922 scope.go:117] "RemoveContainer" containerID="b903fc60349ec7d5004f23ec933dce13d646a2c343819495564005dfb4813314" Nov 28 07:00:00 crc kubenswrapper[4922]: I1128 07:00:00.207623 4922 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29405220-bpqvm"] Nov 28 07:00:00 crc kubenswrapper[4922]: E1128 07:00:00.208825 4922 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="dd20079d-34c4-4ea6-920f-420a1d6bb863" containerName="registry" Nov 28 07:00:00 crc kubenswrapper[4922]: I1128 07:00:00.208859 4922 state_mem.go:107] "Deleted CPUSet assignment" podUID="dd20079d-34c4-4ea6-920f-420a1d6bb863" containerName="registry" Nov 28 07:00:00 crc kubenswrapper[4922]: I1128 07:00:00.209130 4922 memory_manager.go:354] "RemoveStaleState removing state" podUID="dd20079d-34c4-4ea6-920f-420a1d6bb863" containerName="registry" Nov 28 07:00:00 crc kubenswrapper[4922]: I1128 07:00:00.209994 4922 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29405220-bpqvm" Nov 28 07:00:00 crc kubenswrapper[4922]: I1128 07:00:00.215635 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Nov 28 07:00:00 crc kubenswrapper[4922]: I1128 07:00:00.215746 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29405220-bpqvm"] Nov 28 07:00:00 crc kubenswrapper[4922]: I1128 07:00:00.216549 4922 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Nov 28 07:00:00 crc kubenswrapper[4922]: I1128 07:00:00.277068 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/b9c226e4-e4fe-426b-9243-9ff96f72c93a-config-volume\") pod \"collect-profiles-29405220-bpqvm\" (UID: \"b9c226e4-e4fe-426b-9243-9ff96f72c93a\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405220-bpqvm" Nov 28 07:00:00 crc kubenswrapper[4922]: I1128 07:00:00.277167 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mmndn\" (UniqueName: \"kubernetes.io/projected/b9c226e4-e4fe-426b-9243-9ff96f72c93a-kube-api-access-mmndn\") pod \"collect-profiles-29405220-bpqvm\" (UID: \"b9c226e4-e4fe-426b-9243-9ff96f72c93a\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405220-bpqvm" Nov 28 07:00:00 crc kubenswrapper[4922]: I1128 07:00:00.277206 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/b9c226e4-e4fe-426b-9243-9ff96f72c93a-secret-volume\") pod \"collect-profiles-29405220-bpqvm\" (UID: \"b9c226e4-e4fe-426b-9243-9ff96f72c93a\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405220-bpqvm" Nov 28 07:00:00 crc kubenswrapper[4922]: I1128 07:00:00.378523 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mmndn\" (UniqueName: \"kubernetes.io/projected/b9c226e4-e4fe-426b-9243-9ff96f72c93a-kube-api-access-mmndn\") pod \"collect-profiles-29405220-bpqvm\" (UID: \"b9c226e4-e4fe-426b-9243-9ff96f72c93a\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405220-bpqvm" Nov 28 07:00:00 crc kubenswrapper[4922]: I1128 07:00:00.378606 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/b9c226e4-e4fe-426b-9243-9ff96f72c93a-secret-volume\") pod \"collect-profiles-29405220-bpqvm\" (UID: \"b9c226e4-e4fe-426b-9243-9ff96f72c93a\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405220-bpqvm" Nov 28 07:00:00 crc kubenswrapper[4922]: I1128 07:00:00.378700 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/b9c226e4-e4fe-426b-9243-9ff96f72c93a-config-volume\") pod \"collect-profiles-29405220-bpqvm\" (UID: \"b9c226e4-e4fe-426b-9243-9ff96f72c93a\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405220-bpqvm" Nov 28 07:00:00 crc kubenswrapper[4922]: I1128 07:00:00.380431 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/b9c226e4-e4fe-426b-9243-9ff96f72c93a-config-volume\") pod 
\"collect-profiles-29405220-bpqvm\" (UID: \"b9c226e4-e4fe-426b-9243-9ff96f72c93a\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405220-bpqvm" Nov 28 07:00:00 crc kubenswrapper[4922]: I1128 07:00:00.389080 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/b9c226e4-e4fe-426b-9243-9ff96f72c93a-secret-volume\") pod \"collect-profiles-29405220-bpqvm\" (UID: \"b9c226e4-e4fe-426b-9243-9ff96f72c93a\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405220-bpqvm" Nov 28 07:00:00 crc kubenswrapper[4922]: I1128 07:00:00.400861 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mmndn\" (UniqueName: \"kubernetes.io/projected/b9c226e4-e4fe-426b-9243-9ff96f72c93a-kube-api-access-mmndn\") pod \"collect-profiles-29405220-bpqvm\" (UID: \"b9c226e4-e4fe-426b-9243-9ff96f72c93a\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405220-bpqvm" Nov 28 07:00:00 crc kubenswrapper[4922]: I1128 07:00:00.531927 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29405220-bpqvm" Nov 28 07:00:00 crc kubenswrapper[4922]: I1128 07:00:00.984262 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29405220-bpqvm"] Nov 28 07:00:01 crc kubenswrapper[4922]: I1128 07:00:01.479723 4922 generic.go:334] "Generic (PLEG): container finished" podID="b9c226e4-e4fe-426b-9243-9ff96f72c93a" containerID="af64004bafd385c4d8d14c353710b2eb3556a86e73425b07063fb732991dc44b" exitCode=0 Nov 28 07:00:01 crc kubenswrapper[4922]: I1128 07:00:01.480078 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29405220-bpqvm" event={"ID":"b9c226e4-e4fe-426b-9243-9ff96f72c93a","Type":"ContainerDied","Data":"af64004bafd385c4d8d14c353710b2eb3556a86e73425b07063fb732991dc44b"} Nov 28 07:00:01 crc kubenswrapper[4922]: I1128 07:00:01.480114 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29405220-bpqvm" event={"ID":"b9c226e4-e4fe-426b-9243-9ff96f72c93a","Type":"ContainerStarted","Data":"b1db0e2e141fe1f742e5f65c57656b7854ec83cad45d1b67da29b52aba19f5be"} Nov 28 07:00:02 crc kubenswrapper[4922]: I1128 07:00:02.852263 4922 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29405220-bpqvm" Nov 28 07:00:02 crc kubenswrapper[4922]: I1128 07:00:02.920273 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/b9c226e4-e4fe-426b-9243-9ff96f72c93a-config-volume\") pod \"b9c226e4-e4fe-426b-9243-9ff96f72c93a\" (UID: \"b9c226e4-e4fe-426b-9243-9ff96f72c93a\") " Nov 28 07:00:02 crc kubenswrapper[4922]: I1128 07:00:02.920361 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mmndn\" (UniqueName: \"kubernetes.io/projected/b9c226e4-e4fe-426b-9243-9ff96f72c93a-kube-api-access-mmndn\") pod \"b9c226e4-e4fe-426b-9243-9ff96f72c93a\" (UID: \"b9c226e4-e4fe-426b-9243-9ff96f72c93a\") " Nov 28 07:00:02 crc kubenswrapper[4922]: I1128 07:00:02.920508 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/b9c226e4-e4fe-426b-9243-9ff96f72c93a-secret-volume\") pod \"b9c226e4-e4fe-426b-9243-9ff96f72c93a\" (UID: \"b9c226e4-e4fe-426b-9243-9ff96f72c93a\") " Nov 28 07:00:02 crc kubenswrapper[4922]: I1128 07:00:02.922551 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b9c226e4-e4fe-426b-9243-9ff96f72c93a-config-volume" (OuterVolumeSpecName: "config-volume") pod "b9c226e4-e4fe-426b-9243-9ff96f72c93a" (UID: "b9c226e4-e4fe-426b-9243-9ff96f72c93a"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 07:00:02 crc kubenswrapper[4922]: I1128 07:00:02.933091 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b9c226e4-e4fe-426b-9243-9ff96f72c93a-kube-api-access-mmndn" (OuterVolumeSpecName: "kube-api-access-mmndn") pod "b9c226e4-e4fe-426b-9243-9ff96f72c93a" (UID: "b9c226e4-e4fe-426b-9243-9ff96f72c93a"). InnerVolumeSpecName "kube-api-access-mmndn". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 07:00:02 crc kubenswrapper[4922]: I1128 07:00:02.933284 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b9c226e4-e4fe-426b-9243-9ff96f72c93a-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "b9c226e4-e4fe-426b-9243-9ff96f72c93a" (UID: "b9c226e4-e4fe-426b-9243-9ff96f72c93a"). InnerVolumeSpecName "secret-volume". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 07:00:03 crc kubenswrapper[4922]: I1128 07:00:03.022487 4922 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/b9c226e4-e4fe-426b-9243-9ff96f72c93a-secret-volume\") on node \"crc\" DevicePath \"\"" Nov 28 07:00:03 crc kubenswrapper[4922]: I1128 07:00:03.022531 4922 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/b9c226e4-e4fe-426b-9243-9ff96f72c93a-config-volume\") on node \"crc\" DevicePath \"\"" Nov 28 07:00:03 crc kubenswrapper[4922]: I1128 07:00:03.022546 4922 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mmndn\" (UniqueName: \"kubernetes.io/projected/b9c226e4-e4fe-426b-9243-9ff96f72c93a-kube-api-access-mmndn\") on node \"crc\" DevicePath \"\"" Nov 28 07:00:03 crc kubenswrapper[4922]: I1128 07:00:03.499383 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29405220-bpqvm" event={"ID":"b9c226e4-e4fe-426b-9243-9ff96f72c93a","Type":"ContainerDied","Data":"b1db0e2e141fe1f742e5f65c57656b7854ec83cad45d1b67da29b52aba19f5be"} Nov 28 07:00:03 crc kubenswrapper[4922]: I1128 07:00:03.499443 4922 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29405220-bpqvm" Nov 28 07:00:03 crc kubenswrapper[4922]: I1128 07:00:03.499466 4922 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="b1db0e2e141fe1f742e5f65c57656b7854ec83cad45d1b67da29b52aba19f5be" Nov 28 07:01:27 crc kubenswrapper[4922]: I1128 07:01:27.312128 4922 patch_prober.go:28] interesting pod/machine-config-daemon-h8wk6 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 07:01:27 crc kubenswrapper[4922]: I1128 07:01:27.312920 4922 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-h8wk6" podUID="0498340a-5e95-42bf-a0a6-8ac89a6b8858" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 07:01:57 crc kubenswrapper[4922]: I1128 07:01:57.312800 4922 patch_prober.go:28] interesting pod/machine-config-daemon-h8wk6 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 07:01:57 crc kubenswrapper[4922]: I1128 07:01:57.313550 4922 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-h8wk6" podUID="0498340a-5e95-42bf-a0a6-8ac89a6b8858" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 07:02:27 crc kubenswrapper[4922]: I1128 07:02:27.312047 4922 patch_prober.go:28] interesting pod/machine-config-daemon-h8wk6 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 07:02:27 crc kubenswrapper[4922]: I1128 07:02:27.313528 
4922 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-h8wk6" podUID="0498340a-5e95-42bf-a0a6-8ac89a6b8858" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 07:02:27 crc kubenswrapper[4922]: I1128 07:02:27.313611 4922 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-h8wk6" Nov 28 07:02:27 crc kubenswrapper[4922]: I1128 07:02:27.314406 4922 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"e2cc3a05c61bc0a99d69c8dde17ac05a9604299976427f43298e281dd4e8335e"} pod="openshift-machine-config-operator/machine-config-daemon-h8wk6" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 28 07:02:27 crc kubenswrapper[4922]: I1128 07:02:27.314494 4922 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-h8wk6" podUID="0498340a-5e95-42bf-a0a6-8ac89a6b8858" containerName="machine-config-daemon" containerID="cri-o://e2cc3a05c61bc0a99d69c8dde17ac05a9604299976427f43298e281dd4e8335e" gracePeriod=600 Nov 28 07:02:29 crc kubenswrapper[4922]: I1128 07:02:29.526049 4922 generic.go:334] "Generic (PLEG): container finished" podID="0498340a-5e95-42bf-a0a6-8ac89a6b8858" containerID="e2cc3a05c61bc0a99d69c8dde17ac05a9604299976427f43298e281dd4e8335e" exitCode=0 Nov 28 07:02:29 crc kubenswrapper[4922]: I1128 07:02:29.526107 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-h8wk6" event={"ID":"0498340a-5e95-42bf-a0a6-8ac89a6b8858","Type":"ContainerDied","Data":"e2cc3a05c61bc0a99d69c8dde17ac05a9604299976427f43298e281dd4e8335e"} Nov 28 07:02:29 crc kubenswrapper[4922]: I1128 07:02:29.526658 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-h8wk6" event={"ID":"0498340a-5e95-42bf-a0a6-8ac89a6b8858","Type":"ContainerStarted","Data":"9242a10d6f42a0f0e383f361e9eb3616225ad2f2836bf6798d0eb7b0a3a3d7b4"} Nov 28 07:02:29 crc kubenswrapper[4922]: I1128 07:02:29.526682 4922 scope.go:117] "RemoveContainer" containerID="faa2e15e685a62da719acf12fe75a2f590f6b07faa5cd9c8e0a536878c38e595" Nov 28 07:04:35 crc kubenswrapper[4922]: I1128 07:04:35.805333 4922 scope.go:117] "RemoveContainer" containerID="c824e0d3ca95486feb30581033528fe2fd342e53dc90c13fb3c522b924c03206" Nov 28 07:04:35 crc kubenswrapper[4922]: I1128 07:04:35.837109 4922 scope.go:117] "RemoveContainer" containerID="8783dcf357914bed469f9108dbe6e45affd4e86c285e240b65312f111a532b89" Nov 28 07:04:57 crc kubenswrapper[4922]: I1128 07:04:57.313150 4922 patch_prober.go:28] interesting pod/machine-config-daemon-h8wk6 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 07:04:57 crc kubenswrapper[4922]: I1128 07:04:57.313881 4922 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-h8wk6" podUID="0498340a-5e95-42bf-a0a6-8ac89a6b8858" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: 
connect: connection refused" Nov 28 07:05:26 crc kubenswrapper[4922]: I1128 07:05:26.254658 4922 dynamic_cafile_content.go:123] "Loaded a new CA Bundle and Verifier" name="client-ca-bundle::/etc/kubernetes/kubelet-ca.crt" Nov 28 07:05:27 crc kubenswrapper[4922]: I1128 07:05:27.312032 4922 patch_prober.go:28] interesting pod/machine-config-daemon-h8wk6 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 07:05:27 crc kubenswrapper[4922]: I1128 07:05:27.312973 4922 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-h8wk6" podUID="0498340a-5e95-42bf-a0a6-8ac89a6b8858" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 07:05:57 crc kubenswrapper[4922]: I1128 07:05:57.311724 4922 patch_prober.go:28] interesting pod/machine-config-daemon-h8wk6 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 07:05:57 crc kubenswrapper[4922]: I1128 07:05:57.312574 4922 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-h8wk6" podUID="0498340a-5e95-42bf-a0a6-8ac89a6b8858" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 07:05:57 crc kubenswrapper[4922]: I1128 07:05:57.312658 4922 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-h8wk6" Nov 28 07:05:57 crc kubenswrapper[4922]: I1128 07:05:57.313682 4922 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"9242a10d6f42a0f0e383f361e9eb3616225ad2f2836bf6798d0eb7b0a3a3d7b4"} pod="openshift-machine-config-operator/machine-config-daemon-h8wk6" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 28 07:05:57 crc kubenswrapper[4922]: I1128 07:05:57.313788 4922 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-h8wk6" podUID="0498340a-5e95-42bf-a0a6-8ac89a6b8858" containerName="machine-config-daemon" containerID="cri-o://9242a10d6f42a0f0e383f361e9eb3616225ad2f2836bf6798d0eb7b0a3a3d7b4" gracePeriod=600 Nov 28 07:05:58 crc kubenswrapper[4922]: I1128 07:05:58.163334 4922 generic.go:334] "Generic (PLEG): container finished" podID="0498340a-5e95-42bf-a0a6-8ac89a6b8858" containerID="9242a10d6f42a0f0e383f361e9eb3616225ad2f2836bf6798d0eb7b0a3a3d7b4" exitCode=0 Nov 28 07:05:58 crc kubenswrapper[4922]: I1128 07:05:58.163422 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-h8wk6" event={"ID":"0498340a-5e95-42bf-a0a6-8ac89a6b8858","Type":"ContainerDied","Data":"9242a10d6f42a0f0e383f361e9eb3616225ad2f2836bf6798d0eb7b0a3a3d7b4"} Nov 28 07:05:58 crc kubenswrapper[4922]: I1128 07:05:58.163865 4922 scope.go:117] "RemoveContainer" containerID="e2cc3a05c61bc0a99d69c8dde17ac05a9604299976427f43298e281dd4e8335e" Nov 28 07:05:59 crc 
kubenswrapper[4922]: I1128 07:05:59.174086 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-h8wk6" event={"ID":"0498340a-5e95-42bf-a0a6-8ac89a6b8858","Type":"ContainerStarted","Data":"e4c215f7c16b1b6ee789152f5bfa304df0b7e2d633a6748eb5b815f0448ea2e7"} Nov 28 07:07:10 crc kubenswrapper[4922]: I1128 07:07:10.466908 4922 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-7gdxt"] Nov 28 07:07:10 crc kubenswrapper[4922]: I1128 07:07:10.468508 4922 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-7gdxt" podUID="ac5c6b67-2037-400e-8e03-845b47d8ca67" containerName="ovn-controller" containerID="cri-o://403e86b4b99e8bc85be77996288705f6ea8ff5da8e9b7dfb09749f4341065b55" gracePeriod=30 Nov 28 07:07:10 crc kubenswrapper[4922]: I1128 07:07:10.468687 4922 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-7gdxt" podUID="ac5c6b67-2037-400e-8e03-845b47d8ca67" containerName="kube-rbac-proxy-node" containerID="cri-o://d0f038ffe6c8ec5ec836379b827ecb71967119efca3f2cd4ce5d3006fa96a153" gracePeriod=30 Nov 28 07:07:10 crc kubenswrapper[4922]: I1128 07:07:10.468743 4922 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-7gdxt" podUID="ac5c6b67-2037-400e-8e03-845b47d8ca67" containerName="kube-rbac-proxy-ovn-metrics" containerID="cri-o://2c1a545fef7aec25b21dd112c54eeb7b94306cafaf4732a50de9ff10a409c443" gracePeriod=30 Nov 28 07:07:10 crc kubenswrapper[4922]: I1128 07:07:10.468772 4922 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-7gdxt" podUID="ac5c6b67-2037-400e-8e03-845b47d8ca67" containerName="nbdb" containerID="cri-o://6e9942f8e8cdf6ffd04d46b29be5a53fcc8c4cb5d79f50d7747e2fe6eb744ffd" gracePeriod=30 Nov 28 07:07:10 crc kubenswrapper[4922]: I1128 07:07:10.468861 4922 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-7gdxt" podUID="ac5c6b67-2037-400e-8e03-845b47d8ca67" containerName="sbdb" containerID="cri-o://d31c99f96b8af9c7b9c47c4ccd0b6485b23c9eacfe293667038042df8330d8a9" gracePeriod=30 Nov 28 07:07:10 crc kubenswrapper[4922]: I1128 07:07:10.468936 4922 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-7gdxt" podUID="ac5c6b67-2037-400e-8e03-845b47d8ca67" containerName="ovn-acl-logging" containerID="cri-o://4f56366e2355663eb896c09a6710166b89f794a7f17cb317f6c4299e8b317782" gracePeriod=30 Nov 28 07:07:10 crc kubenswrapper[4922]: I1128 07:07:10.468969 4922 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-7gdxt" podUID="ac5c6b67-2037-400e-8e03-845b47d8ca67" containerName="northd" containerID="cri-o://e91ea6957087e19d43626438d68bf92a19a6f35325de570738354edd61da5bb0" gracePeriod=30 Nov 28 07:07:10 crc kubenswrapper[4922]: I1128 07:07:10.520999 4922 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-7gdxt" podUID="ac5c6b67-2037-400e-8e03-845b47d8ca67" containerName="ovnkube-controller" containerID="cri-o://4d624c510caffbd9c395e40cf877efb8665931031243034a0c2ab69d9a8bbeee" gracePeriod=30 Nov 28 07:07:10 crc kubenswrapper[4922]: I1128 07:07:10.675414 4922 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openshift-multus_multus-jgzjd_b05f16bb-1729-4fd8-883a-4fb960bf4cff/kube-multus/2.log" Nov 28 07:07:10 crc kubenswrapper[4922]: I1128 07:07:10.675963 4922 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-jgzjd_b05f16bb-1729-4fd8-883a-4fb960bf4cff/kube-multus/1.log" Nov 28 07:07:10 crc kubenswrapper[4922]: I1128 07:07:10.676015 4922 generic.go:334] "Generic (PLEG): container finished" podID="b05f16bb-1729-4fd8-883a-4fb960bf4cff" containerID="cd044b0293e9c0b8120f81513a27b31b65138f0396ed6a9d48e1b3c3da93f027" exitCode=2 Nov 28 07:07:10 crc kubenswrapper[4922]: I1128 07:07:10.676125 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-jgzjd" event={"ID":"b05f16bb-1729-4fd8-883a-4fb960bf4cff","Type":"ContainerDied","Data":"cd044b0293e9c0b8120f81513a27b31b65138f0396ed6a9d48e1b3c3da93f027"} Nov 28 07:07:10 crc kubenswrapper[4922]: I1128 07:07:10.676204 4922 scope.go:117] "RemoveContainer" containerID="18ece98b3e5c71e3aa7b48051d91a0684f108688cd8a31e00422d5d0b047a76e" Nov 28 07:07:10 crc kubenswrapper[4922]: I1128 07:07:10.676725 4922 scope.go:117] "RemoveContainer" containerID="cd044b0293e9c0b8120f81513a27b31b65138f0396ed6a9d48e1b3c3da93f027" Nov 28 07:07:10 crc kubenswrapper[4922]: I1128 07:07:10.679985 4922 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-7gdxt_ac5c6b67-2037-400e-8e03-845b47d8ca67/ovnkube-controller/3.log" Nov 28 07:07:10 crc kubenswrapper[4922]: I1128 07:07:10.682252 4922 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-7gdxt_ac5c6b67-2037-400e-8e03-845b47d8ca67/ovn-acl-logging/0.log" Nov 28 07:07:10 crc kubenswrapper[4922]: I1128 07:07:10.682715 4922 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-7gdxt_ac5c6b67-2037-400e-8e03-845b47d8ca67/ovn-controller/0.log" Nov 28 07:07:10 crc kubenswrapper[4922]: I1128 07:07:10.683043 4922 generic.go:334] "Generic (PLEG): container finished" podID="ac5c6b67-2037-400e-8e03-845b47d8ca67" containerID="4d624c510caffbd9c395e40cf877efb8665931031243034a0c2ab69d9a8bbeee" exitCode=0 Nov 28 07:07:10 crc kubenswrapper[4922]: I1128 07:07:10.683071 4922 generic.go:334] "Generic (PLEG): container finished" podID="ac5c6b67-2037-400e-8e03-845b47d8ca67" containerID="2c1a545fef7aec25b21dd112c54eeb7b94306cafaf4732a50de9ff10a409c443" exitCode=0 Nov 28 07:07:10 crc kubenswrapper[4922]: I1128 07:07:10.683080 4922 generic.go:334] "Generic (PLEG): container finished" podID="ac5c6b67-2037-400e-8e03-845b47d8ca67" containerID="d0f038ffe6c8ec5ec836379b827ecb71967119efca3f2cd4ce5d3006fa96a153" exitCode=0 Nov 28 07:07:10 crc kubenswrapper[4922]: I1128 07:07:10.683089 4922 generic.go:334] "Generic (PLEG): container finished" podID="ac5c6b67-2037-400e-8e03-845b47d8ca67" containerID="4f56366e2355663eb896c09a6710166b89f794a7f17cb317f6c4299e8b317782" exitCode=143 Nov 28 07:07:10 crc kubenswrapper[4922]: I1128 07:07:10.683099 4922 generic.go:334] "Generic (PLEG): container finished" podID="ac5c6b67-2037-400e-8e03-845b47d8ca67" containerID="403e86b4b99e8bc85be77996288705f6ea8ff5da8e9b7dfb09749f4341065b55" exitCode=143 Nov 28 07:07:10 crc kubenswrapper[4922]: I1128 07:07:10.683122 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-7gdxt" event={"ID":"ac5c6b67-2037-400e-8e03-845b47d8ca67","Type":"ContainerDied","Data":"4d624c510caffbd9c395e40cf877efb8665931031243034a0c2ab69d9a8bbeee"} Nov 28 07:07:10 crc 
kubenswrapper[4922]: I1128 07:07:10.683153 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-7gdxt" event={"ID":"ac5c6b67-2037-400e-8e03-845b47d8ca67","Type":"ContainerDied","Data":"2c1a545fef7aec25b21dd112c54eeb7b94306cafaf4732a50de9ff10a409c443"} Nov 28 07:07:10 crc kubenswrapper[4922]: I1128 07:07:10.683166 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-7gdxt" event={"ID":"ac5c6b67-2037-400e-8e03-845b47d8ca67","Type":"ContainerDied","Data":"d0f038ffe6c8ec5ec836379b827ecb71967119efca3f2cd4ce5d3006fa96a153"} Nov 28 07:07:10 crc kubenswrapper[4922]: I1128 07:07:10.683178 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-7gdxt" event={"ID":"ac5c6b67-2037-400e-8e03-845b47d8ca67","Type":"ContainerDied","Data":"4f56366e2355663eb896c09a6710166b89f794a7f17cb317f6c4299e8b317782"} Nov 28 07:07:10 crc kubenswrapper[4922]: I1128 07:07:10.683190 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-7gdxt" event={"ID":"ac5c6b67-2037-400e-8e03-845b47d8ca67","Type":"ContainerDied","Data":"403e86b4b99e8bc85be77996288705f6ea8ff5da8e9b7dfb09749f4341065b55"} Nov 28 07:07:10 crc kubenswrapper[4922]: I1128 07:07:10.738970 4922 scope.go:117] "RemoveContainer" containerID="34af1a43ea6a98f41968e9d8fedf88b2886eff55e56e71236e5e9a4c181652af" Nov 28 07:07:10 crc kubenswrapper[4922]: I1128 07:07:10.841369 4922 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-7gdxt_ac5c6b67-2037-400e-8e03-845b47d8ca67/ovn-acl-logging/0.log" Nov 28 07:07:10 crc kubenswrapper[4922]: I1128 07:07:10.842057 4922 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-7gdxt_ac5c6b67-2037-400e-8e03-845b47d8ca67/ovn-controller/0.log" Nov 28 07:07:10 crc kubenswrapper[4922]: I1128 07:07:10.842455 4922 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-7gdxt" Nov 28 07:07:10 crc kubenswrapper[4922]: I1128 07:07:10.893133 4922 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-nzttm"] Nov 28 07:07:10 crc kubenswrapper[4922]: E1128 07:07:10.893585 4922 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ac5c6b67-2037-400e-8e03-845b47d8ca67" containerName="nbdb" Nov 28 07:07:10 crc kubenswrapper[4922]: I1128 07:07:10.893614 4922 state_mem.go:107] "Deleted CPUSet assignment" podUID="ac5c6b67-2037-400e-8e03-845b47d8ca67" containerName="nbdb" Nov 28 07:07:10 crc kubenswrapper[4922]: E1128 07:07:10.893628 4922 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ac5c6b67-2037-400e-8e03-845b47d8ca67" containerName="ovn-controller" Nov 28 07:07:10 crc kubenswrapper[4922]: I1128 07:07:10.894029 4922 state_mem.go:107] "Deleted CPUSet assignment" podUID="ac5c6b67-2037-400e-8e03-845b47d8ca67" containerName="ovn-controller" Nov 28 07:07:10 crc kubenswrapper[4922]: E1128 07:07:10.894060 4922 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ac5c6b67-2037-400e-8e03-845b47d8ca67" containerName="northd" Nov 28 07:07:10 crc kubenswrapper[4922]: I1128 07:07:10.894072 4922 state_mem.go:107] "Deleted CPUSet assignment" podUID="ac5c6b67-2037-400e-8e03-845b47d8ca67" containerName="northd" Nov 28 07:07:10 crc kubenswrapper[4922]: E1128 07:07:10.894086 4922 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ac5c6b67-2037-400e-8e03-845b47d8ca67" containerName="kube-rbac-proxy-node" Nov 28 07:07:10 crc kubenswrapper[4922]: I1128 07:07:10.894097 4922 state_mem.go:107] "Deleted CPUSet assignment" podUID="ac5c6b67-2037-400e-8e03-845b47d8ca67" containerName="kube-rbac-proxy-node" Nov 28 07:07:10 crc kubenswrapper[4922]: E1128 07:07:10.894112 4922 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ac5c6b67-2037-400e-8e03-845b47d8ca67" containerName="ovnkube-controller" Nov 28 07:07:10 crc kubenswrapper[4922]: I1128 07:07:10.894121 4922 state_mem.go:107] "Deleted CPUSet assignment" podUID="ac5c6b67-2037-400e-8e03-845b47d8ca67" containerName="ovnkube-controller" Nov 28 07:07:10 crc kubenswrapper[4922]: E1128 07:07:10.894133 4922 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ac5c6b67-2037-400e-8e03-845b47d8ca67" containerName="ovnkube-controller" Nov 28 07:07:10 crc kubenswrapper[4922]: I1128 07:07:10.894142 4922 state_mem.go:107] "Deleted CPUSet assignment" podUID="ac5c6b67-2037-400e-8e03-845b47d8ca67" containerName="ovnkube-controller" Nov 28 07:07:10 crc kubenswrapper[4922]: E1128 07:07:10.894154 4922 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ac5c6b67-2037-400e-8e03-845b47d8ca67" containerName="kube-rbac-proxy-ovn-metrics" Nov 28 07:07:10 crc kubenswrapper[4922]: I1128 07:07:10.894163 4922 state_mem.go:107] "Deleted CPUSet assignment" podUID="ac5c6b67-2037-400e-8e03-845b47d8ca67" containerName="kube-rbac-proxy-ovn-metrics" Nov 28 07:07:10 crc kubenswrapper[4922]: E1128 07:07:10.894179 4922 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ac5c6b67-2037-400e-8e03-845b47d8ca67" containerName="sbdb" Nov 28 07:07:10 crc kubenswrapper[4922]: I1128 07:07:10.894190 4922 state_mem.go:107] "Deleted CPUSet assignment" podUID="ac5c6b67-2037-400e-8e03-845b47d8ca67" containerName="sbdb" Nov 28 07:07:10 crc kubenswrapper[4922]: E1128 07:07:10.894206 4922 cpu_manager.go:410] "RemoveStaleState: removing container" 
podUID="b9c226e4-e4fe-426b-9243-9ff96f72c93a" containerName="collect-profiles" Nov 28 07:07:10 crc kubenswrapper[4922]: I1128 07:07:10.894286 4922 state_mem.go:107] "Deleted CPUSet assignment" podUID="b9c226e4-e4fe-426b-9243-9ff96f72c93a" containerName="collect-profiles" Nov 28 07:07:10 crc kubenswrapper[4922]: E1128 07:07:10.894301 4922 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ac5c6b67-2037-400e-8e03-845b47d8ca67" containerName="kubecfg-setup" Nov 28 07:07:10 crc kubenswrapper[4922]: I1128 07:07:10.894311 4922 state_mem.go:107] "Deleted CPUSet assignment" podUID="ac5c6b67-2037-400e-8e03-845b47d8ca67" containerName="kubecfg-setup" Nov 28 07:07:10 crc kubenswrapper[4922]: E1128 07:07:10.894323 4922 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ac5c6b67-2037-400e-8e03-845b47d8ca67" containerName="ovn-acl-logging" Nov 28 07:07:10 crc kubenswrapper[4922]: I1128 07:07:10.894332 4922 state_mem.go:107] "Deleted CPUSet assignment" podUID="ac5c6b67-2037-400e-8e03-845b47d8ca67" containerName="ovn-acl-logging" Nov 28 07:07:10 crc kubenswrapper[4922]: E1128 07:07:10.894351 4922 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ac5c6b67-2037-400e-8e03-845b47d8ca67" containerName="ovnkube-controller" Nov 28 07:07:10 crc kubenswrapper[4922]: I1128 07:07:10.894362 4922 state_mem.go:107] "Deleted CPUSet assignment" podUID="ac5c6b67-2037-400e-8e03-845b47d8ca67" containerName="ovnkube-controller" Nov 28 07:07:10 crc kubenswrapper[4922]: I1128 07:07:10.894513 4922 memory_manager.go:354] "RemoveStaleState removing state" podUID="ac5c6b67-2037-400e-8e03-845b47d8ca67" containerName="ovnkube-controller" Nov 28 07:07:10 crc kubenswrapper[4922]: I1128 07:07:10.894532 4922 memory_manager.go:354] "RemoveStaleState removing state" podUID="ac5c6b67-2037-400e-8e03-845b47d8ca67" containerName="ovnkube-controller" Nov 28 07:07:10 crc kubenswrapper[4922]: I1128 07:07:10.894543 4922 memory_manager.go:354] "RemoveStaleState removing state" podUID="b9c226e4-e4fe-426b-9243-9ff96f72c93a" containerName="collect-profiles" Nov 28 07:07:10 crc kubenswrapper[4922]: I1128 07:07:10.894556 4922 memory_manager.go:354] "RemoveStaleState removing state" podUID="ac5c6b67-2037-400e-8e03-845b47d8ca67" containerName="kube-rbac-proxy-ovn-metrics" Nov 28 07:07:10 crc kubenswrapper[4922]: I1128 07:07:10.894568 4922 memory_manager.go:354] "RemoveStaleState removing state" podUID="ac5c6b67-2037-400e-8e03-845b47d8ca67" containerName="ovn-acl-logging" Nov 28 07:07:10 crc kubenswrapper[4922]: I1128 07:07:10.894580 4922 memory_manager.go:354] "RemoveStaleState removing state" podUID="ac5c6b67-2037-400e-8e03-845b47d8ca67" containerName="ovnkube-controller" Nov 28 07:07:10 crc kubenswrapper[4922]: I1128 07:07:10.894592 4922 memory_manager.go:354] "RemoveStaleState removing state" podUID="ac5c6b67-2037-400e-8e03-845b47d8ca67" containerName="ovnkube-controller" Nov 28 07:07:10 crc kubenswrapper[4922]: I1128 07:07:10.894603 4922 memory_manager.go:354] "RemoveStaleState removing state" podUID="ac5c6b67-2037-400e-8e03-845b47d8ca67" containerName="kube-rbac-proxy-node" Nov 28 07:07:10 crc kubenswrapper[4922]: I1128 07:07:10.894613 4922 memory_manager.go:354] "RemoveStaleState removing state" podUID="ac5c6b67-2037-400e-8e03-845b47d8ca67" containerName="northd" Nov 28 07:07:10 crc kubenswrapper[4922]: I1128 07:07:10.894626 4922 memory_manager.go:354] "RemoveStaleState removing state" podUID="ac5c6b67-2037-400e-8e03-845b47d8ca67" containerName="sbdb" Nov 28 07:07:10 crc kubenswrapper[4922]: I1128 
07:07:10.894639 4922 memory_manager.go:354] "RemoveStaleState removing state" podUID="ac5c6b67-2037-400e-8e03-845b47d8ca67" containerName="nbdb" Nov 28 07:07:10 crc kubenswrapper[4922]: I1128 07:07:10.894651 4922 memory_manager.go:354] "RemoveStaleState removing state" podUID="ac5c6b67-2037-400e-8e03-845b47d8ca67" containerName="ovn-controller" Nov 28 07:07:10 crc kubenswrapper[4922]: E1128 07:07:10.894783 4922 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ac5c6b67-2037-400e-8e03-845b47d8ca67" containerName="ovnkube-controller" Nov 28 07:07:10 crc kubenswrapper[4922]: I1128 07:07:10.894797 4922 state_mem.go:107] "Deleted CPUSet assignment" podUID="ac5c6b67-2037-400e-8e03-845b47d8ca67" containerName="ovnkube-controller" Nov 28 07:07:10 crc kubenswrapper[4922]: E1128 07:07:10.894812 4922 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ac5c6b67-2037-400e-8e03-845b47d8ca67" containerName="ovnkube-controller" Nov 28 07:07:10 crc kubenswrapper[4922]: I1128 07:07:10.894821 4922 state_mem.go:107] "Deleted CPUSet assignment" podUID="ac5c6b67-2037-400e-8e03-845b47d8ca67" containerName="ovnkube-controller" Nov 28 07:07:10 crc kubenswrapper[4922]: I1128 07:07:10.894962 4922 memory_manager.go:354] "RemoveStaleState removing state" podUID="ac5c6b67-2037-400e-8e03-845b47d8ca67" containerName="ovnkube-controller" Nov 28 07:07:10 crc kubenswrapper[4922]: I1128 07:07:10.897090 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-nzttm" Nov 28 07:07:10 crc kubenswrapper[4922]: I1128 07:07:10.936953 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/ac5c6b67-2037-400e-8e03-845b47d8ca67-host-slash\") pod \"ac5c6b67-2037-400e-8e03-845b47d8ca67\" (UID: \"ac5c6b67-2037-400e-8e03-845b47d8ca67\") " Nov 28 07:07:10 crc kubenswrapper[4922]: I1128 07:07:10.937011 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/ac5c6b67-2037-400e-8e03-845b47d8ca67-host-run-ovn-kubernetes\") pod \"ac5c6b67-2037-400e-8e03-845b47d8ca67\" (UID: \"ac5c6b67-2037-400e-8e03-845b47d8ca67\") " Nov 28 07:07:10 crc kubenswrapper[4922]: I1128 07:07:10.937040 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/ac5c6b67-2037-400e-8e03-845b47d8ca67-log-socket\") pod \"ac5c6b67-2037-400e-8e03-845b47d8ca67\" (UID: \"ac5c6b67-2037-400e-8e03-845b47d8ca67\") " Nov 28 07:07:10 crc kubenswrapper[4922]: I1128 07:07:10.937074 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/ac5c6b67-2037-400e-8e03-845b47d8ca67-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ac5c6b67-2037-400e-8e03-845b47d8ca67\" (UID: \"ac5c6b67-2037-400e-8e03-845b47d8ca67\") " Nov 28 07:07:10 crc kubenswrapper[4922]: I1128 07:07:10.937123 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/ac5c6b67-2037-400e-8e03-845b47d8ca67-ovnkube-config\") pod \"ac5c6b67-2037-400e-8e03-845b47d8ca67\" (UID: \"ac5c6b67-2037-400e-8e03-845b47d8ca67\") " Nov 28 07:07:10 crc kubenswrapper[4922]: I1128 07:07:10.937160 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-run-netns\" (UniqueName: 
\"kubernetes.io/host-path/ac5c6b67-2037-400e-8e03-845b47d8ca67-host-run-netns\") pod \"ac5c6b67-2037-400e-8e03-845b47d8ca67\" (UID: \"ac5c6b67-2037-400e-8e03-845b47d8ca67\") " Nov 28 07:07:10 crc kubenswrapper[4922]: I1128 07:07:10.937192 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/ac5c6b67-2037-400e-8e03-845b47d8ca67-var-lib-openvswitch\") pod \"ac5c6b67-2037-400e-8e03-845b47d8ca67\" (UID: \"ac5c6b67-2037-400e-8e03-845b47d8ca67\") " Nov 28 07:07:10 crc kubenswrapper[4922]: I1128 07:07:10.937243 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/ac5c6b67-2037-400e-8e03-845b47d8ca67-host-cni-bin\") pod \"ac5c6b67-2037-400e-8e03-845b47d8ca67\" (UID: \"ac5c6b67-2037-400e-8e03-845b47d8ca67\") " Nov 28 07:07:10 crc kubenswrapper[4922]: I1128 07:07:10.937274 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/ac5c6b67-2037-400e-8e03-845b47d8ca67-run-openvswitch\") pod \"ac5c6b67-2037-400e-8e03-845b47d8ca67\" (UID: \"ac5c6b67-2037-400e-8e03-845b47d8ca67\") " Nov 28 07:07:10 crc kubenswrapper[4922]: I1128 07:07:10.937292 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/ac5c6b67-2037-400e-8e03-845b47d8ca67-host-var-lib-cni-networks-ovn-kubernetes" (OuterVolumeSpecName: "host-var-lib-cni-networks-ovn-kubernetes") pod "ac5c6b67-2037-400e-8e03-845b47d8ca67" (UID: "ac5c6b67-2037-400e-8e03-845b47d8ca67"). InnerVolumeSpecName "host-var-lib-cni-networks-ovn-kubernetes". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 28 07:07:10 crc kubenswrapper[4922]: I1128 07:07:10.937300 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/ac5c6b67-2037-400e-8e03-845b47d8ca67-node-log\") pod \"ac5c6b67-2037-400e-8e03-845b47d8ca67\" (UID: \"ac5c6b67-2037-400e-8e03-845b47d8ca67\") " Nov 28 07:07:10 crc kubenswrapper[4922]: I1128 07:07:10.937331 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/ac5c6b67-2037-400e-8e03-845b47d8ca67-host-slash" (OuterVolumeSpecName: "host-slash") pod "ac5c6b67-2037-400e-8e03-845b47d8ca67" (UID: "ac5c6b67-2037-400e-8e03-845b47d8ca67"). InnerVolumeSpecName "host-slash". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 28 07:07:10 crc kubenswrapper[4922]: I1128 07:07:10.937330 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/ac5c6b67-2037-400e-8e03-845b47d8ca67-host-cni-netd\") pod \"ac5c6b67-2037-400e-8e03-845b47d8ca67\" (UID: \"ac5c6b67-2037-400e-8e03-845b47d8ca67\") " Nov 28 07:07:10 crc kubenswrapper[4922]: I1128 07:07:10.937365 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/ac5c6b67-2037-400e-8e03-845b47d8ca67-systemd-units\") pod \"ac5c6b67-2037-400e-8e03-845b47d8ca67\" (UID: \"ac5c6b67-2037-400e-8e03-845b47d8ca67\") " Nov 28 07:07:10 crc kubenswrapper[4922]: I1128 07:07:10.937369 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/ac5c6b67-2037-400e-8e03-845b47d8ca67-host-cni-netd" (OuterVolumeSpecName: "host-cni-netd") pod "ac5c6b67-2037-400e-8e03-845b47d8ca67" (UID: "ac5c6b67-2037-400e-8e03-845b47d8ca67"). InnerVolumeSpecName "host-cni-netd". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 28 07:07:10 crc kubenswrapper[4922]: I1128 07:07:10.937398 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/ac5c6b67-2037-400e-8e03-845b47d8ca67-ovnkube-script-lib\") pod \"ac5c6b67-2037-400e-8e03-845b47d8ca67\" (UID: \"ac5c6b67-2037-400e-8e03-845b47d8ca67\") " Nov 28 07:07:10 crc kubenswrapper[4922]: I1128 07:07:10.937418 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/ac5c6b67-2037-400e-8e03-845b47d8ca67-run-ovn\") pod \"ac5c6b67-2037-400e-8e03-845b47d8ca67\" (UID: \"ac5c6b67-2037-400e-8e03-845b47d8ca67\") " Nov 28 07:07:10 crc kubenswrapper[4922]: I1128 07:07:10.937439 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/ac5c6b67-2037-400e-8e03-845b47d8ca67-env-overrides\") pod \"ac5c6b67-2037-400e-8e03-845b47d8ca67\" (UID: \"ac5c6b67-2037-400e-8e03-845b47d8ca67\") " Nov 28 07:07:10 crc kubenswrapper[4922]: I1128 07:07:10.937461 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/ac5c6b67-2037-400e-8e03-845b47d8ca67-ovn-node-metrics-cert\") pod \"ac5c6b67-2037-400e-8e03-845b47d8ca67\" (UID: \"ac5c6b67-2037-400e-8e03-845b47d8ca67\") " Nov 28 07:07:10 crc kubenswrapper[4922]: I1128 07:07:10.937488 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/ac5c6b67-2037-400e-8e03-845b47d8ca67-etc-openvswitch\") pod \"ac5c6b67-2037-400e-8e03-845b47d8ca67\" (UID: \"ac5c6b67-2037-400e-8e03-845b47d8ca67\") " Nov 28 07:07:10 crc kubenswrapper[4922]: I1128 07:07:10.937504 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dtxdn\" (UniqueName: \"kubernetes.io/projected/ac5c6b67-2037-400e-8e03-845b47d8ca67-kube-api-access-dtxdn\") pod \"ac5c6b67-2037-400e-8e03-845b47d8ca67\" (UID: \"ac5c6b67-2037-400e-8e03-845b47d8ca67\") " Nov 28 07:07:10 crc kubenswrapper[4922]: I1128 07:07:10.937520 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-systemd\" (UniqueName: 
\"kubernetes.io/host-path/ac5c6b67-2037-400e-8e03-845b47d8ca67-run-systemd\") pod \"ac5c6b67-2037-400e-8e03-845b47d8ca67\" (UID: \"ac5c6b67-2037-400e-8e03-845b47d8ca67\") " Nov 28 07:07:10 crc kubenswrapper[4922]: I1128 07:07:10.937533 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/ac5c6b67-2037-400e-8e03-845b47d8ca67-host-kubelet\") pod \"ac5c6b67-2037-400e-8e03-845b47d8ca67\" (UID: \"ac5c6b67-2037-400e-8e03-845b47d8ca67\") " Nov 28 07:07:10 crc kubenswrapper[4922]: I1128 07:07:10.937600 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/3a854b65-3cdb-49d1-8597-86fe8669ba91-node-log\") pod \"ovnkube-node-nzttm\" (UID: \"3a854b65-3cdb-49d1-8597-86fe8669ba91\") " pod="openshift-ovn-kubernetes/ovnkube-node-nzttm" Nov 28 07:07:10 crc kubenswrapper[4922]: I1128 07:07:10.937621 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/3a854b65-3cdb-49d1-8597-86fe8669ba91-env-overrides\") pod \"ovnkube-node-nzttm\" (UID: \"3a854b65-3cdb-49d1-8597-86fe8669ba91\") " pod="openshift-ovn-kubernetes/ovnkube-node-nzttm" Nov 28 07:07:10 crc kubenswrapper[4922]: I1128 07:07:10.937641 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/3a854b65-3cdb-49d1-8597-86fe8669ba91-log-socket\") pod \"ovnkube-node-nzttm\" (UID: \"3a854b65-3cdb-49d1-8597-86fe8669ba91\") " pod="openshift-ovn-kubernetes/ovnkube-node-nzttm" Nov 28 07:07:10 crc kubenswrapper[4922]: I1128 07:07:10.937659 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/3a854b65-3cdb-49d1-8597-86fe8669ba91-host-run-netns\") pod \"ovnkube-node-nzttm\" (UID: \"3a854b65-3cdb-49d1-8597-86fe8669ba91\") " pod="openshift-ovn-kubernetes/ovnkube-node-nzttm" Nov 28 07:07:10 crc kubenswrapper[4922]: I1128 07:07:10.937675 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/3a854b65-3cdb-49d1-8597-86fe8669ba91-host-cni-netd\") pod \"ovnkube-node-nzttm\" (UID: \"3a854b65-3cdb-49d1-8597-86fe8669ba91\") " pod="openshift-ovn-kubernetes/ovnkube-node-nzttm" Nov 28 07:07:10 crc kubenswrapper[4922]: I1128 07:07:10.937689 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/3a854b65-3cdb-49d1-8597-86fe8669ba91-var-lib-openvswitch\") pod \"ovnkube-node-nzttm\" (UID: \"3a854b65-3cdb-49d1-8597-86fe8669ba91\") " pod="openshift-ovn-kubernetes/ovnkube-node-nzttm" Nov 28 07:07:10 crc kubenswrapper[4922]: I1128 07:07:10.937706 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-28b5b\" (UniqueName: \"kubernetes.io/projected/3a854b65-3cdb-49d1-8597-86fe8669ba91-kube-api-access-28b5b\") pod \"ovnkube-node-nzttm\" (UID: \"3a854b65-3cdb-49d1-8597-86fe8669ba91\") " pod="openshift-ovn-kubernetes/ovnkube-node-nzttm" Nov 28 07:07:10 crc kubenswrapper[4922]: I1128 07:07:10.937722 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-slash\" (UniqueName: 
\"kubernetes.io/host-path/3a854b65-3cdb-49d1-8597-86fe8669ba91-host-slash\") pod \"ovnkube-node-nzttm\" (UID: \"3a854b65-3cdb-49d1-8597-86fe8669ba91\") " pod="openshift-ovn-kubernetes/ovnkube-node-nzttm" Nov 28 07:07:10 crc kubenswrapper[4922]: I1128 07:07:10.937750 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/3a854b65-3cdb-49d1-8597-86fe8669ba91-etc-openvswitch\") pod \"ovnkube-node-nzttm\" (UID: \"3a854b65-3cdb-49d1-8597-86fe8669ba91\") " pod="openshift-ovn-kubernetes/ovnkube-node-nzttm" Nov 28 07:07:10 crc kubenswrapper[4922]: I1128 07:07:10.937765 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/3a854b65-3cdb-49d1-8597-86fe8669ba91-run-systemd\") pod \"ovnkube-node-nzttm\" (UID: \"3a854b65-3cdb-49d1-8597-86fe8669ba91\") " pod="openshift-ovn-kubernetes/ovnkube-node-nzttm" Nov 28 07:07:10 crc kubenswrapper[4922]: I1128 07:07:10.937779 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/3a854b65-3cdb-49d1-8597-86fe8669ba91-ovnkube-script-lib\") pod \"ovnkube-node-nzttm\" (UID: \"3a854b65-3cdb-49d1-8597-86fe8669ba91\") " pod="openshift-ovn-kubernetes/ovnkube-node-nzttm" Nov 28 07:07:10 crc kubenswrapper[4922]: I1128 07:07:10.937796 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/3a854b65-3cdb-49d1-8597-86fe8669ba91-ovn-node-metrics-cert\") pod \"ovnkube-node-nzttm\" (UID: \"3a854b65-3cdb-49d1-8597-86fe8669ba91\") " pod="openshift-ovn-kubernetes/ovnkube-node-nzttm" Nov 28 07:07:10 crc kubenswrapper[4922]: I1128 07:07:10.937815 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/3a854b65-3cdb-49d1-8597-86fe8669ba91-host-cni-bin\") pod \"ovnkube-node-nzttm\" (UID: \"3a854b65-3cdb-49d1-8597-86fe8669ba91\") " pod="openshift-ovn-kubernetes/ovnkube-node-nzttm" Nov 28 07:07:10 crc kubenswrapper[4922]: I1128 07:07:10.937832 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/3a854b65-3cdb-49d1-8597-86fe8669ba91-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-nzttm\" (UID: \"3a854b65-3cdb-49d1-8597-86fe8669ba91\") " pod="openshift-ovn-kubernetes/ovnkube-node-nzttm" Nov 28 07:07:10 crc kubenswrapper[4922]: I1128 07:07:10.937848 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/3a854b65-3cdb-49d1-8597-86fe8669ba91-host-kubelet\") pod \"ovnkube-node-nzttm\" (UID: \"3a854b65-3cdb-49d1-8597-86fe8669ba91\") " pod="openshift-ovn-kubernetes/ovnkube-node-nzttm" Nov 28 07:07:10 crc kubenswrapper[4922]: I1128 07:07:10.937869 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/3a854b65-3cdb-49d1-8597-86fe8669ba91-run-ovn\") pod \"ovnkube-node-nzttm\" (UID: \"3a854b65-3cdb-49d1-8597-86fe8669ba91\") " pod="openshift-ovn-kubernetes/ovnkube-node-nzttm" Nov 28 07:07:10 crc kubenswrapper[4922]: I1128 07:07:10.937888 4922 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/3a854b65-3cdb-49d1-8597-86fe8669ba91-host-run-ovn-kubernetes\") pod \"ovnkube-node-nzttm\" (UID: \"3a854b65-3cdb-49d1-8597-86fe8669ba91\") " pod="openshift-ovn-kubernetes/ovnkube-node-nzttm" Nov 28 07:07:10 crc kubenswrapper[4922]: I1128 07:07:10.937905 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/3a854b65-3cdb-49d1-8597-86fe8669ba91-run-openvswitch\") pod \"ovnkube-node-nzttm\" (UID: \"3a854b65-3cdb-49d1-8597-86fe8669ba91\") " pod="openshift-ovn-kubernetes/ovnkube-node-nzttm" Nov 28 07:07:10 crc kubenswrapper[4922]: I1128 07:07:10.937925 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/3a854b65-3cdb-49d1-8597-86fe8669ba91-systemd-units\") pod \"ovnkube-node-nzttm\" (UID: \"3a854b65-3cdb-49d1-8597-86fe8669ba91\") " pod="openshift-ovn-kubernetes/ovnkube-node-nzttm" Nov 28 07:07:10 crc kubenswrapper[4922]: I1128 07:07:10.937941 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/3a854b65-3cdb-49d1-8597-86fe8669ba91-ovnkube-config\") pod \"ovnkube-node-nzttm\" (UID: \"3a854b65-3cdb-49d1-8597-86fe8669ba91\") " pod="openshift-ovn-kubernetes/ovnkube-node-nzttm" Nov 28 07:07:10 crc kubenswrapper[4922]: I1128 07:07:10.937955 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ac5c6b67-2037-400e-8e03-845b47d8ca67-ovnkube-config" (OuterVolumeSpecName: "ovnkube-config") pod "ac5c6b67-2037-400e-8e03-845b47d8ca67" (UID: "ac5c6b67-2037-400e-8e03-845b47d8ca67"). InnerVolumeSpecName "ovnkube-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 07:07:10 crc kubenswrapper[4922]: I1128 07:07:10.937974 4922 reconciler_common.go:293] "Volume detached for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/ac5c6b67-2037-400e-8e03-845b47d8ca67-host-var-lib-cni-networks-ovn-kubernetes\") on node \"crc\" DevicePath \"\"" Nov 28 07:07:10 crc kubenswrapper[4922]: I1128 07:07:10.937984 4922 reconciler_common.go:293] "Volume detached for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/ac5c6b67-2037-400e-8e03-845b47d8ca67-host-cni-netd\") on node \"crc\" DevicePath \"\"" Nov 28 07:07:10 crc kubenswrapper[4922]: I1128 07:07:10.937993 4922 reconciler_common.go:293] "Volume detached for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/ac5c6b67-2037-400e-8e03-845b47d8ca67-host-slash\") on node \"crc\" DevicePath \"\"" Nov 28 07:07:10 crc kubenswrapper[4922]: I1128 07:07:10.938025 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/ac5c6b67-2037-400e-8e03-845b47d8ca67-host-run-ovn-kubernetes" (OuterVolumeSpecName: "host-run-ovn-kubernetes") pod "ac5c6b67-2037-400e-8e03-845b47d8ca67" (UID: "ac5c6b67-2037-400e-8e03-845b47d8ca67"). InnerVolumeSpecName "host-run-ovn-kubernetes". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 28 07:07:10 crc kubenswrapper[4922]: I1128 07:07:10.938024 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/ac5c6b67-2037-400e-8e03-845b47d8ca67-host-run-netns" (OuterVolumeSpecName: "host-run-netns") pod "ac5c6b67-2037-400e-8e03-845b47d8ca67" (UID: "ac5c6b67-2037-400e-8e03-845b47d8ca67"). InnerVolumeSpecName "host-run-netns". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 28 07:07:10 crc kubenswrapper[4922]: I1128 07:07:10.938049 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/ac5c6b67-2037-400e-8e03-845b47d8ca67-var-lib-openvswitch" (OuterVolumeSpecName: "var-lib-openvswitch") pod "ac5c6b67-2037-400e-8e03-845b47d8ca67" (UID: "ac5c6b67-2037-400e-8e03-845b47d8ca67"). InnerVolumeSpecName "var-lib-openvswitch". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 28 07:07:10 crc kubenswrapper[4922]: I1128 07:07:10.938055 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/ac5c6b67-2037-400e-8e03-845b47d8ca67-log-socket" (OuterVolumeSpecName: "log-socket") pod "ac5c6b67-2037-400e-8e03-845b47d8ca67" (UID: "ac5c6b67-2037-400e-8e03-845b47d8ca67"). InnerVolumeSpecName "log-socket". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 28 07:07:10 crc kubenswrapper[4922]: I1128 07:07:10.938068 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/ac5c6b67-2037-400e-8e03-845b47d8ca67-systemd-units" (OuterVolumeSpecName: "systemd-units") pod "ac5c6b67-2037-400e-8e03-845b47d8ca67" (UID: "ac5c6b67-2037-400e-8e03-845b47d8ca67"). InnerVolumeSpecName "systemd-units". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 28 07:07:10 crc kubenswrapper[4922]: I1128 07:07:10.938113 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/ac5c6b67-2037-400e-8e03-845b47d8ca67-host-cni-bin" (OuterVolumeSpecName: "host-cni-bin") pod "ac5c6b67-2037-400e-8e03-845b47d8ca67" (UID: "ac5c6b67-2037-400e-8e03-845b47d8ca67"). InnerVolumeSpecName "host-cni-bin". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 28 07:07:10 crc kubenswrapper[4922]: I1128 07:07:10.938144 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/ac5c6b67-2037-400e-8e03-845b47d8ca67-run-openvswitch" (OuterVolumeSpecName: "run-openvswitch") pod "ac5c6b67-2037-400e-8e03-845b47d8ca67" (UID: "ac5c6b67-2037-400e-8e03-845b47d8ca67"). InnerVolumeSpecName "run-openvswitch". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 28 07:07:10 crc kubenswrapper[4922]: I1128 07:07:10.938378 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ac5c6b67-2037-400e-8e03-845b47d8ca67-ovnkube-script-lib" (OuterVolumeSpecName: "ovnkube-script-lib") pod "ac5c6b67-2037-400e-8e03-845b47d8ca67" (UID: "ac5c6b67-2037-400e-8e03-845b47d8ca67"). InnerVolumeSpecName "ovnkube-script-lib". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 07:07:10 crc kubenswrapper[4922]: I1128 07:07:10.938384 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/ac5c6b67-2037-400e-8e03-845b47d8ca67-node-log" (OuterVolumeSpecName: "node-log") pod "ac5c6b67-2037-400e-8e03-845b47d8ca67" (UID: "ac5c6b67-2037-400e-8e03-845b47d8ca67"). InnerVolumeSpecName "node-log". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 28 07:07:10 crc kubenswrapper[4922]: I1128 07:07:10.938412 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/ac5c6b67-2037-400e-8e03-845b47d8ca67-run-ovn" (OuterVolumeSpecName: "run-ovn") pod "ac5c6b67-2037-400e-8e03-845b47d8ca67" (UID: "ac5c6b67-2037-400e-8e03-845b47d8ca67"). InnerVolumeSpecName "run-ovn". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 28 07:07:10 crc kubenswrapper[4922]: I1128 07:07:10.938839 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/ac5c6b67-2037-400e-8e03-845b47d8ca67-etc-openvswitch" (OuterVolumeSpecName: "etc-openvswitch") pod "ac5c6b67-2037-400e-8e03-845b47d8ca67" (UID: "ac5c6b67-2037-400e-8e03-845b47d8ca67"). InnerVolumeSpecName "etc-openvswitch". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 28 07:07:10 crc kubenswrapper[4922]: I1128 07:07:10.938916 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ac5c6b67-2037-400e-8e03-845b47d8ca67-env-overrides" (OuterVolumeSpecName: "env-overrides") pod "ac5c6b67-2037-400e-8e03-845b47d8ca67" (UID: "ac5c6b67-2037-400e-8e03-845b47d8ca67"). InnerVolumeSpecName "env-overrides". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 07:07:10 crc kubenswrapper[4922]: I1128 07:07:10.938934 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/ac5c6b67-2037-400e-8e03-845b47d8ca67-host-kubelet" (OuterVolumeSpecName: "host-kubelet") pod "ac5c6b67-2037-400e-8e03-845b47d8ca67" (UID: "ac5c6b67-2037-400e-8e03-845b47d8ca67"). InnerVolumeSpecName "host-kubelet". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 28 07:07:10 crc kubenswrapper[4922]: I1128 07:07:10.944139 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ac5c6b67-2037-400e-8e03-845b47d8ca67-ovn-node-metrics-cert" (OuterVolumeSpecName: "ovn-node-metrics-cert") pod "ac5c6b67-2037-400e-8e03-845b47d8ca67" (UID: "ac5c6b67-2037-400e-8e03-845b47d8ca67"). InnerVolumeSpecName "ovn-node-metrics-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 07:07:10 crc kubenswrapper[4922]: I1128 07:07:10.944404 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ac5c6b67-2037-400e-8e03-845b47d8ca67-kube-api-access-dtxdn" (OuterVolumeSpecName: "kube-api-access-dtxdn") pod "ac5c6b67-2037-400e-8e03-845b47d8ca67" (UID: "ac5c6b67-2037-400e-8e03-845b47d8ca67"). InnerVolumeSpecName "kube-api-access-dtxdn". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 07:07:10 crc kubenswrapper[4922]: I1128 07:07:10.951387 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/ac5c6b67-2037-400e-8e03-845b47d8ca67-run-systemd" (OuterVolumeSpecName: "run-systemd") pod "ac5c6b67-2037-400e-8e03-845b47d8ca67" (UID: "ac5c6b67-2037-400e-8e03-845b47d8ca67"). InnerVolumeSpecName "run-systemd". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 28 07:07:11 crc kubenswrapper[4922]: I1128 07:07:11.038448 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/3a854b65-3cdb-49d1-8597-86fe8669ba91-ovnkube-config\") pod \"ovnkube-node-nzttm\" (UID: \"3a854b65-3cdb-49d1-8597-86fe8669ba91\") " pod="openshift-ovn-kubernetes/ovnkube-node-nzttm" Nov 28 07:07:11 crc kubenswrapper[4922]: I1128 07:07:11.038495 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/3a854b65-3cdb-49d1-8597-86fe8669ba91-node-log\") pod \"ovnkube-node-nzttm\" (UID: \"3a854b65-3cdb-49d1-8597-86fe8669ba91\") " pod="openshift-ovn-kubernetes/ovnkube-node-nzttm" Nov 28 07:07:11 crc kubenswrapper[4922]: I1128 07:07:11.038522 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/3a854b65-3cdb-49d1-8597-86fe8669ba91-env-overrides\") pod \"ovnkube-node-nzttm\" (UID: \"3a854b65-3cdb-49d1-8597-86fe8669ba91\") " pod="openshift-ovn-kubernetes/ovnkube-node-nzttm" Nov 28 07:07:11 crc kubenswrapper[4922]: I1128 07:07:11.038544 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/3a854b65-3cdb-49d1-8597-86fe8669ba91-log-socket\") pod \"ovnkube-node-nzttm\" (UID: \"3a854b65-3cdb-49d1-8597-86fe8669ba91\") " pod="openshift-ovn-kubernetes/ovnkube-node-nzttm" Nov 28 07:07:11 crc kubenswrapper[4922]: I1128 07:07:11.038570 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/3a854b65-3cdb-49d1-8597-86fe8669ba91-host-run-netns\") pod \"ovnkube-node-nzttm\" (UID: \"3a854b65-3cdb-49d1-8597-86fe8669ba91\") " pod="openshift-ovn-kubernetes/ovnkube-node-nzttm" Nov 28 07:07:11 crc kubenswrapper[4922]: I1128 07:07:11.038591 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/3a854b65-3cdb-49d1-8597-86fe8669ba91-host-cni-netd\") pod \"ovnkube-node-nzttm\" (UID: \"3a854b65-3cdb-49d1-8597-86fe8669ba91\") " pod="openshift-ovn-kubernetes/ovnkube-node-nzttm" Nov 28 07:07:11 crc kubenswrapper[4922]: I1128 07:07:11.038609 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/3a854b65-3cdb-49d1-8597-86fe8669ba91-var-lib-openvswitch\") pod \"ovnkube-node-nzttm\" (UID: \"3a854b65-3cdb-49d1-8597-86fe8669ba91\") " pod="openshift-ovn-kubernetes/ovnkube-node-nzttm" Nov 28 07:07:11 crc kubenswrapper[4922]: I1128 07:07:11.038631 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-28b5b\" (UniqueName: \"kubernetes.io/projected/3a854b65-3cdb-49d1-8597-86fe8669ba91-kube-api-access-28b5b\") pod \"ovnkube-node-nzttm\" (UID: \"3a854b65-3cdb-49d1-8597-86fe8669ba91\") " pod="openshift-ovn-kubernetes/ovnkube-node-nzttm" Nov 28 07:07:11 crc kubenswrapper[4922]: I1128 07:07:11.038638 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/3a854b65-3cdb-49d1-8597-86fe8669ba91-node-log\") pod \"ovnkube-node-nzttm\" (UID: \"3a854b65-3cdb-49d1-8597-86fe8669ba91\") " pod="openshift-ovn-kubernetes/ovnkube-node-nzttm" Nov 28 07:07:11 crc kubenswrapper[4922]: I1128 07:07:11.038650 
4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/3a854b65-3cdb-49d1-8597-86fe8669ba91-host-slash\") pod \"ovnkube-node-nzttm\" (UID: \"3a854b65-3cdb-49d1-8597-86fe8669ba91\") " pod="openshift-ovn-kubernetes/ovnkube-node-nzttm" Nov 28 07:07:11 crc kubenswrapper[4922]: I1128 07:07:11.038736 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/3a854b65-3cdb-49d1-8597-86fe8669ba91-etc-openvswitch\") pod \"ovnkube-node-nzttm\" (UID: \"3a854b65-3cdb-49d1-8597-86fe8669ba91\") " pod="openshift-ovn-kubernetes/ovnkube-node-nzttm" Nov 28 07:07:11 crc kubenswrapper[4922]: I1128 07:07:11.038745 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/3a854b65-3cdb-49d1-8597-86fe8669ba91-var-lib-openvswitch\") pod \"ovnkube-node-nzttm\" (UID: \"3a854b65-3cdb-49d1-8597-86fe8669ba91\") " pod="openshift-ovn-kubernetes/ovnkube-node-nzttm" Nov 28 07:07:11 crc kubenswrapper[4922]: I1128 07:07:11.038775 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/3a854b65-3cdb-49d1-8597-86fe8669ba91-run-systemd\") pod \"ovnkube-node-nzttm\" (UID: \"3a854b65-3cdb-49d1-8597-86fe8669ba91\") " pod="openshift-ovn-kubernetes/ovnkube-node-nzttm" Nov 28 07:07:11 crc kubenswrapper[4922]: I1128 07:07:11.038821 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/3a854b65-3cdb-49d1-8597-86fe8669ba91-host-cni-netd\") pod \"ovnkube-node-nzttm\" (UID: \"3a854b65-3cdb-49d1-8597-86fe8669ba91\") " pod="openshift-ovn-kubernetes/ovnkube-node-nzttm" Nov 28 07:07:11 crc kubenswrapper[4922]: I1128 07:07:11.038678 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/3a854b65-3cdb-49d1-8597-86fe8669ba91-host-slash\") pod \"ovnkube-node-nzttm\" (UID: \"3a854b65-3cdb-49d1-8597-86fe8669ba91\") " pod="openshift-ovn-kubernetes/ovnkube-node-nzttm" Nov 28 07:07:11 crc kubenswrapper[4922]: I1128 07:07:11.038790 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/3a854b65-3cdb-49d1-8597-86fe8669ba91-etc-openvswitch\") pod \"ovnkube-node-nzttm\" (UID: \"3a854b65-3cdb-49d1-8597-86fe8669ba91\") " pod="openshift-ovn-kubernetes/ovnkube-node-nzttm" Nov 28 07:07:11 crc kubenswrapper[4922]: I1128 07:07:11.038823 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/3a854b65-3cdb-49d1-8597-86fe8669ba91-ovnkube-script-lib\") pod \"ovnkube-node-nzttm\" (UID: \"3a854b65-3cdb-49d1-8597-86fe8669ba91\") " pod="openshift-ovn-kubernetes/ovnkube-node-nzttm" Nov 28 07:07:11 crc kubenswrapper[4922]: I1128 07:07:11.038880 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/3a854b65-3cdb-49d1-8597-86fe8669ba91-ovn-node-metrics-cert\") pod \"ovnkube-node-nzttm\" (UID: \"3a854b65-3cdb-49d1-8597-86fe8669ba91\") " pod="openshift-ovn-kubernetes/ovnkube-node-nzttm" Nov 28 07:07:11 crc kubenswrapper[4922]: I1128 07:07:11.038878 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-systemd\" (UniqueName: 
\"kubernetes.io/host-path/3a854b65-3cdb-49d1-8597-86fe8669ba91-run-systemd\") pod \"ovnkube-node-nzttm\" (UID: \"3a854b65-3cdb-49d1-8597-86fe8669ba91\") " pod="openshift-ovn-kubernetes/ovnkube-node-nzttm" Nov 28 07:07:11 crc kubenswrapper[4922]: I1128 07:07:11.038916 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/3a854b65-3cdb-49d1-8597-86fe8669ba91-host-cni-bin\") pod \"ovnkube-node-nzttm\" (UID: \"3a854b65-3cdb-49d1-8597-86fe8669ba91\") " pod="openshift-ovn-kubernetes/ovnkube-node-nzttm" Nov 28 07:07:11 crc kubenswrapper[4922]: I1128 07:07:11.038941 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/3a854b65-3cdb-49d1-8597-86fe8669ba91-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-nzttm\" (UID: \"3a854b65-3cdb-49d1-8597-86fe8669ba91\") " pod="openshift-ovn-kubernetes/ovnkube-node-nzttm" Nov 28 07:07:11 crc kubenswrapper[4922]: I1128 07:07:11.038965 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/3a854b65-3cdb-49d1-8597-86fe8669ba91-host-kubelet\") pod \"ovnkube-node-nzttm\" (UID: \"3a854b65-3cdb-49d1-8597-86fe8669ba91\") " pod="openshift-ovn-kubernetes/ovnkube-node-nzttm" Nov 28 07:07:11 crc kubenswrapper[4922]: I1128 07:07:11.038989 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/3a854b65-3cdb-49d1-8597-86fe8669ba91-run-ovn\") pod \"ovnkube-node-nzttm\" (UID: \"3a854b65-3cdb-49d1-8597-86fe8669ba91\") " pod="openshift-ovn-kubernetes/ovnkube-node-nzttm" Nov 28 07:07:11 crc kubenswrapper[4922]: I1128 07:07:11.039021 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/3a854b65-3cdb-49d1-8597-86fe8669ba91-host-run-ovn-kubernetes\") pod \"ovnkube-node-nzttm\" (UID: \"3a854b65-3cdb-49d1-8597-86fe8669ba91\") " pod="openshift-ovn-kubernetes/ovnkube-node-nzttm" Nov 28 07:07:11 crc kubenswrapper[4922]: I1128 07:07:11.039044 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/3a854b65-3cdb-49d1-8597-86fe8669ba91-run-openvswitch\") pod \"ovnkube-node-nzttm\" (UID: \"3a854b65-3cdb-49d1-8597-86fe8669ba91\") " pod="openshift-ovn-kubernetes/ovnkube-node-nzttm" Nov 28 07:07:11 crc kubenswrapper[4922]: I1128 07:07:11.039068 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/3a854b65-3cdb-49d1-8597-86fe8669ba91-systemd-units\") pod \"ovnkube-node-nzttm\" (UID: \"3a854b65-3cdb-49d1-8597-86fe8669ba91\") " pod="openshift-ovn-kubernetes/ovnkube-node-nzttm" Nov 28 07:07:11 crc kubenswrapper[4922]: I1128 07:07:11.039119 4922 reconciler_common.go:293] "Volume detached for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/ac5c6b67-2037-400e-8e03-845b47d8ca67-var-lib-openvswitch\") on node \"crc\" DevicePath \"\"" Nov 28 07:07:11 crc kubenswrapper[4922]: I1128 07:07:11.039134 4922 reconciler_common.go:293] "Volume detached for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/ac5c6b67-2037-400e-8e03-845b47d8ca67-run-openvswitch\") on node \"crc\" DevicePath \"\"" Nov 28 07:07:11 crc kubenswrapper[4922]: I1128 07:07:11.039144 
4922 reconciler_common.go:293] "Volume detached for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/ac5c6b67-2037-400e-8e03-845b47d8ca67-node-log\") on node \"crc\" DevicePath \"\"" Nov 28 07:07:11 crc kubenswrapper[4922]: I1128 07:07:11.039156 4922 reconciler_common.go:293] "Volume detached for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/ac5c6b67-2037-400e-8e03-845b47d8ca67-host-cni-bin\") on node \"crc\" DevicePath \"\"" Nov 28 07:07:11 crc kubenswrapper[4922]: I1128 07:07:11.039167 4922 reconciler_common.go:293] "Volume detached for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/ac5c6b67-2037-400e-8e03-845b47d8ca67-systemd-units\") on node \"crc\" DevicePath \"\"" Nov 28 07:07:11 crc kubenswrapper[4922]: I1128 07:07:11.039179 4922 reconciler_common.go:293] "Volume detached for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/ac5c6b67-2037-400e-8e03-845b47d8ca67-ovnkube-script-lib\") on node \"crc\" DevicePath \"\"" Nov 28 07:07:11 crc kubenswrapper[4922]: I1128 07:07:11.039190 4922 reconciler_common.go:293] "Volume detached for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/ac5c6b67-2037-400e-8e03-845b47d8ca67-run-ovn\") on node \"crc\" DevicePath \"\"" Nov 28 07:07:11 crc kubenswrapper[4922]: I1128 07:07:11.039200 4922 reconciler_common.go:293] "Volume detached for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/ac5c6b67-2037-400e-8e03-845b47d8ca67-env-overrides\") on node \"crc\" DevicePath \"\"" Nov 28 07:07:11 crc kubenswrapper[4922]: I1128 07:07:11.039211 4922 reconciler_common.go:293] "Volume detached for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/ac5c6b67-2037-400e-8e03-845b47d8ca67-ovn-node-metrics-cert\") on node \"crc\" DevicePath \"\"" Nov 28 07:07:11 crc kubenswrapper[4922]: I1128 07:07:11.039250 4922 reconciler_common.go:293] "Volume detached for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/ac5c6b67-2037-400e-8e03-845b47d8ca67-etc-openvswitch\") on node \"crc\" DevicePath \"\"" Nov 28 07:07:11 crc kubenswrapper[4922]: I1128 07:07:11.039262 4922 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dtxdn\" (UniqueName: \"kubernetes.io/projected/ac5c6b67-2037-400e-8e03-845b47d8ca67-kube-api-access-dtxdn\") on node \"crc\" DevicePath \"\"" Nov 28 07:07:11 crc kubenswrapper[4922]: I1128 07:07:11.039272 4922 reconciler_common.go:293] "Volume detached for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/ac5c6b67-2037-400e-8e03-845b47d8ca67-run-systemd\") on node \"crc\" DevicePath \"\"" Nov 28 07:07:11 crc kubenswrapper[4922]: I1128 07:07:11.039282 4922 reconciler_common.go:293] "Volume detached for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/ac5c6b67-2037-400e-8e03-845b47d8ca67-host-kubelet\") on node \"crc\" DevicePath \"\"" Nov 28 07:07:11 crc kubenswrapper[4922]: I1128 07:07:11.039294 4922 reconciler_common.go:293] "Volume detached for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/ac5c6b67-2037-400e-8e03-845b47d8ca67-host-run-ovn-kubernetes\") on node \"crc\" DevicePath \"\"" Nov 28 07:07:11 crc kubenswrapper[4922]: I1128 07:07:11.039305 4922 reconciler_common.go:293] "Volume detached for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/ac5c6b67-2037-400e-8e03-845b47d8ca67-log-socket\") on node \"crc\" DevicePath \"\"" Nov 28 07:07:11 crc kubenswrapper[4922]: I1128 07:07:11.039317 4922 reconciler_common.go:293] "Volume detached for volume 
\"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/ac5c6b67-2037-400e-8e03-845b47d8ca67-ovnkube-config\") on node \"crc\" DevicePath \"\"" Nov 28 07:07:11 crc kubenswrapper[4922]: I1128 07:07:11.039329 4922 reconciler_common.go:293] "Volume detached for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/ac5c6b67-2037-400e-8e03-845b47d8ca67-host-run-netns\") on node \"crc\" DevicePath \"\"" Nov 28 07:07:11 crc kubenswrapper[4922]: I1128 07:07:11.039360 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/3a854b65-3cdb-49d1-8597-86fe8669ba91-systemd-units\") pod \"ovnkube-node-nzttm\" (UID: \"3a854b65-3cdb-49d1-8597-86fe8669ba91\") " pod="openshift-ovn-kubernetes/ovnkube-node-nzttm" Nov 28 07:07:11 crc kubenswrapper[4922]: I1128 07:07:11.039392 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/3a854b65-3cdb-49d1-8597-86fe8669ba91-host-cni-bin\") pod \"ovnkube-node-nzttm\" (UID: \"3a854b65-3cdb-49d1-8597-86fe8669ba91\") " pod="openshift-ovn-kubernetes/ovnkube-node-nzttm" Nov 28 07:07:11 crc kubenswrapper[4922]: I1128 07:07:11.039423 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/3a854b65-3cdb-49d1-8597-86fe8669ba91-run-ovn\") pod \"ovnkube-node-nzttm\" (UID: \"3a854b65-3cdb-49d1-8597-86fe8669ba91\") " pod="openshift-ovn-kubernetes/ovnkube-node-nzttm" Nov 28 07:07:11 crc kubenswrapper[4922]: I1128 07:07:11.039437 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/3a854b65-3cdb-49d1-8597-86fe8669ba91-host-kubelet\") pod \"ovnkube-node-nzttm\" (UID: \"3a854b65-3cdb-49d1-8597-86fe8669ba91\") " pod="openshift-ovn-kubernetes/ovnkube-node-nzttm" Nov 28 07:07:11 crc kubenswrapper[4922]: I1128 07:07:11.039453 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/3a854b65-3cdb-49d1-8597-86fe8669ba91-host-run-ovn-kubernetes\") pod \"ovnkube-node-nzttm\" (UID: \"3a854b65-3cdb-49d1-8597-86fe8669ba91\") " pod="openshift-ovn-kubernetes/ovnkube-node-nzttm" Nov 28 07:07:11 crc kubenswrapper[4922]: I1128 07:07:11.039483 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/3a854b65-3cdb-49d1-8597-86fe8669ba91-run-openvswitch\") pod \"ovnkube-node-nzttm\" (UID: \"3a854b65-3cdb-49d1-8597-86fe8669ba91\") " pod="openshift-ovn-kubernetes/ovnkube-node-nzttm" Nov 28 07:07:11 crc kubenswrapper[4922]: I1128 07:07:11.038731 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/3a854b65-3cdb-49d1-8597-86fe8669ba91-host-run-netns\") pod \"ovnkube-node-nzttm\" (UID: \"3a854b65-3cdb-49d1-8597-86fe8669ba91\") " pod="openshift-ovn-kubernetes/ovnkube-node-nzttm" Nov 28 07:07:11 crc kubenswrapper[4922]: I1128 07:07:11.039492 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/3a854b65-3cdb-49d1-8597-86fe8669ba91-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-nzttm\" (UID: \"3a854b65-3cdb-49d1-8597-86fe8669ba91\") " pod="openshift-ovn-kubernetes/ovnkube-node-nzttm" Nov 28 07:07:11 crc kubenswrapper[4922]: I1128 07:07:11.039903 4922 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/3a854b65-3cdb-49d1-8597-86fe8669ba91-env-overrides\") pod \"ovnkube-node-nzttm\" (UID: \"3a854b65-3cdb-49d1-8597-86fe8669ba91\") " pod="openshift-ovn-kubernetes/ovnkube-node-nzttm" Nov 28 07:07:11 crc kubenswrapper[4922]: I1128 07:07:11.039921 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/3a854b65-3cdb-49d1-8597-86fe8669ba91-ovnkube-config\") pod \"ovnkube-node-nzttm\" (UID: \"3a854b65-3cdb-49d1-8597-86fe8669ba91\") " pod="openshift-ovn-kubernetes/ovnkube-node-nzttm" Nov 28 07:07:11 crc kubenswrapper[4922]: I1128 07:07:11.039987 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/3a854b65-3cdb-49d1-8597-86fe8669ba91-log-socket\") pod \"ovnkube-node-nzttm\" (UID: \"3a854b65-3cdb-49d1-8597-86fe8669ba91\") " pod="openshift-ovn-kubernetes/ovnkube-node-nzttm" Nov 28 07:07:11 crc kubenswrapper[4922]: I1128 07:07:11.040354 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/3a854b65-3cdb-49d1-8597-86fe8669ba91-ovnkube-script-lib\") pod \"ovnkube-node-nzttm\" (UID: \"3a854b65-3cdb-49d1-8597-86fe8669ba91\") " pod="openshift-ovn-kubernetes/ovnkube-node-nzttm" Nov 28 07:07:11 crc kubenswrapper[4922]: I1128 07:07:11.044771 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/3a854b65-3cdb-49d1-8597-86fe8669ba91-ovn-node-metrics-cert\") pod \"ovnkube-node-nzttm\" (UID: \"3a854b65-3cdb-49d1-8597-86fe8669ba91\") " pod="openshift-ovn-kubernetes/ovnkube-node-nzttm" Nov 28 07:07:11 crc kubenswrapper[4922]: I1128 07:07:11.063706 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-28b5b\" (UniqueName: \"kubernetes.io/projected/3a854b65-3cdb-49d1-8597-86fe8669ba91-kube-api-access-28b5b\") pod \"ovnkube-node-nzttm\" (UID: \"3a854b65-3cdb-49d1-8597-86fe8669ba91\") " pod="openshift-ovn-kubernetes/ovnkube-node-nzttm" Nov 28 07:07:11 crc kubenswrapper[4922]: I1128 07:07:11.210416 4922 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-nzttm" Nov 28 07:07:11 crc kubenswrapper[4922]: I1128 07:07:11.694316 4922 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-7gdxt_ac5c6b67-2037-400e-8e03-845b47d8ca67/ovn-acl-logging/0.log" Nov 28 07:07:11 crc kubenswrapper[4922]: I1128 07:07:11.696144 4922 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-7gdxt_ac5c6b67-2037-400e-8e03-845b47d8ca67/ovn-controller/0.log" Nov 28 07:07:11 crc kubenswrapper[4922]: I1128 07:07:11.696595 4922 generic.go:334] "Generic (PLEG): container finished" podID="ac5c6b67-2037-400e-8e03-845b47d8ca67" containerID="d31c99f96b8af9c7b9c47c4ccd0b6485b23c9eacfe293667038042df8330d8a9" exitCode=0 Nov 28 07:07:11 crc kubenswrapper[4922]: I1128 07:07:11.696668 4922 generic.go:334] "Generic (PLEG): container finished" podID="ac5c6b67-2037-400e-8e03-845b47d8ca67" containerID="6e9942f8e8cdf6ffd04d46b29be5a53fcc8c4cb5d79f50d7747e2fe6eb744ffd" exitCode=0 Nov 28 07:07:11 crc kubenswrapper[4922]: I1128 07:07:11.696687 4922 generic.go:334] "Generic (PLEG): container finished" podID="ac5c6b67-2037-400e-8e03-845b47d8ca67" containerID="e91ea6957087e19d43626438d68bf92a19a6f35325de570738354edd61da5bb0" exitCode=0 Nov 28 07:07:11 crc kubenswrapper[4922]: I1128 07:07:11.696704 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-7gdxt" event={"ID":"ac5c6b67-2037-400e-8e03-845b47d8ca67","Type":"ContainerDied","Data":"d31c99f96b8af9c7b9c47c4ccd0b6485b23c9eacfe293667038042df8330d8a9"} Nov 28 07:07:11 crc kubenswrapper[4922]: I1128 07:07:11.696752 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-7gdxt" event={"ID":"ac5c6b67-2037-400e-8e03-845b47d8ca67","Type":"ContainerDied","Data":"6e9942f8e8cdf6ffd04d46b29be5a53fcc8c4cb5d79f50d7747e2fe6eb744ffd"} Nov 28 07:07:11 crc kubenswrapper[4922]: I1128 07:07:11.696763 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-7gdxt" event={"ID":"ac5c6b67-2037-400e-8e03-845b47d8ca67","Type":"ContainerDied","Data":"e91ea6957087e19d43626438d68bf92a19a6f35325de570738354edd61da5bb0"} Nov 28 07:07:11 crc kubenswrapper[4922]: I1128 07:07:11.696774 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-7gdxt" event={"ID":"ac5c6b67-2037-400e-8e03-845b47d8ca67","Type":"ContainerDied","Data":"6cf5f8236249cc30915ac74dd4562edd109c30472174248c7b941efb278a4f52"} Nov 28 07:07:11 crc kubenswrapper[4922]: I1128 07:07:11.696778 4922 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-7gdxt" Nov 28 07:07:11 crc kubenswrapper[4922]: I1128 07:07:11.696791 4922 scope.go:117] "RemoveContainer" containerID="4d624c510caffbd9c395e40cf877efb8665931031243034a0c2ab69d9a8bbeee" Nov 28 07:07:11 crc kubenswrapper[4922]: I1128 07:07:11.698571 4922 generic.go:334] "Generic (PLEG): container finished" podID="3a854b65-3cdb-49d1-8597-86fe8669ba91" containerID="ec96287e4a109f6aebcef65bb7dbac7e3a68a82c0ca00b7a58b7f33dd68b9dfa" exitCode=0 Nov 28 07:07:11 crc kubenswrapper[4922]: I1128 07:07:11.698629 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-nzttm" event={"ID":"3a854b65-3cdb-49d1-8597-86fe8669ba91","Type":"ContainerDied","Data":"ec96287e4a109f6aebcef65bb7dbac7e3a68a82c0ca00b7a58b7f33dd68b9dfa"} Nov 28 07:07:11 crc kubenswrapper[4922]: I1128 07:07:11.698691 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-nzttm" event={"ID":"3a854b65-3cdb-49d1-8597-86fe8669ba91","Type":"ContainerStarted","Data":"5201ea6bc960d392247d75a104e79c579d1683d33e8c936141e0be9c55d650a8"} Nov 28 07:07:11 crc kubenswrapper[4922]: I1128 07:07:11.702546 4922 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-jgzjd_b05f16bb-1729-4fd8-883a-4fb960bf4cff/kube-multus/2.log" Nov 28 07:07:11 crc kubenswrapper[4922]: I1128 07:07:11.702595 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-jgzjd" event={"ID":"b05f16bb-1729-4fd8-883a-4fb960bf4cff","Type":"ContainerStarted","Data":"4a2c4063a2008b964b95c307b0849065f726b8a30f72110dd2bd391b15a417d9"} Nov 28 07:07:11 crc kubenswrapper[4922]: I1128 07:07:11.721988 4922 scope.go:117] "RemoveContainer" containerID="d31c99f96b8af9c7b9c47c4ccd0b6485b23c9eacfe293667038042df8330d8a9" Nov 28 07:07:11 crc kubenswrapper[4922]: I1128 07:07:11.777392 4922 scope.go:117] "RemoveContainer" containerID="6e9942f8e8cdf6ffd04d46b29be5a53fcc8c4cb5d79f50d7747e2fe6eb744ffd" Nov 28 07:07:11 crc kubenswrapper[4922]: I1128 07:07:11.804751 4922 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-7gdxt"] Nov 28 07:07:11 crc kubenswrapper[4922]: I1128 07:07:11.805550 4922 scope.go:117] "RemoveContainer" containerID="e91ea6957087e19d43626438d68bf92a19a6f35325de570738354edd61da5bb0" Nov 28 07:07:11 crc kubenswrapper[4922]: I1128 07:07:11.810129 4922 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-7gdxt"] Nov 28 07:07:11 crc kubenswrapper[4922]: I1128 07:07:11.838428 4922 scope.go:117] "RemoveContainer" containerID="2c1a545fef7aec25b21dd112c54eeb7b94306cafaf4732a50de9ff10a409c443" Nov 28 07:07:11 crc kubenswrapper[4922]: I1128 07:07:11.854457 4922 scope.go:117] "RemoveContainer" containerID="d0f038ffe6c8ec5ec836379b827ecb71967119efca3f2cd4ce5d3006fa96a153" Nov 28 07:07:11 crc kubenswrapper[4922]: I1128 07:07:11.877065 4922 scope.go:117] "RemoveContainer" containerID="4f56366e2355663eb896c09a6710166b89f794a7f17cb317f6c4299e8b317782" Nov 28 07:07:11 crc kubenswrapper[4922]: I1128 07:07:11.900363 4922 scope.go:117] "RemoveContainer" containerID="403e86b4b99e8bc85be77996288705f6ea8ff5da8e9b7dfb09749f4341065b55" Nov 28 07:07:11 crc kubenswrapper[4922]: I1128 07:07:11.918718 4922 scope.go:117] "RemoveContainer" containerID="3aaa3ffb5b99838c7969b3e241ad9d7a0bc4797fac3ec52280914abbb7ca1b24" Nov 28 07:07:11 crc kubenswrapper[4922]: I1128 07:07:11.957439 4922 scope.go:117] "RemoveContainer" 
containerID="4d624c510caffbd9c395e40cf877efb8665931031243034a0c2ab69d9a8bbeee" Nov 28 07:07:11 crc kubenswrapper[4922]: E1128 07:07:11.958023 4922 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4d624c510caffbd9c395e40cf877efb8665931031243034a0c2ab69d9a8bbeee\": container with ID starting with 4d624c510caffbd9c395e40cf877efb8665931031243034a0c2ab69d9a8bbeee not found: ID does not exist" containerID="4d624c510caffbd9c395e40cf877efb8665931031243034a0c2ab69d9a8bbeee" Nov 28 07:07:11 crc kubenswrapper[4922]: I1128 07:07:11.958072 4922 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4d624c510caffbd9c395e40cf877efb8665931031243034a0c2ab69d9a8bbeee"} err="failed to get container status \"4d624c510caffbd9c395e40cf877efb8665931031243034a0c2ab69d9a8bbeee\": rpc error: code = NotFound desc = could not find container \"4d624c510caffbd9c395e40cf877efb8665931031243034a0c2ab69d9a8bbeee\": container with ID starting with 4d624c510caffbd9c395e40cf877efb8665931031243034a0c2ab69d9a8bbeee not found: ID does not exist" Nov 28 07:07:11 crc kubenswrapper[4922]: I1128 07:07:11.958103 4922 scope.go:117] "RemoveContainer" containerID="d31c99f96b8af9c7b9c47c4ccd0b6485b23c9eacfe293667038042df8330d8a9" Nov 28 07:07:11 crc kubenswrapper[4922]: E1128 07:07:11.958583 4922 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d31c99f96b8af9c7b9c47c4ccd0b6485b23c9eacfe293667038042df8330d8a9\": container with ID starting with d31c99f96b8af9c7b9c47c4ccd0b6485b23c9eacfe293667038042df8330d8a9 not found: ID does not exist" containerID="d31c99f96b8af9c7b9c47c4ccd0b6485b23c9eacfe293667038042df8330d8a9" Nov 28 07:07:11 crc kubenswrapper[4922]: I1128 07:07:11.958620 4922 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d31c99f96b8af9c7b9c47c4ccd0b6485b23c9eacfe293667038042df8330d8a9"} err="failed to get container status \"d31c99f96b8af9c7b9c47c4ccd0b6485b23c9eacfe293667038042df8330d8a9\": rpc error: code = NotFound desc = could not find container \"d31c99f96b8af9c7b9c47c4ccd0b6485b23c9eacfe293667038042df8330d8a9\": container with ID starting with d31c99f96b8af9c7b9c47c4ccd0b6485b23c9eacfe293667038042df8330d8a9 not found: ID does not exist" Nov 28 07:07:11 crc kubenswrapper[4922]: I1128 07:07:11.958642 4922 scope.go:117] "RemoveContainer" containerID="6e9942f8e8cdf6ffd04d46b29be5a53fcc8c4cb5d79f50d7747e2fe6eb744ffd" Nov 28 07:07:11 crc kubenswrapper[4922]: E1128 07:07:11.959057 4922 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"6e9942f8e8cdf6ffd04d46b29be5a53fcc8c4cb5d79f50d7747e2fe6eb744ffd\": container with ID starting with 6e9942f8e8cdf6ffd04d46b29be5a53fcc8c4cb5d79f50d7747e2fe6eb744ffd not found: ID does not exist" containerID="6e9942f8e8cdf6ffd04d46b29be5a53fcc8c4cb5d79f50d7747e2fe6eb744ffd" Nov 28 07:07:11 crc kubenswrapper[4922]: I1128 07:07:11.959088 4922 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6e9942f8e8cdf6ffd04d46b29be5a53fcc8c4cb5d79f50d7747e2fe6eb744ffd"} err="failed to get container status \"6e9942f8e8cdf6ffd04d46b29be5a53fcc8c4cb5d79f50d7747e2fe6eb744ffd\": rpc error: code = NotFound desc = could not find container \"6e9942f8e8cdf6ffd04d46b29be5a53fcc8c4cb5d79f50d7747e2fe6eb744ffd\": container with ID starting with 
6e9942f8e8cdf6ffd04d46b29be5a53fcc8c4cb5d79f50d7747e2fe6eb744ffd not found: ID does not exist" Nov 28 07:07:11 crc kubenswrapper[4922]: I1128 07:07:11.959107 4922 scope.go:117] "RemoveContainer" containerID="e91ea6957087e19d43626438d68bf92a19a6f35325de570738354edd61da5bb0" Nov 28 07:07:11 crc kubenswrapper[4922]: E1128 07:07:11.959515 4922 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e91ea6957087e19d43626438d68bf92a19a6f35325de570738354edd61da5bb0\": container with ID starting with e91ea6957087e19d43626438d68bf92a19a6f35325de570738354edd61da5bb0 not found: ID does not exist" containerID="e91ea6957087e19d43626438d68bf92a19a6f35325de570738354edd61da5bb0" Nov 28 07:07:11 crc kubenswrapper[4922]: I1128 07:07:11.959546 4922 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e91ea6957087e19d43626438d68bf92a19a6f35325de570738354edd61da5bb0"} err="failed to get container status \"e91ea6957087e19d43626438d68bf92a19a6f35325de570738354edd61da5bb0\": rpc error: code = NotFound desc = could not find container \"e91ea6957087e19d43626438d68bf92a19a6f35325de570738354edd61da5bb0\": container with ID starting with e91ea6957087e19d43626438d68bf92a19a6f35325de570738354edd61da5bb0 not found: ID does not exist" Nov 28 07:07:11 crc kubenswrapper[4922]: I1128 07:07:11.959564 4922 scope.go:117] "RemoveContainer" containerID="2c1a545fef7aec25b21dd112c54eeb7b94306cafaf4732a50de9ff10a409c443" Nov 28 07:07:11 crc kubenswrapper[4922]: E1128 07:07:11.960316 4922 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"2c1a545fef7aec25b21dd112c54eeb7b94306cafaf4732a50de9ff10a409c443\": container with ID starting with 2c1a545fef7aec25b21dd112c54eeb7b94306cafaf4732a50de9ff10a409c443 not found: ID does not exist" containerID="2c1a545fef7aec25b21dd112c54eeb7b94306cafaf4732a50de9ff10a409c443" Nov 28 07:07:11 crc kubenswrapper[4922]: I1128 07:07:11.960344 4922 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2c1a545fef7aec25b21dd112c54eeb7b94306cafaf4732a50de9ff10a409c443"} err="failed to get container status \"2c1a545fef7aec25b21dd112c54eeb7b94306cafaf4732a50de9ff10a409c443\": rpc error: code = NotFound desc = could not find container \"2c1a545fef7aec25b21dd112c54eeb7b94306cafaf4732a50de9ff10a409c443\": container with ID starting with 2c1a545fef7aec25b21dd112c54eeb7b94306cafaf4732a50de9ff10a409c443 not found: ID does not exist" Nov 28 07:07:11 crc kubenswrapper[4922]: I1128 07:07:11.960360 4922 scope.go:117] "RemoveContainer" containerID="d0f038ffe6c8ec5ec836379b827ecb71967119efca3f2cd4ce5d3006fa96a153" Nov 28 07:07:11 crc kubenswrapper[4922]: E1128 07:07:11.960651 4922 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d0f038ffe6c8ec5ec836379b827ecb71967119efca3f2cd4ce5d3006fa96a153\": container with ID starting with d0f038ffe6c8ec5ec836379b827ecb71967119efca3f2cd4ce5d3006fa96a153 not found: ID does not exist" containerID="d0f038ffe6c8ec5ec836379b827ecb71967119efca3f2cd4ce5d3006fa96a153" Nov 28 07:07:11 crc kubenswrapper[4922]: I1128 07:07:11.960677 4922 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d0f038ffe6c8ec5ec836379b827ecb71967119efca3f2cd4ce5d3006fa96a153"} err="failed to get container status \"d0f038ffe6c8ec5ec836379b827ecb71967119efca3f2cd4ce5d3006fa96a153\": rpc 
error: code = NotFound desc = could not find container \"d0f038ffe6c8ec5ec836379b827ecb71967119efca3f2cd4ce5d3006fa96a153\": container with ID starting with d0f038ffe6c8ec5ec836379b827ecb71967119efca3f2cd4ce5d3006fa96a153 not found: ID does not exist" Nov 28 07:07:11 crc kubenswrapper[4922]: I1128 07:07:11.960694 4922 scope.go:117] "RemoveContainer" containerID="4f56366e2355663eb896c09a6710166b89f794a7f17cb317f6c4299e8b317782" Nov 28 07:07:11 crc kubenswrapper[4922]: E1128 07:07:11.961369 4922 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4f56366e2355663eb896c09a6710166b89f794a7f17cb317f6c4299e8b317782\": container with ID starting with 4f56366e2355663eb896c09a6710166b89f794a7f17cb317f6c4299e8b317782 not found: ID does not exist" containerID="4f56366e2355663eb896c09a6710166b89f794a7f17cb317f6c4299e8b317782" Nov 28 07:07:11 crc kubenswrapper[4922]: I1128 07:07:11.961395 4922 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4f56366e2355663eb896c09a6710166b89f794a7f17cb317f6c4299e8b317782"} err="failed to get container status \"4f56366e2355663eb896c09a6710166b89f794a7f17cb317f6c4299e8b317782\": rpc error: code = NotFound desc = could not find container \"4f56366e2355663eb896c09a6710166b89f794a7f17cb317f6c4299e8b317782\": container with ID starting with 4f56366e2355663eb896c09a6710166b89f794a7f17cb317f6c4299e8b317782 not found: ID does not exist" Nov 28 07:07:11 crc kubenswrapper[4922]: I1128 07:07:11.961412 4922 scope.go:117] "RemoveContainer" containerID="403e86b4b99e8bc85be77996288705f6ea8ff5da8e9b7dfb09749f4341065b55" Nov 28 07:07:11 crc kubenswrapper[4922]: E1128 07:07:11.961696 4922 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"403e86b4b99e8bc85be77996288705f6ea8ff5da8e9b7dfb09749f4341065b55\": container with ID starting with 403e86b4b99e8bc85be77996288705f6ea8ff5da8e9b7dfb09749f4341065b55 not found: ID does not exist" containerID="403e86b4b99e8bc85be77996288705f6ea8ff5da8e9b7dfb09749f4341065b55" Nov 28 07:07:11 crc kubenswrapper[4922]: I1128 07:07:11.961723 4922 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"403e86b4b99e8bc85be77996288705f6ea8ff5da8e9b7dfb09749f4341065b55"} err="failed to get container status \"403e86b4b99e8bc85be77996288705f6ea8ff5da8e9b7dfb09749f4341065b55\": rpc error: code = NotFound desc = could not find container \"403e86b4b99e8bc85be77996288705f6ea8ff5da8e9b7dfb09749f4341065b55\": container with ID starting with 403e86b4b99e8bc85be77996288705f6ea8ff5da8e9b7dfb09749f4341065b55 not found: ID does not exist" Nov 28 07:07:11 crc kubenswrapper[4922]: I1128 07:07:11.961742 4922 scope.go:117] "RemoveContainer" containerID="3aaa3ffb5b99838c7969b3e241ad9d7a0bc4797fac3ec52280914abbb7ca1b24" Nov 28 07:07:11 crc kubenswrapper[4922]: E1128 07:07:11.962087 4922 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3aaa3ffb5b99838c7969b3e241ad9d7a0bc4797fac3ec52280914abbb7ca1b24\": container with ID starting with 3aaa3ffb5b99838c7969b3e241ad9d7a0bc4797fac3ec52280914abbb7ca1b24 not found: ID does not exist" containerID="3aaa3ffb5b99838c7969b3e241ad9d7a0bc4797fac3ec52280914abbb7ca1b24" Nov 28 07:07:11 crc kubenswrapper[4922]: I1128 07:07:11.962114 4922 pod_container_deletor.go:53] "DeleteContainer returned error" 
containerID={"Type":"cri-o","ID":"3aaa3ffb5b99838c7969b3e241ad9d7a0bc4797fac3ec52280914abbb7ca1b24"} err="failed to get container status \"3aaa3ffb5b99838c7969b3e241ad9d7a0bc4797fac3ec52280914abbb7ca1b24\": rpc error: code = NotFound desc = could not find container \"3aaa3ffb5b99838c7969b3e241ad9d7a0bc4797fac3ec52280914abbb7ca1b24\": container with ID starting with 3aaa3ffb5b99838c7969b3e241ad9d7a0bc4797fac3ec52280914abbb7ca1b24 not found: ID does not exist" Nov 28 07:07:11 crc kubenswrapper[4922]: I1128 07:07:11.962131 4922 scope.go:117] "RemoveContainer" containerID="4d624c510caffbd9c395e40cf877efb8665931031243034a0c2ab69d9a8bbeee" Nov 28 07:07:11 crc kubenswrapper[4922]: I1128 07:07:11.962403 4922 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4d624c510caffbd9c395e40cf877efb8665931031243034a0c2ab69d9a8bbeee"} err="failed to get container status \"4d624c510caffbd9c395e40cf877efb8665931031243034a0c2ab69d9a8bbeee\": rpc error: code = NotFound desc = could not find container \"4d624c510caffbd9c395e40cf877efb8665931031243034a0c2ab69d9a8bbeee\": container with ID starting with 4d624c510caffbd9c395e40cf877efb8665931031243034a0c2ab69d9a8bbeee not found: ID does not exist" Nov 28 07:07:11 crc kubenswrapper[4922]: I1128 07:07:11.962428 4922 scope.go:117] "RemoveContainer" containerID="d31c99f96b8af9c7b9c47c4ccd0b6485b23c9eacfe293667038042df8330d8a9" Nov 28 07:07:11 crc kubenswrapper[4922]: I1128 07:07:11.962780 4922 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d31c99f96b8af9c7b9c47c4ccd0b6485b23c9eacfe293667038042df8330d8a9"} err="failed to get container status \"d31c99f96b8af9c7b9c47c4ccd0b6485b23c9eacfe293667038042df8330d8a9\": rpc error: code = NotFound desc = could not find container \"d31c99f96b8af9c7b9c47c4ccd0b6485b23c9eacfe293667038042df8330d8a9\": container with ID starting with d31c99f96b8af9c7b9c47c4ccd0b6485b23c9eacfe293667038042df8330d8a9 not found: ID does not exist" Nov 28 07:07:11 crc kubenswrapper[4922]: I1128 07:07:11.962803 4922 scope.go:117] "RemoveContainer" containerID="6e9942f8e8cdf6ffd04d46b29be5a53fcc8c4cb5d79f50d7747e2fe6eb744ffd" Nov 28 07:07:11 crc kubenswrapper[4922]: I1128 07:07:11.963264 4922 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6e9942f8e8cdf6ffd04d46b29be5a53fcc8c4cb5d79f50d7747e2fe6eb744ffd"} err="failed to get container status \"6e9942f8e8cdf6ffd04d46b29be5a53fcc8c4cb5d79f50d7747e2fe6eb744ffd\": rpc error: code = NotFound desc = could not find container \"6e9942f8e8cdf6ffd04d46b29be5a53fcc8c4cb5d79f50d7747e2fe6eb744ffd\": container with ID starting with 6e9942f8e8cdf6ffd04d46b29be5a53fcc8c4cb5d79f50d7747e2fe6eb744ffd not found: ID does not exist" Nov 28 07:07:11 crc kubenswrapper[4922]: I1128 07:07:11.963290 4922 scope.go:117] "RemoveContainer" containerID="e91ea6957087e19d43626438d68bf92a19a6f35325de570738354edd61da5bb0" Nov 28 07:07:11 crc kubenswrapper[4922]: I1128 07:07:11.963755 4922 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e91ea6957087e19d43626438d68bf92a19a6f35325de570738354edd61da5bb0"} err="failed to get container status \"e91ea6957087e19d43626438d68bf92a19a6f35325de570738354edd61da5bb0\": rpc error: code = NotFound desc = could not find container \"e91ea6957087e19d43626438d68bf92a19a6f35325de570738354edd61da5bb0\": container with ID starting with e91ea6957087e19d43626438d68bf92a19a6f35325de570738354edd61da5bb0 not found: ID does not exist" Nov 
28 07:07:11 crc kubenswrapper[4922]: I1128 07:07:11.963786 4922 scope.go:117] "RemoveContainer" containerID="2c1a545fef7aec25b21dd112c54eeb7b94306cafaf4732a50de9ff10a409c443" Nov 28 07:07:11 crc kubenswrapper[4922]: I1128 07:07:11.964117 4922 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2c1a545fef7aec25b21dd112c54eeb7b94306cafaf4732a50de9ff10a409c443"} err="failed to get container status \"2c1a545fef7aec25b21dd112c54eeb7b94306cafaf4732a50de9ff10a409c443\": rpc error: code = NotFound desc = could not find container \"2c1a545fef7aec25b21dd112c54eeb7b94306cafaf4732a50de9ff10a409c443\": container with ID starting with 2c1a545fef7aec25b21dd112c54eeb7b94306cafaf4732a50de9ff10a409c443 not found: ID does not exist" Nov 28 07:07:11 crc kubenswrapper[4922]: I1128 07:07:11.964140 4922 scope.go:117] "RemoveContainer" containerID="d0f038ffe6c8ec5ec836379b827ecb71967119efca3f2cd4ce5d3006fa96a153" Nov 28 07:07:11 crc kubenswrapper[4922]: I1128 07:07:11.964527 4922 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d0f038ffe6c8ec5ec836379b827ecb71967119efca3f2cd4ce5d3006fa96a153"} err="failed to get container status \"d0f038ffe6c8ec5ec836379b827ecb71967119efca3f2cd4ce5d3006fa96a153\": rpc error: code = NotFound desc = could not find container \"d0f038ffe6c8ec5ec836379b827ecb71967119efca3f2cd4ce5d3006fa96a153\": container with ID starting with d0f038ffe6c8ec5ec836379b827ecb71967119efca3f2cd4ce5d3006fa96a153 not found: ID does not exist" Nov 28 07:07:11 crc kubenswrapper[4922]: I1128 07:07:11.964579 4922 scope.go:117] "RemoveContainer" containerID="4f56366e2355663eb896c09a6710166b89f794a7f17cb317f6c4299e8b317782" Nov 28 07:07:11 crc kubenswrapper[4922]: I1128 07:07:11.964954 4922 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4f56366e2355663eb896c09a6710166b89f794a7f17cb317f6c4299e8b317782"} err="failed to get container status \"4f56366e2355663eb896c09a6710166b89f794a7f17cb317f6c4299e8b317782\": rpc error: code = NotFound desc = could not find container \"4f56366e2355663eb896c09a6710166b89f794a7f17cb317f6c4299e8b317782\": container with ID starting with 4f56366e2355663eb896c09a6710166b89f794a7f17cb317f6c4299e8b317782 not found: ID does not exist" Nov 28 07:07:11 crc kubenswrapper[4922]: I1128 07:07:11.964980 4922 scope.go:117] "RemoveContainer" containerID="403e86b4b99e8bc85be77996288705f6ea8ff5da8e9b7dfb09749f4341065b55" Nov 28 07:07:11 crc kubenswrapper[4922]: I1128 07:07:11.965379 4922 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"403e86b4b99e8bc85be77996288705f6ea8ff5da8e9b7dfb09749f4341065b55"} err="failed to get container status \"403e86b4b99e8bc85be77996288705f6ea8ff5da8e9b7dfb09749f4341065b55\": rpc error: code = NotFound desc = could not find container \"403e86b4b99e8bc85be77996288705f6ea8ff5da8e9b7dfb09749f4341065b55\": container with ID starting with 403e86b4b99e8bc85be77996288705f6ea8ff5da8e9b7dfb09749f4341065b55 not found: ID does not exist" Nov 28 07:07:11 crc kubenswrapper[4922]: I1128 07:07:11.965405 4922 scope.go:117] "RemoveContainer" containerID="3aaa3ffb5b99838c7969b3e241ad9d7a0bc4797fac3ec52280914abbb7ca1b24" Nov 28 07:07:11 crc kubenswrapper[4922]: I1128 07:07:11.965704 4922 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3aaa3ffb5b99838c7969b3e241ad9d7a0bc4797fac3ec52280914abbb7ca1b24"} err="failed to get container status 
\"3aaa3ffb5b99838c7969b3e241ad9d7a0bc4797fac3ec52280914abbb7ca1b24\": rpc error: code = NotFound desc = could not find container \"3aaa3ffb5b99838c7969b3e241ad9d7a0bc4797fac3ec52280914abbb7ca1b24\": container with ID starting with 3aaa3ffb5b99838c7969b3e241ad9d7a0bc4797fac3ec52280914abbb7ca1b24 not found: ID does not exist" Nov 28 07:07:11 crc kubenswrapper[4922]: I1128 07:07:11.965728 4922 scope.go:117] "RemoveContainer" containerID="4d624c510caffbd9c395e40cf877efb8665931031243034a0c2ab69d9a8bbeee" Nov 28 07:07:11 crc kubenswrapper[4922]: I1128 07:07:11.966078 4922 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4d624c510caffbd9c395e40cf877efb8665931031243034a0c2ab69d9a8bbeee"} err="failed to get container status \"4d624c510caffbd9c395e40cf877efb8665931031243034a0c2ab69d9a8bbeee\": rpc error: code = NotFound desc = could not find container \"4d624c510caffbd9c395e40cf877efb8665931031243034a0c2ab69d9a8bbeee\": container with ID starting with 4d624c510caffbd9c395e40cf877efb8665931031243034a0c2ab69d9a8bbeee not found: ID does not exist" Nov 28 07:07:11 crc kubenswrapper[4922]: I1128 07:07:11.966100 4922 scope.go:117] "RemoveContainer" containerID="d31c99f96b8af9c7b9c47c4ccd0b6485b23c9eacfe293667038042df8330d8a9" Nov 28 07:07:11 crc kubenswrapper[4922]: I1128 07:07:11.966569 4922 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d31c99f96b8af9c7b9c47c4ccd0b6485b23c9eacfe293667038042df8330d8a9"} err="failed to get container status \"d31c99f96b8af9c7b9c47c4ccd0b6485b23c9eacfe293667038042df8330d8a9\": rpc error: code = NotFound desc = could not find container \"d31c99f96b8af9c7b9c47c4ccd0b6485b23c9eacfe293667038042df8330d8a9\": container with ID starting with d31c99f96b8af9c7b9c47c4ccd0b6485b23c9eacfe293667038042df8330d8a9 not found: ID does not exist" Nov 28 07:07:11 crc kubenswrapper[4922]: I1128 07:07:11.966607 4922 scope.go:117] "RemoveContainer" containerID="6e9942f8e8cdf6ffd04d46b29be5a53fcc8c4cb5d79f50d7747e2fe6eb744ffd" Nov 28 07:07:11 crc kubenswrapper[4922]: I1128 07:07:11.966901 4922 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6e9942f8e8cdf6ffd04d46b29be5a53fcc8c4cb5d79f50d7747e2fe6eb744ffd"} err="failed to get container status \"6e9942f8e8cdf6ffd04d46b29be5a53fcc8c4cb5d79f50d7747e2fe6eb744ffd\": rpc error: code = NotFound desc = could not find container \"6e9942f8e8cdf6ffd04d46b29be5a53fcc8c4cb5d79f50d7747e2fe6eb744ffd\": container with ID starting with 6e9942f8e8cdf6ffd04d46b29be5a53fcc8c4cb5d79f50d7747e2fe6eb744ffd not found: ID does not exist" Nov 28 07:07:11 crc kubenswrapper[4922]: I1128 07:07:11.967085 4922 scope.go:117] "RemoveContainer" containerID="e91ea6957087e19d43626438d68bf92a19a6f35325de570738354edd61da5bb0" Nov 28 07:07:11 crc kubenswrapper[4922]: I1128 07:07:11.967502 4922 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e91ea6957087e19d43626438d68bf92a19a6f35325de570738354edd61da5bb0"} err="failed to get container status \"e91ea6957087e19d43626438d68bf92a19a6f35325de570738354edd61da5bb0\": rpc error: code = NotFound desc = could not find container \"e91ea6957087e19d43626438d68bf92a19a6f35325de570738354edd61da5bb0\": container with ID starting with e91ea6957087e19d43626438d68bf92a19a6f35325de570738354edd61da5bb0 not found: ID does not exist" Nov 28 07:07:11 crc kubenswrapper[4922]: I1128 07:07:11.967531 4922 scope.go:117] "RemoveContainer" 
containerID="2c1a545fef7aec25b21dd112c54eeb7b94306cafaf4732a50de9ff10a409c443" Nov 28 07:07:11 crc kubenswrapper[4922]: I1128 07:07:11.967824 4922 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2c1a545fef7aec25b21dd112c54eeb7b94306cafaf4732a50de9ff10a409c443"} err="failed to get container status \"2c1a545fef7aec25b21dd112c54eeb7b94306cafaf4732a50de9ff10a409c443\": rpc error: code = NotFound desc = could not find container \"2c1a545fef7aec25b21dd112c54eeb7b94306cafaf4732a50de9ff10a409c443\": container with ID starting with 2c1a545fef7aec25b21dd112c54eeb7b94306cafaf4732a50de9ff10a409c443 not found: ID does not exist" Nov 28 07:07:11 crc kubenswrapper[4922]: I1128 07:07:11.967844 4922 scope.go:117] "RemoveContainer" containerID="d0f038ffe6c8ec5ec836379b827ecb71967119efca3f2cd4ce5d3006fa96a153" Nov 28 07:07:11 crc kubenswrapper[4922]: I1128 07:07:11.968178 4922 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d0f038ffe6c8ec5ec836379b827ecb71967119efca3f2cd4ce5d3006fa96a153"} err="failed to get container status \"d0f038ffe6c8ec5ec836379b827ecb71967119efca3f2cd4ce5d3006fa96a153\": rpc error: code = NotFound desc = could not find container \"d0f038ffe6c8ec5ec836379b827ecb71967119efca3f2cd4ce5d3006fa96a153\": container with ID starting with d0f038ffe6c8ec5ec836379b827ecb71967119efca3f2cd4ce5d3006fa96a153 not found: ID does not exist" Nov 28 07:07:11 crc kubenswrapper[4922]: I1128 07:07:11.968199 4922 scope.go:117] "RemoveContainer" containerID="4f56366e2355663eb896c09a6710166b89f794a7f17cb317f6c4299e8b317782" Nov 28 07:07:11 crc kubenswrapper[4922]: I1128 07:07:11.968591 4922 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4f56366e2355663eb896c09a6710166b89f794a7f17cb317f6c4299e8b317782"} err="failed to get container status \"4f56366e2355663eb896c09a6710166b89f794a7f17cb317f6c4299e8b317782\": rpc error: code = NotFound desc = could not find container \"4f56366e2355663eb896c09a6710166b89f794a7f17cb317f6c4299e8b317782\": container with ID starting with 4f56366e2355663eb896c09a6710166b89f794a7f17cb317f6c4299e8b317782 not found: ID does not exist" Nov 28 07:07:11 crc kubenswrapper[4922]: I1128 07:07:11.968615 4922 scope.go:117] "RemoveContainer" containerID="403e86b4b99e8bc85be77996288705f6ea8ff5da8e9b7dfb09749f4341065b55" Nov 28 07:07:11 crc kubenswrapper[4922]: I1128 07:07:11.968940 4922 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"403e86b4b99e8bc85be77996288705f6ea8ff5da8e9b7dfb09749f4341065b55"} err="failed to get container status \"403e86b4b99e8bc85be77996288705f6ea8ff5da8e9b7dfb09749f4341065b55\": rpc error: code = NotFound desc = could not find container \"403e86b4b99e8bc85be77996288705f6ea8ff5da8e9b7dfb09749f4341065b55\": container with ID starting with 403e86b4b99e8bc85be77996288705f6ea8ff5da8e9b7dfb09749f4341065b55 not found: ID does not exist" Nov 28 07:07:11 crc kubenswrapper[4922]: I1128 07:07:11.968966 4922 scope.go:117] "RemoveContainer" containerID="3aaa3ffb5b99838c7969b3e241ad9d7a0bc4797fac3ec52280914abbb7ca1b24" Nov 28 07:07:11 crc kubenswrapper[4922]: I1128 07:07:11.969398 4922 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3aaa3ffb5b99838c7969b3e241ad9d7a0bc4797fac3ec52280914abbb7ca1b24"} err="failed to get container status \"3aaa3ffb5b99838c7969b3e241ad9d7a0bc4797fac3ec52280914abbb7ca1b24\": rpc error: code = NotFound desc = could not find 
container \"3aaa3ffb5b99838c7969b3e241ad9d7a0bc4797fac3ec52280914abbb7ca1b24\": container with ID starting with 3aaa3ffb5b99838c7969b3e241ad9d7a0bc4797fac3ec52280914abbb7ca1b24 not found: ID does not exist" Nov 28 07:07:12 crc kubenswrapper[4922]: I1128 07:07:12.711405 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-nzttm" event={"ID":"3a854b65-3cdb-49d1-8597-86fe8669ba91","Type":"ContainerStarted","Data":"c37452d081358e45cbe35daa6b5fb28eb62bec3c4ea89712d5ac3ea6917871cb"} Nov 28 07:07:12 crc kubenswrapper[4922]: I1128 07:07:12.711937 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-nzttm" event={"ID":"3a854b65-3cdb-49d1-8597-86fe8669ba91","Type":"ContainerStarted","Data":"bf0a66b0b494c3b2de6e2a2c8bcfd689cad755ad610c0a87ab8da6633e421771"} Nov 28 07:07:12 crc kubenswrapper[4922]: I1128 07:07:12.711950 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-nzttm" event={"ID":"3a854b65-3cdb-49d1-8597-86fe8669ba91","Type":"ContainerStarted","Data":"37acd3ec4fa8f1e8c530ab27b0e780360d0afd991456f0868887deed95a70828"} Nov 28 07:07:12 crc kubenswrapper[4922]: I1128 07:07:12.711958 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-nzttm" event={"ID":"3a854b65-3cdb-49d1-8597-86fe8669ba91","Type":"ContainerStarted","Data":"f8ef6c0749aff5138ea91794ab4f8a1a08c0209dc4c2b972ba8c1dcc02385dc1"} Nov 28 07:07:12 crc kubenswrapper[4922]: I1128 07:07:12.711971 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-nzttm" event={"ID":"3a854b65-3cdb-49d1-8597-86fe8669ba91","Type":"ContainerStarted","Data":"e27c9e34442cef6708fb55f011c9f9f1e9dd29f2527a87cceda5cdc40e86641f"} Nov 28 07:07:12 crc kubenswrapper[4922]: I1128 07:07:12.711980 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-nzttm" event={"ID":"3a854b65-3cdb-49d1-8597-86fe8669ba91","Type":"ContainerStarted","Data":"2b818516813126ca96c951d8bfa7f25696e48227a1e412cc2bd0392edd306287"} Nov 28 07:07:13 crc kubenswrapper[4922]: I1128 07:07:13.408543 4922 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ac5c6b67-2037-400e-8e03-845b47d8ca67" path="/var/lib/kubelet/pods/ac5c6b67-2037-400e-8e03-845b47d8ca67/volumes" Nov 28 07:07:15 crc kubenswrapper[4922]: I1128 07:07:15.752479 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-nzttm" event={"ID":"3a854b65-3cdb-49d1-8597-86fe8669ba91","Type":"ContainerStarted","Data":"81b3592f81e0dea6a95063aec2c759f9a0b345c9550270fa785df92896e803d6"} Nov 28 07:07:17 crc kubenswrapper[4922]: I1128 07:07:17.785382 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-nzttm" event={"ID":"3a854b65-3cdb-49d1-8597-86fe8669ba91","Type":"ContainerStarted","Data":"890fa6be154416aa1c87e38d992c910aaca93d3399dc7b1c198f7d4e9e2238e2"} Nov 28 07:07:19 crc kubenswrapper[4922]: I1128 07:07:18.791468 4922 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-nzttm" Nov 28 07:07:19 crc kubenswrapper[4922]: I1128 07:07:18.791761 4922 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-nzttm" Nov 28 07:07:19 crc kubenswrapper[4922]: I1128 07:07:18.791814 4922 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" 
pod="openshift-ovn-kubernetes/ovnkube-node-nzttm" Nov 28 07:07:19 crc kubenswrapper[4922]: I1128 07:07:18.817840 4922 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ovn-kubernetes/ovnkube-node-nzttm" podStartSLOduration=8.817819923 podStartE2EDuration="8.817819923s" podCreationTimestamp="2025-11-28 07:07:10 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 07:07:18.81436672 +0000 UTC m=+883.734762322" watchObservedRunningTime="2025-11-28 07:07:18.817819923 +0000 UTC m=+883.738215525" Nov 28 07:07:19 crc kubenswrapper[4922]: I1128 07:07:18.832872 4922 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-nzttm" Nov 28 07:07:19 crc kubenswrapper[4922]: I1128 07:07:18.835548 4922 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-nzttm" Nov 28 07:07:23 crc kubenswrapper[4922]: I1128 07:07:23.377911 4922 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["crc-storage/crc-storage-crc-p72j6"] Nov 28 07:07:23 crc kubenswrapper[4922]: I1128 07:07:23.379813 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="crc-storage/crc-storage-crc-p72j6" Nov 28 07:07:23 crc kubenswrapper[4922]: I1128 07:07:23.382694 4922 reflector.go:368] Caches populated for *v1.ConfigMap from object-"crc-storage"/"kube-root-ca.crt" Nov 28 07:07:23 crc kubenswrapper[4922]: I1128 07:07:23.382740 4922 reflector.go:368] Caches populated for *v1.Secret from object-"crc-storage"/"crc-storage-dockercfg-qvp8t" Nov 28 07:07:23 crc kubenswrapper[4922]: I1128 07:07:23.383581 4922 reflector.go:368] Caches populated for *v1.ConfigMap from object-"crc-storage"/"crc-storage" Nov 28 07:07:23 crc kubenswrapper[4922]: I1128 07:07:23.389294 4922 reflector.go:368] Caches populated for *v1.ConfigMap from object-"crc-storage"/"openshift-service-ca.crt" Nov 28 07:07:23 crc kubenswrapper[4922]: I1128 07:07:23.389980 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-mnt\" (UniqueName: \"kubernetes.io/host-path/08e23417-2d96-4c14-b56d-5b2b28fde979-node-mnt\") pod \"crc-storage-crc-p72j6\" (UID: \"08e23417-2d96-4c14-b56d-5b2b28fde979\") " pod="crc-storage/crc-storage-crc-p72j6" Nov 28 07:07:23 crc kubenswrapper[4922]: I1128 07:07:23.390094 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-n4475\" (UniqueName: \"kubernetes.io/projected/08e23417-2d96-4c14-b56d-5b2b28fde979-kube-api-access-n4475\") pod \"crc-storage-crc-p72j6\" (UID: \"08e23417-2d96-4c14-b56d-5b2b28fde979\") " pod="crc-storage/crc-storage-crc-p72j6" Nov 28 07:07:23 crc kubenswrapper[4922]: I1128 07:07:23.390186 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"crc-storage\" (UniqueName: \"kubernetes.io/configmap/08e23417-2d96-4c14-b56d-5b2b28fde979-crc-storage\") pod \"crc-storage-crc-p72j6\" (UID: \"08e23417-2d96-4c14-b56d-5b2b28fde979\") " pod="crc-storage/crc-storage-crc-p72j6" Nov 28 07:07:23 crc kubenswrapper[4922]: I1128 07:07:23.412740 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["crc-storage/crc-storage-crc-p72j6"] Nov 28 07:07:23 crc kubenswrapper[4922]: I1128 07:07:23.490964 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"crc-storage\" (UniqueName: 
\"kubernetes.io/configmap/08e23417-2d96-4c14-b56d-5b2b28fde979-crc-storage\") pod \"crc-storage-crc-p72j6\" (UID: \"08e23417-2d96-4c14-b56d-5b2b28fde979\") " pod="crc-storage/crc-storage-crc-p72j6" Nov 28 07:07:23 crc kubenswrapper[4922]: I1128 07:07:23.491199 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-mnt\" (UniqueName: \"kubernetes.io/host-path/08e23417-2d96-4c14-b56d-5b2b28fde979-node-mnt\") pod \"crc-storage-crc-p72j6\" (UID: \"08e23417-2d96-4c14-b56d-5b2b28fde979\") " pod="crc-storage/crc-storage-crc-p72j6" Nov 28 07:07:23 crc kubenswrapper[4922]: I1128 07:07:23.491348 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-n4475\" (UniqueName: \"kubernetes.io/projected/08e23417-2d96-4c14-b56d-5b2b28fde979-kube-api-access-n4475\") pod \"crc-storage-crc-p72j6\" (UID: \"08e23417-2d96-4c14-b56d-5b2b28fde979\") " pod="crc-storage/crc-storage-crc-p72j6" Nov 28 07:07:23 crc kubenswrapper[4922]: I1128 07:07:23.491423 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-mnt\" (UniqueName: \"kubernetes.io/host-path/08e23417-2d96-4c14-b56d-5b2b28fde979-node-mnt\") pod \"crc-storage-crc-p72j6\" (UID: \"08e23417-2d96-4c14-b56d-5b2b28fde979\") " pod="crc-storage/crc-storage-crc-p72j6" Nov 28 07:07:23 crc kubenswrapper[4922]: I1128 07:07:23.492010 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"crc-storage\" (UniqueName: \"kubernetes.io/configmap/08e23417-2d96-4c14-b56d-5b2b28fde979-crc-storage\") pod \"crc-storage-crc-p72j6\" (UID: \"08e23417-2d96-4c14-b56d-5b2b28fde979\") " pod="crc-storage/crc-storage-crc-p72j6" Nov 28 07:07:23 crc kubenswrapper[4922]: I1128 07:07:23.512996 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-n4475\" (UniqueName: \"kubernetes.io/projected/08e23417-2d96-4c14-b56d-5b2b28fde979-kube-api-access-n4475\") pod \"crc-storage-crc-p72j6\" (UID: \"08e23417-2d96-4c14-b56d-5b2b28fde979\") " pod="crc-storage/crc-storage-crc-p72j6" Nov 28 07:07:23 crc kubenswrapper[4922]: I1128 07:07:23.708969 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="crc-storage/crc-storage-crc-p72j6" Nov 28 07:07:23 crc kubenswrapper[4922]: I1128 07:07:23.932083 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["crc-storage/crc-storage-crc-p72j6"] Nov 28 07:07:23 crc kubenswrapper[4922]: I1128 07:07:23.946142 4922 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 28 07:07:24 crc kubenswrapper[4922]: I1128 07:07:24.825090 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="crc-storage/crc-storage-crc-p72j6" event={"ID":"08e23417-2d96-4c14-b56d-5b2b28fde979","Type":"ContainerStarted","Data":"ef6f1855030e957822cb5879f81ac9fd48b5c943ad953bc394bba517d562d7e5"} Nov 28 07:07:25 crc kubenswrapper[4922]: I1128 07:07:25.832795 4922 generic.go:334] "Generic (PLEG): container finished" podID="08e23417-2d96-4c14-b56d-5b2b28fde979" containerID="88a6b3a8da8a117ee13dfc5a1de000a39a195e92aa978e18f720e03517578f74" exitCode=0 Nov 28 07:07:25 crc kubenswrapper[4922]: I1128 07:07:25.832845 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="crc-storage/crc-storage-crc-p72j6" event={"ID":"08e23417-2d96-4c14-b56d-5b2b28fde979","Type":"ContainerDied","Data":"88a6b3a8da8a117ee13dfc5a1de000a39a195e92aa978e18f720e03517578f74"} Nov 28 07:07:27 crc kubenswrapper[4922]: I1128 07:07:27.163293 4922 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="crc-storage/crc-storage-crc-p72j6" Nov 28 07:07:27 crc kubenswrapper[4922]: I1128 07:07:27.346153 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-n4475\" (UniqueName: \"kubernetes.io/projected/08e23417-2d96-4c14-b56d-5b2b28fde979-kube-api-access-n4475\") pod \"08e23417-2d96-4c14-b56d-5b2b28fde979\" (UID: \"08e23417-2d96-4c14-b56d-5b2b28fde979\") " Nov 28 07:07:27 crc kubenswrapper[4922]: I1128 07:07:27.346392 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"crc-storage\" (UniqueName: \"kubernetes.io/configmap/08e23417-2d96-4c14-b56d-5b2b28fde979-crc-storage\") pod \"08e23417-2d96-4c14-b56d-5b2b28fde979\" (UID: \"08e23417-2d96-4c14-b56d-5b2b28fde979\") " Nov 28 07:07:27 crc kubenswrapper[4922]: I1128 07:07:27.346596 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"node-mnt\" (UniqueName: \"kubernetes.io/host-path/08e23417-2d96-4c14-b56d-5b2b28fde979-node-mnt\") pod \"08e23417-2d96-4c14-b56d-5b2b28fde979\" (UID: \"08e23417-2d96-4c14-b56d-5b2b28fde979\") " Nov 28 07:07:27 crc kubenswrapper[4922]: I1128 07:07:27.346746 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/08e23417-2d96-4c14-b56d-5b2b28fde979-node-mnt" (OuterVolumeSpecName: "node-mnt") pod "08e23417-2d96-4c14-b56d-5b2b28fde979" (UID: "08e23417-2d96-4c14-b56d-5b2b28fde979"). InnerVolumeSpecName "node-mnt". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 28 07:07:27 crc kubenswrapper[4922]: I1128 07:07:27.346988 4922 reconciler_common.go:293] "Volume detached for volume \"node-mnt\" (UniqueName: \"kubernetes.io/host-path/08e23417-2d96-4c14-b56d-5b2b28fde979-node-mnt\") on node \"crc\" DevicePath \"\"" Nov 28 07:07:27 crc kubenswrapper[4922]: I1128 07:07:27.355819 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/08e23417-2d96-4c14-b56d-5b2b28fde979-kube-api-access-n4475" (OuterVolumeSpecName: "kube-api-access-n4475") pod "08e23417-2d96-4c14-b56d-5b2b28fde979" (UID: "08e23417-2d96-4c14-b56d-5b2b28fde979"). InnerVolumeSpecName "kube-api-access-n4475". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 07:07:27 crc kubenswrapper[4922]: I1128 07:07:27.371955 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/08e23417-2d96-4c14-b56d-5b2b28fde979-crc-storage" (OuterVolumeSpecName: "crc-storage") pod "08e23417-2d96-4c14-b56d-5b2b28fde979" (UID: "08e23417-2d96-4c14-b56d-5b2b28fde979"). InnerVolumeSpecName "crc-storage". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 07:07:27 crc kubenswrapper[4922]: I1128 07:07:27.452248 4922 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-n4475\" (UniqueName: \"kubernetes.io/projected/08e23417-2d96-4c14-b56d-5b2b28fde979-kube-api-access-n4475\") on node \"crc\" DevicePath \"\"" Nov 28 07:07:27 crc kubenswrapper[4922]: I1128 07:07:27.452292 4922 reconciler_common.go:293] "Volume detached for volume \"crc-storage\" (UniqueName: \"kubernetes.io/configmap/08e23417-2d96-4c14-b56d-5b2b28fde979-crc-storage\") on node \"crc\" DevicePath \"\"" Nov 28 07:07:27 crc kubenswrapper[4922]: I1128 07:07:27.847285 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="crc-storage/crc-storage-crc-p72j6" event={"ID":"08e23417-2d96-4c14-b56d-5b2b28fde979","Type":"ContainerDied","Data":"ef6f1855030e957822cb5879f81ac9fd48b5c943ad953bc394bba517d562d7e5"} Nov 28 07:07:27 crc kubenswrapper[4922]: I1128 07:07:27.847664 4922 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="ef6f1855030e957822cb5879f81ac9fd48b5c943ad953bc394bba517d562d7e5" Nov 28 07:07:27 crc kubenswrapper[4922]: I1128 07:07:27.847374 4922 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="crc-storage/crc-storage-crc-p72j6" Nov 28 07:07:34 crc kubenswrapper[4922]: I1128 07:07:34.697535 4922 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-knlzd"] Nov 28 07:07:34 crc kubenswrapper[4922]: E1128 07:07:34.698561 4922 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="08e23417-2d96-4c14-b56d-5b2b28fde979" containerName="storage" Nov 28 07:07:34 crc kubenswrapper[4922]: I1128 07:07:34.698595 4922 state_mem.go:107] "Deleted CPUSet assignment" podUID="08e23417-2d96-4c14-b56d-5b2b28fde979" containerName="storage" Nov 28 07:07:34 crc kubenswrapper[4922]: I1128 07:07:34.698796 4922 memory_manager.go:354] "RemoveStaleState removing state" podUID="08e23417-2d96-4c14-b56d-5b2b28fde979" containerName="storage" Nov 28 07:07:34 crc kubenswrapper[4922]: I1128 07:07:34.700018 4922 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-knlzd" Nov 28 07:07:34 crc kubenswrapper[4922]: I1128 07:07:34.717365 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-knlzd"] Nov 28 07:07:34 crc kubenswrapper[4922]: I1128 07:07:34.847672 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gf4zq\" (UniqueName: \"kubernetes.io/projected/5d6b2b54-1eff-4308-91f0-9ce802a9136e-kube-api-access-gf4zq\") pod \"redhat-marketplace-knlzd\" (UID: \"5d6b2b54-1eff-4308-91f0-9ce802a9136e\") " pod="openshift-marketplace/redhat-marketplace-knlzd" Nov 28 07:07:34 crc kubenswrapper[4922]: I1128 07:07:34.847980 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5d6b2b54-1eff-4308-91f0-9ce802a9136e-catalog-content\") pod \"redhat-marketplace-knlzd\" (UID: \"5d6b2b54-1eff-4308-91f0-9ce802a9136e\") " pod="openshift-marketplace/redhat-marketplace-knlzd" Nov 28 07:07:34 crc kubenswrapper[4922]: I1128 07:07:34.848113 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5d6b2b54-1eff-4308-91f0-9ce802a9136e-utilities\") pod \"redhat-marketplace-knlzd\" (UID: \"5d6b2b54-1eff-4308-91f0-9ce802a9136e\") " pod="openshift-marketplace/redhat-marketplace-knlzd" Nov 28 07:07:34 crc kubenswrapper[4922]: I1128 07:07:34.949008 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5d6b2b54-1eff-4308-91f0-9ce802a9136e-catalog-content\") pod \"redhat-marketplace-knlzd\" (UID: \"5d6b2b54-1eff-4308-91f0-9ce802a9136e\") " pod="openshift-marketplace/redhat-marketplace-knlzd" Nov 28 07:07:34 crc kubenswrapper[4922]: I1128 07:07:34.949309 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5d6b2b54-1eff-4308-91f0-9ce802a9136e-utilities\") pod \"redhat-marketplace-knlzd\" (UID: \"5d6b2b54-1eff-4308-91f0-9ce802a9136e\") " pod="openshift-marketplace/redhat-marketplace-knlzd" Nov 28 07:07:34 crc kubenswrapper[4922]: I1128 07:07:34.949415 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gf4zq\" (UniqueName: \"kubernetes.io/projected/5d6b2b54-1eff-4308-91f0-9ce802a9136e-kube-api-access-gf4zq\") pod \"redhat-marketplace-knlzd\" (UID: \"5d6b2b54-1eff-4308-91f0-9ce802a9136e\") " pod="openshift-marketplace/redhat-marketplace-knlzd" Nov 28 07:07:34 crc kubenswrapper[4922]: I1128 07:07:34.949670 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5d6b2b54-1eff-4308-91f0-9ce802a9136e-catalog-content\") pod \"redhat-marketplace-knlzd\" (UID: \"5d6b2b54-1eff-4308-91f0-9ce802a9136e\") " pod="openshift-marketplace/redhat-marketplace-knlzd" Nov 28 07:07:34 crc kubenswrapper[4922]: I1128 07:07:34.949759 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5d6b2b54-1eff-4308-91f0-9ce802a9136e-utilities\") pod \"redhat-marketplace-knlzd\" (UID: \"5d6b2b54-1eff-4308-91f0-9ce802a9136e\") " pod="openshift-marketplace/redhat-marketplace-knlzd" Nov 28 07:07:34 crc kubenswrapper[4922]: I1128 07:07:34.985655 4922 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"kube-api-access-gf4zq\" (UniqueName: \"kubernetes.io/projected/5d6b2b54-1eff-4308-91f0-9ce802a9136e-kube-api-access-gf4zq\") pod \"redhat-marketplace-knlzd\" (UID: \"5d6b2b54-1eff-4308-91f0-9ce802a9136e\") " pod="openshift-marketplace/redhat-marketplace-knlzd" Nov 28 07:07:35 crc kubenswrapper[4922]: I1128 07:07:35.020154 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-knlzd" Nov 28 07:07:35 crc kubenswrapper[4922]: I1128 07:07:35.451386 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-knlzd"] Nov 28 07:07:35 crc kubenswrapper[4922]: I1128 07:07:35.903837 4922 generic.go:334] "Generic (PLEG): container finished" podID="5d6b2b54-1eff-4308-91f0-9ce802a9136e" containerID="aad1c31bd8e48881f6f83f74c64f67bdc922e6059f9ff4641b90857be4dc3c4c" exitCode=0 Nov 28 07:07:35 crc kubenswrapper[4922]: I1128 07:07:35.904153 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-knlzd" event={"ID":"5d6b2b54-1eff-4308-91f0-9ce802a9136e","Type":"ContainerDied","Data":"aad1c31bd8e48881f6f83f74c64f67bdc922e6059f9ff4641b90857be4dc3c4c"} Nov 28 07:07:35 crc kubenswrapper[4922]: I1128 07:07:35.904239 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-knlzd" event={"ID":"5d6b2b54-1eff-4308-91f0-9ce802a9136e","Type":"ContainerStarted","Data":"c5ed58d3112c5ee54bb6fcd21cab8fdb12b637e8f8b07ec02d3c7b4094481494"} Nov 28 07:07:36 crc kubenswrapper[4922]: I1128 07:07:36.484522 4922 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212fctzkw"] Nov 28 07:07:36 crc kubenswrapper[4922]: I1128 07:07:36.485461 4922 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212fctzkw" Nov 28 07:07:36 crc kubenswrapper[4922]: I1128 07:07:36.488293 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"default-dockercfg-vmwhc" Nov 28 07:07:36 crc kubenswrapper[4922]: I1128 07:07:36.507139 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212fctzkw"] Nov 28 07:07:36 crc kubenswrapper[4922]: I1128 07:07:36.672968 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/dd39c4e1-e433-4bd0-89e5-4c3fd87987e7-util\") pod \"5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212fctzkw\" (UID: \"dd39c4e1-e433-4bd0-89e5-4c3fd87987e7\") " pod="openshift-marketplace/5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212fctzkw" Nov 28 07:07:36 crc kubenswrapper[4922]: I1128 07:07:36.673325 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/dd39c4e1-e433-4bd0-89e5-4c3fd87987e7-bundle\") pod \"5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212fctzkw\" (UID: \"dd39c4e1-e433-4bd0-89e5-4c3fd87987e7\") " pod="openshift-marketplace/5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212fctzkw" Nov 28 07:07:36 crc kubenswrapper[4922]: I1128 07:07:36.673404 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rgfx8\" (UniqueName: \"kubernetes.io/projected/dd39c4e1-e433-4bd0-89e5-4c3fd87987e7-kube-api-access-rgfx8\") pod \"5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212fctzkw\" (UID: \"dd39c4e1-e433-4bd0-89e5-4c3fd87987e7\") " pod="openshift-marketplace/5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212fctzkw" Nov 28 07:07:36 crc kubenswrapper[4922]: I1128 07:07:36.774781 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/dd39c4e1-e433-4bd0-89e5-4c3fd87987e7-util\") pod \"5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212fctzkw\" (UID: \"dd39c4e1-e433-4bd0-89e5-4c3fd87987e7\") " pod="openshift-marketplace/5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212fctzkw" Nov 28 07:07:36 crc kubenswrapper[4922]: I1128 07:07:36.774850 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/dd39c4e1-e433-4bd0-89e5-4c3fd87987e7-bundle\") pod \"5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212fctzkw\" (UID: \"dd39c4e1-e433-4bd0-89e5-4c3fd87987e7\") " pod="openshift-marketplace/5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212fctzkw" Nov 28 07:07:36 crc kubenswrapper[4922]: I1128 07:07:36.774893 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rgfx8\" (UniqueName: \"kubernetes.io/projected/dd39c4e1-e433-4bd0-89e5-4c3fd87987e7-kube-api-access-rgfx8\") pod \"5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212fctzkw\" (UID: \"dd39c4e1-e433-4bd0-89e5-4c3fd87987e7\") " pod="openshift-marketplace/5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212fctzkw" Nov 28 07:07:36 crc kubenswrapper[4922]: I1128 07:07:36.775810 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"util\" (UniqueName: 
\"kubernetes.io/empty-dir/dd39c4e1-e433-4bd0-89e5-4c3fd87987e7-util\") pod \"5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212fctzkw\" (UID: \"dd39c4e1-e433-4bd0-89e5-4c3fd87987e7\") " pod="openshift-marketplace/5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212fctzkw" Nov 28 07:07:36 crc kubenswrapper[4922]: I1128 07:07:36.776035 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/dd39c4e1-e433-4bd0-89e5-4c3fd87987e7-bundle\") pod \"5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212fctzkw\" (UID: \"dd39c4e1-e433-4bd0-89e5-4c3fd87987e7\") " pod="openshift-marketplace/5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212fctzkw" Nov 28 07:07:36 crc kubenswrapper[4922]: I1128 07:07:36.807418 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rgfx8\" (UniqueName: \"kubernetes.io/projected/dd39c4e1-e433-4bd0-89e5-4c3fd87987e7-kube-api-access-rgfx8\") pod \"5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212fctzkw\" (UID: \"dd39c4e1-e433-4bd0-89e5-4c3fd87987e7\") " pod="openshift-marketplace/5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212fctzkw" Nov 28 07:07:36 crc kubenswrapper[4922]: I1128 07:07:36.915054 4922 generic.go:334] "Generic (PLEG): container finished" podID="5d6b2b54-1eff-4308-91f0-9ce802a9136e" containerID="e89bb154365cedf0c29a6fd449bfa498f62bfb332627412a570d3b548808ed44" exitCode=0 Nov 28 07:07:36 crc kubenswrapper[4922]: I1128 07:07:36.915106 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-knlzd" event={"ID":"5d6b2b54-1eff-4308-91f0-9ce802a9136e","Type":"ContainerDied","Data":"e89bb154365cedf0c29a6fd449bfa498f62bfb332627412a570d3b548808ed44"} Nov 28 07:07:37 crc kubenswrapper[4922]: I1128 07:07:37.107153 4922 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212fctzkw" Nov 28 07:07:37 crc kubenswrapper[4922]: I1128 07:07:37.542934 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212fctzkw"] Nov 28 07:07:37 crc kubenswrapper[4922]: W1128 07:07:37.552438 4922 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-poddd39c4e1_e433_4bd0_89e5_4c3fd87987e7.slice/crio-0e7614425db8b7c029c0fdf08160621f0a43512c4ee2eec219d9f086171f389f WatchSource:0}: Error finding container 0e7614425db8b7c029c0fdf08160621f0a43512c4ee2eec219d9f086171f389f: Status 404 returned error can't find the container with id 0e7614425db8b7c029c0fdf08160621f0a43512c4ee2eec219d9f086171f389f Nov 28 07:07:37 crc kubenswrapper[4922]: I1128 07:07:37.924648 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-knlzd" event={"ID":"5d6b2b54-1eff-4308-91f0-9ce802a9136e","Type":"ContainerStarted","Data":"8c43e98cf448f21c8fe148d4a16caf1c85bf7f81e1b82344a75c969895dc583a"} Nov 28 07:07:37 crc kubenswrapper[4922]: I1128 07:07:37.928283 4922 generic.go:334] "Generic (PLEG): container finished" podID="dd39c4e1-e433-4bd0-89e5-4c3fd87987e7" containerID="4f6b52ed1be95aa4c43410ff2a2c9f2dbc44ad17835e8e39a843e2fab66c98ab" exitCode=0 Nov 28 07:07:37 crc kubenswrapper[4922]: I1128 07:07:37.928329 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212fctzkw" event={"ID":"dd39c4e1-e433-4bd0-89e5-4c3fd87987e7","Type":"ContainerDied","Data":"4f6b52ed1be95aa4c43410ff2a2c9f2dbc44ad17835e8e39a843e2fab66c98ab"} Nov 28 07:07:37 crc kubenswrapper[4922]: I1128 07:07:37.928354 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212fctzkw" event={"ID":"dd39c4e1-e433-4bd0-89e5-4c3fd87987e7","Type":"ContainerStarted","Data":"0e7614425db8b7c029c0fdf08160621f0a43512c4ee2eec219d9f086171f389f"} Nov 28 07:07:37 crc kubenswrapper[4922]: I1128 07:07:37.944662 4922 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-knlzd" podStartSLOduration=2.389168714 podStartE2EDuration="3.944647324s" podCreationTimestamp="2025-11-28 07:07:34 +0000 UTC" firstStartedPulling="2025-11-28 07:07:35.905876089 +0000 UTC m=+900.826271671" lastFinishedPulling="2025-11-28 07:07:37.461354689 +0000 UTC m=+902.381750281" observedRunningTime="2025-11-28 07:07:37.943407151 +0000 UTC m=+902.863802773" watchObservedRunningTime="2025-11-28 07:07:37.944647324 +0000 UTC m=+902.865042906" Nov 28 07:07:39 crc kubenswrapper[4922]: I1128 07:07:39.941919 4922 generic.go:334] "Generic (PLEG): container finished" podID="dd39c4e1-e433-4bd0-89e5-4c3fd87987e7" containerID="7f7d2370b41a5deabfab20087d1cabb8f92d056a8bc01003c7bfa11aafa93b71" exitCode=0 Nov 28 07:07:39 crc kubenswrapper[4922]: I1128 07:07:39.941983 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212fctzkw" event={"ID":"dd39c4e1-e433-4bd0-89e5-4c3fd87987e7","Type":"ContainerDied","Data":"7f7d2370b41a5deabfab20087d1cabb8f92d056a8bc01003c7bfa11aafa93b71"} Nov 28 07:07:40 crc kubenswrapper[4922]: I1128 07:07:40.044487 4922 kubelet.go:2421] "SyncLoop ADD" source="api" 
pods=["openshift-marketplace/redhat-operators-x4k7x"] Nov 28 07:07:40 crc kubenswrapper[4922]: I1128 07:07:40.049238 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-x4k7x" Nov 28 07:07:40 crc kubenswrapper[4922]: I1128 07:07:40.057893 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-x4k7x"] Nov 28 07:07:40 crc kubenswrapper[4922]: I1128 07:07:40.219211 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e0550718-6eeb-42e1-890f-2ff3144a2185-utilities\") pod \"redhat-operators-x4k7x\" (UID: \"e0550718-6eeb-42e1-890f-2ff3144a2185\") " pod="openshift-marketplace/redhat-operators-x4k7x" Nov 28 07:07:40 crc kubenswrapper[4922]: I1128 07:07:40.219306 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e0550718-6eeb-42e1-890f-2ff3144a2185-catalog-content\") pod \"redhat-operators-x4k7x\" (UID: \"e0550718-6eeb-42e1-890f-2ff3144a2185\") " pod="openshift-marketplace/redhat-operators-x4k7x" Nov 28 07:07:40 crc kubenswrapper[4922]: I1128 07:07:40.219339 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-982nq\" (UniqueName: \"kubernetes.io/projected/e0550718-6eeb-42e1-890f-2ff3144a2185-kube-api-access-982nq\") pod \"redhat-operators-x4k7x\" (UID: \"e0550718-6eeb-42e1-890f-2ff3144a2185\") " pod="openshift-marketplace/redhat-operators-x4k7x" Nov 28 07:07:40 crc kubenswrapper[4922]: I1128 07:07:40.320686 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e0550718-6eeb-42e1-890f-2ff3144a2185-utilities\") pod \"redhat-operators-x4k7x\" (UID: \"e0550718-6eeb-42e1-890f-2ff3144a2185\") " pod="openshift-marketplace/redhat-operators-x4k7x" Nov 28 07:07:40 crc kubenswrapper[4922]: I1128 07:07:40.320739 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e0550718-6eeb-42e1-890f-2ff3144a2185-catalog-content\") pod \"redhat-operators-x4k7x\" (UID: \"e0550718-6eeb-42e1-890f-2ff3144a2185\") " pod="openshift-marketplace/redhat-operators-x4k7x" Nov 28 07:07:40 crc kubenswrapper[4922]: I1128 07:07:40.320756 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-982nq\" (UniqueName: \"kubernetes.io/projected/e0550718-6eeb-42e1-890f-2ff3144a2185-kube-api-access-982nq\") pod \"redhat-operators-x4k7x\" (UID: \"e0550718-6eeb-42e1-890f-2ff3144a2185\") " pod="openshift-marketplace/redhat-operators-x4k7x" Nov 28 07:07:40 crc kubenswrapper[4922]: I1128 07:07:40.321242 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e0550718-6eeb-42e1-890f-2ff3144a2185-utilities\") pod \"redhat-operators-x4k7x\" (UID: \"e0550718-6eeb-42e1-890f-2ff3144a2185\") " pod="openshift-marketplace/redhat-operators-x4k7x" Nov 28 07:07:40 crc kubenswrapper[4922]: I1128 07:07:40.321414 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e0550718-6eeb-42e1-890f-2ff3144a2185-catalog-content\") pod \"redhat-operators-x4k7x\" (UID: \"e0550718-6eeb-42e1-890f-2ff3144a2185\") " 
pod="openshift-marketplace/redhat-operators-x4k7x" Nov 28 07:07:40 crc kubenswrapper[4922]: I1128 07:07:40.346774 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-982nq\" (UniqueName: \"kubernetes.io/projected/e0550718-6eeb-42e1-890f-2ff3144a2185-kube-api-access-982nq\") pod \"redhat-operators-x4k7x\" (UID: \"e0550718-6eeb-42e1-890f-2ff3144a2185\") " pod="openshift-marketplace/redhat-operators-x4k7x" Nov 28 07:07:40 crc kubenswrapper[4922]: I1128 07:07:40.407796 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-x4k7x" Nov 28 07:07:40 crc kubenswrapper[4922]: I1128 07:07:40.839849 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-x4k7x"] Nov 28 07:07:40 crc kubenswrapper[4922]: I1128 07:07:40.946701 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-x4k7x" event={"ID":"e0550718-6eeb-42e1-890f-2ff3144a2185","Type":"ContainerStarted","Data":"1d234fcc791f3b0df599657063468da9bf33604ab3708011c5a8f60ff73b7682"} Nov 28 07:07:40 crc kubenswrapper[4922]: I1128 07:07:40.948822 4922 generic.go:334] "Generic (PLEG): container finished" podID="dd39c4e1-e433-4bd0-89e5-4c3fd87987e7" containerID="a02210b29d2a1cf406ee8c0e02a9eff0ca6b3b05169a52543e31450e94d77255" exitCode=0 Nov 28 07:07:40 crc kubenswrapper[4922]: I1128 07:07:40.948859 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212fctzkw" event={"ID":"dd39c4e1-e433-4bd0-89e5-4c3fd87987e7","Type":"ContainerDied","Data":"a02210b29d2a1cf406ee8c0e02a9eff0ca6b3b05169a52543e31450e94d77255"} Nov 28 07:07:41 crc kubenswrapper[4922]: I1128 07:07:41.271046 4922 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-nzttm" Nov 28 07:07:41 crc kubenswrapper[4922]: I1128 07:07:41.957360 4922 generic.go:334] "Generic (PLEG): container finished" podID="e0550718-6eeb-42e1-890f-2ff3144a2185" containerID="1da1c5486074eba6c9d4924e23685b960435df3e0a8695e11d1ccb3551ad74a7" exitCode=0 Nov 28 07:07:41 crc kubenswrapper[4922]: I1128 07:07:41.957422 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-x4k7x" event={"ID":"e0550718-6eeb-42e1-890f-2ff3144a2185","Type":"ContainerDied","Data":"1da1c5486074eba6c9d4924e23685b960435df3e0a8695e11d1ccb3551ad74a7"} Nov 28 07:07:42 crc kubenswrapper[4922]: I1128 07:07:42.177714 4922 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212fctzkw" Nov 28 07:07:42 crc kubenswrapper[4922]: I1128 07:07:42.347113 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/dd39c4e1-e433-4bd0-89e5-4c3fd87987e7-bundle\") pod \"dd39c4e1-e433-4bd0-89e5-4c3fd87987e7\" (UID: \"dd39c4e1-e433-4bd0-89e5-4c3fd87987e7\") " Nov 28 07:07:42 crc kubenswrapper[4922]: I1128 07:07:42.347168 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rgfx8\" (UniqueName: \"kubernetes.io/projected/dd39c4e1-e433-4bd0-89e5-4c3fd87987e7-kube-api-access-rgfx8\") pod \"dd39c4e1-e433-4bd0-89e5-4c3fd87987e7\" (UID: \"dd39c4e1-e433-4bd0-89e5-4c3fd87987e7\") " Nov 28 07:07:42 crc kubenswrapper[4922]: I1128 07:07:42.347215 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/dd39c4e1-e433-4bd0-89e5-4c3fd87987e7-util\") pod \"dd39c4e1-e433-4bd0-89e5-4c3fd87987e7\" (UID: \"dd39c4e1-e433-4bd0-89e5-4c3fd87987e7\") " Nov 28 07:07:42 crc kubenswrapper[4922]: I1128 07:07:42.347880 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/dd39c4e1-e433-4bd0-89e5-4c3fd87987e7-bundle" (OuterVolumeSpecName: "bundle") pod "dd39c4e1-e433-4bd0-89e5-4c3fd87987e7" (UID: "dd39c4e1-e433-4bd0-89e5-4c3fd87987e7"). InnerVolumeSpecName "bundle". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 07:07:42 crc kubenswrapper[4922]: I1128 07:07:42.356153 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/dd39c4e1-e433-4bd0-89e5-4c3fd87987e7-kube-api-access-rgfx8" (OuterVolumeSpecName: "kube-api-access-rgfx8") pod "dd39c4e1-e433-4bd0-89e5-4c3fd87987e7" (UID: "dd39c4e1-e433-4bd0-89e5-4c3fd87987e7"). InnerVolumeSpecName "kube-api-access-rgfx8". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 07:07:42 crc kubenswrapper[4922]: I1128 07:07:42.382588 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/dd39c4e1-e433-4bd0-89e5-4c3fd87987e7-util" (OuterVolumeSpecName: "util") pod "dd39c4e1-e433-4bd0-89e5-4c3fd87987e7" (UID: "dd39c4e1-e433-4bd0-89e5-4c3fd87987e7"). InnerVolumeSpecName "util". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 07:07:42 crc kubenswrapper[4922]: I1128 07:07:42.449122 4922 reconciler_common.go:293] "Volume detached for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/dd39c4e1-e433-4bd0-89e5-4c3fd87987e7-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 07:07:42 crc kubenswrapper[4922]: I1128 07:07:42.449284 4922 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rgfx8\" (UniqueName: \"kubernetes.io/projected/dd39c4e1-e433-4bd0-89e5-4c3fd87987e7-kube-api-access-rgfx8\") on node \"crc\" DevicePath \"\"" Nov 28 07:07:42 crc kubenswrapper[4922]: I1128 07:07:42.449317 4922 reconciler_common.go:293] "Volume detached for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/dd39c4e1-e433-4bd0-89e5-4c3fd87987e7-util\") on node \"crc\" DevicePath \"\"" Nov 28 07:07:42 crc kubenswrapper[4922]: I1128 07:07:42.971016 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212fctzkw" event={"ID":"dd39c4e1-e433-4bd0-89e5-4c3fd87987e7","Type":"ContainerDied","Data":"0e7614425db8b7c029c0fdf08160621f0a43512c4ee2eec219d9f086171f389f"} Nov 28 07:07:42 crc kubenswrapper[4922]: I1128 07:07:42.971509 4922 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="0e7614425db8b7c029c0fdf08160621f0a43512c4ee2eec219d9f086171f389f" Nov 28 07:07:42 crc kubenswrapper[4922]: I1128 07:07:42.971631 4922 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212fctzkw" Nov 28 07:07:43 crc kubenswrapper[4922]: I1128 07:07:43.981378 4922 generic.go:334] "Generic (PLEG): container finished" podID="e0550718-6eeb-42e1-890f-2ff3144a2185" containerID="58a9688ec44f04f27b831605b25c08464f68c2db1207d1e6a317edc15072e9b0" exitCode=0 Nov 28 07:07:43 crc kubenswrapper[4922]: I1128 07:07:43.981445 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-x4k7x" event={"ID":"e0550718-6eeb-42e1-890f-2ff3144a2185","Type":"ContainerDied","Data":"58a9688ec44f04f27b831605b25c08464f68c2db1207d1e6a317edc15072e9b0"} Nov 28 07:07:44 crc kubenswrapper[4922]: I1128 07:07:44.987745 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-x4k7x" event={"ID":"e0550718-6eeb-42e1-890f-2ff3144a2185","Type":"ContainerStarted","Data":"a29406c6ef58a2f4587518ee10a51367b47bac4571513338c494c76b30ddc3ea"} Nov 28 07:07:45 crc kubenswrapper[4922]: I1128 07:07:45.013942 4922 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-x4k7x" podStartSLOduration=2.375756965 podStartE2EDuration="5.013916733s" podCreationTimestamp="2025-11-28 07:07:40 +0000 UTC" firstStartedPulling="2025-11-28 07:07:41.959330236 +0000 UTC m=+906.879725828" lastFinishedPulling="2025-11-28 07:07:44.597489984 +0000 UTC m=+909.517885596" observedRunningTime="2025-11-28 07:07:45.005187065 +0000 UTC m=+909.925582657" watchObservedRunningTime="2025-11-28 07:07:45.013916733 +0000 UTC m=+909.934312335" Nov 28 07:07:45 crc kubenswrapper[4922]: I1128 07:07:45.020538 4922 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-knlzd" Nov 28 07:07:45 crc kubenswrapper[4922]: I1128 07:07:45.020761 4922 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-knlzd" 
Nov 28 07:07:45 crc kubenswrapper[4922]: I1128 07:07:45.078386 4922 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-knlzd" Nov 28 07:07:46 crc kubenswrapper[4922]: I1128 07:07:46.031987 4922 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-knlzd" Nov 28 07:07:46 crc kubenswrapper[4922]: I1128 07:07:46.194715 4922 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-operator-5b5b58f5c8-khmcp"] Nov 28 07:07:46 crc kubenswrapper[4922]: E1128 07:07:46.195092 4922 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="dd39c4e1-e433-4bd0-89e5-4c3fd87987e7" containerName="util" Nov 28 07:07:46 crc kubenswrapper[4922]: I1128 07:07:46.195108 4922 state_mem.go:107] "Deleted CPUSet assignment" podUID="dd39c4e1-e433-4bd0-89e5-4c3fd87987e7" containerName="util" Nov 28 07:07:46 crc kubenswrapper[4922]: E1128 07:07:46.195127 4922 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="dd39c4e1-e433-4bd0-89e5-4c3fd87987e7" containerName="pull" Nov 28 07:07:46 crc kubenswrapper[4922]: I1128 07:07:46.195134 4922 state_mem.go:107] "Deleted CPUSet assignment" podUID="dd39c4e1-e433-4bd0-89e5-4c3fd87987e7" containerName="pull" Nov 28 07:07:46 crc kubenswrapper[4922]: E1128 07:07:46.195167 4922 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="dd39c4e1-e433-4bd0-89e5-4c3fd87987e7" containerName="extract" Nov 28 07:07:46 crc kubenswrapper[4922]: I1128 07:07:46.195173 4922 state_mem.go:107] "Deleted CPUSet assignment" podUID="dd39c4e1-e433-4bd0-89e5-4c3fd87987e7" containerName="extract" Nov 28 07:07:46 crc kubenswrapper[4922]: I1128 07:07:46.195291 4922 memory_manager.go:354] "RemoveStaleState removing state" podUID="dd39c4e1-e433-4bd0-89e5-4c3fd87987e7" containerName="extract" Nov 28 07:07:46 crc kubenswrapper[4922]: I1128 07:07:46.195678 4922 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-nmstate/nmstate-operator-5b5b58f5c8-khmcp" Nov 28 07:07:46 crc kubenswrapper[4922]: I1128 07:07:46.198742 4922 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-nmstate"/"kube-root-ca.crt" Nov 28 07:07:46 crc kubenswrapper[4922]: I1128 07:07:46.199549 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"nmstate-operator-dockercfg-nccj6" Nov 28 07:07:46 crc kubenswrapper[4922]: I1128 07:07:46.199846 4922 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-nmstate"/"openshift-service-ca.crt" Nov 28 07:07:46 crc kubenswrapper[4922]: I1128 07:07:46.200165 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-operator-5b5b58f5c8-khmcp"] Nov 28 07:07:46 crc kubenswrapper[4922]: I1128 07:07:46.301284 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-b692p\" (UniqueName: \"kubernetes.io/projected/77e47301-a519-4efa-bd65-6530957b27a0-kube-api-access-b692p\") pod \"nmstate-operator-5b5b58f5c8-khmcp\" (UID: \"77e47301-a519-4efa-bd65-6530957b27a0\") " pod="openshift-nmstate/nmstate-operator-5b5b58f5c8-khmcp" Nov 28 07:07:46 crc kubenswrapper[4922]: I1128 07:07:46.402898 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-b692p\" (UniqueName: \"kubernetes.io/projected/77e47301-a519-4efa-bd65-6530957b27a0-kube-api-access-b692p\") pod \"nmstate-operator-5b5b58f5c8-khmcp\" (UID: \"77e47301-a519-4efa-bd65-6530957b27a0\") " pod="openshift-nmstate/nmstate-operator-5b5b58f5c8-khmcp" Nov 28 07:07:46 crc kubenswrapper[4922]: I1128 07:07:46.420894 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-b692p\" (UniqueName: \"kubernetes.io/projected/77e47301-a519-4efa-bd65-6530957b27a0-kube-api-access-b692p\") pod \"nmstate-operator-5b5b58f5c8-khmcp\" (UID: \"77e47301-a519-4efa-bd65-6530957b27a0\") " pod="openshift-nmstate/nmstate-operator-5b5b58f5c8-khmcp" Nov 28 07:07:46 crc kubenswrapper[4922]: I1128 07:07:46.533430 4922 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-nmstate/nmstate-operator-5b5b58f5c8-khmcp" Nov 28 07:07:46 crc kubenswrapper[4922]: I1128 07:07:46.782165 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-operator-5b5b58f5c8-khmcp"] Nov 28 07:07:46 crc kubenswrapper[4922]: W1128 07:07:46.788167 4922 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod77e47301_a519_4efa_bd65_6530957b27a0.slice/crio-f39eb85fcdb68fe56b707cd134710ad933a7401633562bb508b616acc2fc89fd WatchSource:0}: Error finding container f39eb85fcdb68fe56b707cd134710ad933a7401633562bb508b616acc2fc89fd: Status 404 returned error can't find the container with id f39eb85fcdb68fe56b707cd134710ad933a7401633562bb508b616acc2fc89fd Nov 28 07:07:46 crc kubenswrapper[4922]: I1128 07:07:46.999391 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-operator-5b5b58f5c8-khmcp" event={"ID":"77e47301-a519-4efa-bd65-6530957b27a0","Type":"ContainerStarted","Data":"f39eb85fcdb68fe56b707cd134710ad933a7401633562bb508b616acc2fc89fd"} Nov 28 07:07:48 crc kubenswrapper[4922]: I1128 07:07:48.628778 4922 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-knlzd"] Nov 28 07:07:49 crc kubenswrapper[4922]: I1128 07:07:49.007413 4922 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-knlzd" podUID="5d6b2b54-1eff-4308-91f0-9ce802a9136e" containerName="registry-server" containerID="cri-o://8c43e98cf448f21c8fe148d4a16caf1c85bf7f81e1b82344a75c969895dc583a" gracePeriod=2 Nov 28 07:07:49 crc kubenswrapper[4922]: I1128 07:07:49.891292 4922 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-knlzd" Nov 28 07:07:50 crc kubenswrapper[4922]: I1128 07:07:50.014131 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-operator-5b5b58f5c8-khmcp" event={"ID":"77e47301-a519-4efa-bd65-6530957b27a0","Type":"ContainerStarted","Data":"a7b6ee211fade50ac7ef0fa8d0c203d58eb51aa91a5b5f998f59aec1a6d3be3d"} Nov 28 07:07:50 crc kubenswrapper[4922]: I1128 07:07:50.016635 4922 generic.go:334] "Generic (PLEG): container finished" podID="5d6b2b54-1eff-4308-91f0-9ce802a9136e" containerID="8c43e98cf448f21c8fe148d4a16caf1c85bf7f81e1b82344a75c969895dc583a" exitCode=0 Nov 28 07:07:50 crc kubenswrapper[4922]: I1128 07:07:50.016692 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-knlzd" event={"ID":"5d6b2b54-1eff-4308-91f0-9ce802a9136e","Type":"ContainerDied","Data":"8c43e98cf448f21c8fe148d4a16caf1c85bf7f81e1b82344a75c969895dc583a"} Nov 28 07:07:50 crc kubenswrapper[4922]: I1128 07:07:50.016729 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-knlzd" event={"ID":"5d6b2b54-1eff-4308-91f0-9ce802a9136e","Type":"ContainerDied","Data":"c5ed58d3112c5ee54bb6fcd21cab8fdb12b637e8f8b07ec02d3c7b4094481494"} Nov 28 07:07:50 crc kubenswrapper[4922]: I1128 07:07:50.016757 4922 scope.go:117] "RemoveContainer" containerID="8c43e98cf448f21c8fe148d4a16caf1c85bf7f81e1b82344a75c969895dc583a" Nov 28 07:07:50 crc kubenswrapper[4922]: I1128 07:07:50.016912 4922 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-knlzd" Nov 28 07:07:50 crc kubenswrapper[4922]: I1128 07:07:50.038391 4922 scope.go:117] "RemoveContainer" containerID="e89bb154365cedf0c29a6fd449bfa498f62bfb332627412a570d3b548808ed44" Nov 28 07:07:50 crc kubenswrapper[4922]: I1128 07:07:50.046799 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gf4zq\" (UniqueName: \"kubernetes.io/projected/5d6b2b54-1eff-4308-91f0-9ce802a9136e-kube-api-access-gf4zq\") pod \"5d6b2b54-1eff-4308-91f0-9ce802a9136e\" (UID: \"5d6b2b54-1eff-4308-91f0-9ce802a9136e\") " Nov 28 07:07:50 crc kubenswrapper[4922]: I1128 07:07:50.046883 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5d6b2b54-1eff-4308-91f0-9ce802a9136e-utilities\") pod \"5d6b2b54-1eff-4308-91f0-9ce802a9136e\" (UID: \"5d6b2b54-1eff-4308-91f0-9ce802a9136e\") " Nov 28 07:07:50 crc kubenswrapper[4922]: I1128 07:07:50.047042 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5d6b2b54-1eff-4308-91f0-9ce802a9136e-catalog-content\") pod \"5d6b2b54-1eff-4308-91f0-9ce802a9136e\" (UID: \"5d6b2b54-1eff-4308-91f0-9ce802a9136e\") " Nov 28 07:07:50 crc kubenswrapper[4922]: I1128 07:07:50.049738 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5d6b2b54-1eff-4308-91f0-9ce802a9136e-utilities" (OuterVolumeSpecName: "utilities") pod "5d6b2b54-1eff-4308-91f0-9ce802a9136e" (UID: "5d6b2b54-1eff-4308-91f0-9ce802a9136e"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 07:07:50 crc kubenswrapper[4922]: I1128 07:07:50.059077 4922 scope.go:117] "RemoveContainer" containerID="aad1c31bd8e48881f6f83f74c64f67bdc922e6059f9ff4641b90857be4dc3c4c" Nov 28 07:07:50 crc kubenswrapper[4922]: I1128 07:07:50.065686 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5d6b2b54-1eff-4308-91f0-9ce802a9136e-kube-api-access-gf4zq" (OuterVolumeSpecName: "kube-api-access-gf4zq") pod "5d6b2b54-1eff-4308-91f0-9ce802a9136e" (UID: "5d6b2b54-1eff-4308-91f0-9ce802a9136e"). InnerVolumeSpecName "kube-api-access-gf4zq". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 07:07:50 crc kubenswrapper[4922]: I1128 07:07:50.068700 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5d6b2b54-1eff-4308-91f0-9ce802a9136e-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "5d6b2b54-1eff-4308-91f0-9ce802a9136e" (UID: "5d6b2b54-1eff-4308-91f0-9ce802a9136e"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 07:07:50 crc kubenswrapper[4922]: I1128 07:07:50.105693 4922 scope.go:117] "RemoveContainer" containerID="8c43e98cf448f21c8fe148d4a16caf1c85bf7f81e1b82344a75c969895dc583a" Nov 28 07:07:50 crc kubenswrapper[4922]: E1128 07:07:50.106630 4922 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8c43e98cf448f21c8fe148d4a16caf1c85bf7f81e1b82344a75c969895dc583a\": container with ID starting with 8c43e98cf448f21c8fe148d4a16caf1c85bf7f81e1b82344a75c969895dc583a not found: ID does not exist" containerID="8c43e98cf448f21c8fe148d4a16caf1c85bf7f81e1b82344a75c969895dc583a" Nov 28 07:07:50 crc kubenswrapper[4922]: I1128 07:07:50.106706 4922 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8c43e98cf448f21c8fe148d4a16caf1c85bf7f81e1b82344a75c969895dc583a"} err="failed to get container status \"8c43e98cf448f21c8fe148d4a16caf1c85bf7f81e1b82344a75c969895dc583a\": rpc error: code = NotFound desc = could not find container \"8c43e98cf448f21c8fe148d4a16caf1c85bf7f81e1b82344a75c969895dc583a\": container with ID starting with 8c43e98cf448f21c8fe148d4a16caf1c85bf7f81e1b82344a75c969895dc583a not found: ID does not exist" Nov 28 07:07:50 crc kubenswrapper[4922]: I1128 07:07:50.106744 4922 scope.go:117] "RemoveContainer" containerID="e89bb154365cedf0c29a6fd449bfa498f62bfb332627412a570d3b548808ed44" Nov 28 07:07:50 crc kubenswrapper[4922]: E1128 07:07:50.107246 4922 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e89bb154365cedf0c29a6fd449bfa498f62bfb332627412a570d3b548808ed44\": container with ID starting with e89bb154365cedf0c29a6fd449bfa498f62bfb332627412a570d3b548808ed44 not found: ID does not exist" containerID="e89bb154365cedf0c29a6fd449bfa498f62bfb332627412a570d3b548808ed44" Nov 28 07:07:50 crc kubenswrapper[4922]: I1128 07:07:50.107288 4922 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e89bb154365cedf0c29a6fd449bfa498f62bfb332627412a570d3b548808ed44"} err="failed to get container status \"e89bb154365cedf0c29a6fd449bfa498f62bfb332627412a570d3b548808ed44\": rpc error: code = NotFound desc = could not find container \"e89bb154365cedf0c29a6fd449bfa498f62bfb332627412a570d3b548808ed44\": container with ID starting with e89bb154365cedf0c29a6fd449bfa498f62bfb332627412a570d3b548808ed44 not found: ID does not exist" Nov 28 07:07:50 crc kubenswrapper[4922]: I1128 07:07:50.107314 4922 scope.go:117] "RemoveContainer" containerID="aad1c31bd8e48881f6f83f74c64f67bdc922e6059f9ff4641b90857be4dc3c4c" Nov 28 07:07:50 crc kubenswrapper[4922]: E1128 07:07:50.107756 4922 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"aad1c31bd8e48881f6f83f74c64f67bdc922e6059f9ff4641b90857be4dc3c4c\": container with ID starting with aad1c31bd8e48881f6f83f74c64f67bdc922e6059f9ff4641b90857be4dc3c4c not found: ID does not exist" containerID="aad1c31bd8e48881f6f83f74c64f67bdc922e6059f9ff4641b90857be4dc3c4c" Nov 28 07:07:50 crc kubenswrapper[4922]: I1128 07:07:50.107774 4922 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"aad1c31bd8e48881f6f83f74c64f67bdc922e6059f9ff4641b90857be4dc3c4c"} err="failed to get container status \"aad1c31bd8e48881f6f83f74c64f67bdc922e6059f9ff4641b90857be4dc3c4c\": rpc error: code = NotFound desc = could not 
find container \"aad1c31bd8e48881f6f83f74c64f67bdc922e6059f9ff4641b90857be4dc3c4c\": container with ID starting with aad1c31bd8e48881f6f83f74c64f67bdc922e6059f9ff4641b90857be4dc3c4c not found: ID does not exist" Nov 28 07:07:50 crc kubenswrapper[4922]: I1128 07:07:50.148835 4922 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5d6b2b54-1eff-4308-91f0-9ce802a9136e-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 28 07:07:50 crc kubenswrapper[4922]: I1128 07:07:50.148889 4922 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gf4zq\" (UniqueName: \"kubernetes.io/projected/5d6b2b54-1eff-4308-91f0-9ce802a9136e-kube-api-access-gf4zq\") on node \"crc\" DevicePath \"\"" Nov 28 07:07:50 crc kubenswrapper[4922]: I1128 07:07:50.149021 4922 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5d6b2b54-1eff-4308-91f0-9ce802a9136e-utilities\") on node \"crc\" DevicePath \"\"" Nov 28 07:07:50 crc kubenswrapper[4922]: I1128 07:07:50.340897 4922 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-operator-5b5b58f5c8-khmcp" podStartSLOduration=2.205380278 podStartE2EDuration="4.340862643s" podCreationTimestamp="2025-11-28 07:07:46 +0000 UTC" firstStartedPulling="2025-11-28 07:07:46.79195231 +0000 UTC m=+911.712347902" lastFinishedPulling="2025-11-28 07:07:48.927434685 +0000 UTC m=+913.847830267" observedRunningTime="2025-11-28 07:07:50.035669507 +0000 UTC m=+914.956065109" watchObservedRunningTime="2025-11-28 07:07:50.340862643 +0000 UTC m=+915.261258285" Nov 28 07:07:50 crc kubenswrapper[4922]: I1128 07:07:50.351213 4922 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-knlzd"] Nov 28 07:07:50 crc kubenswrapper[4922]: I1128 07:07:50.357134 4922 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-knlzd"] Nov 28 07:07:50 crc kubenswrapper[4922]: I1128 07:07:50.408491 4922 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-x4k7x" Nov 28 07:07:50 crc kubenswrapper[4922]: I1128 07:07:50.408782 4922 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-x4k7x" Nov 28 07:07:50 crc kubenswrapper[4922]: I1128 07:07:50.449334 4922 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-x4k7x" Nov 28 07:07:51 crc kubenswrapper[4922]: I1128 07:07:51.065346 4922 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-x4k7x" Nov 28 07:07:51 crc kubenswrapper[4922]: I1128 07:07:51.408415 4922 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5d6b2b54-1eff-4308-91f0-9ce802a9136e" path="/var/lib/kubelet/pods/5d6b2b54-1eff-4308-91f0-9ce802a9136e/volumes" Nov 28 07:07:54 crc kubenswrapper[4922]: I1128 07:07:54.024861 4922 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-x4k7x"] Nov 28 07:07:54 crc kubenswrapper[4922]: I1128 07:07:54.040351 4922 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-x4k7x" podUID="e0550718-6eeb-42e1-890f-2ff3144a2185" containerName="registry-server" containerID="cri-o://a29406c6ef58a2f4587518ee10a51367b47bac4571513338c494c76b30ddc3ea" gracePeriod=2 Nov 28 07:07:56 
crc kubenswrapper[4922]: I1128 07:07:56.337353 4922 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-metrics-7f946cbc9-fsj5r"] Nov 28 07:07:56 crc kubenswrapper[4922]: E1128 07:07:56.337821 4922 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5d6b2b54-1eff-4308-91f0-9ce802a9136e" containerName="registry-server" Nov 28 07:07:56 crc kubenswrapper[4922]: I1128 07:07:56.337833 4922 state_mem.go:107] "Deleted CPUSet assignment" podUID="5d6b2b54-1eff-4308-91f0-9ce802a9136e" containerName="registry-server" Nov 28 07:07:56 crc kubenswrapper[4922]: E1128 07:07:56.337846 4922 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5d6b2b54-1eff-4308-91f0-9ce802a9136e" containerName="extract-content" Nov 28 07:07:56 crc kubenswrapper[4922]: I1128 07:07:56.337852 4922 state_mem.go:107] "Deleted CPUSet assignment" podUID="5d6b2b54-1eff-4308-91f0-9ce802a9136e" containerName="extract-content" Nov 28 07:07:56 crc kubenswrapper[4922]: E1128 07:07:56.337865 4922 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5d6b2b54-1eff-4308-91f0-9ce802a9136e" containerName="extract-utilities" Nov 28 07:07:56 crc kubenswrapper[4922]: I1128 07:07:56.337874 4922 state_mem.go:107] "Deleted CPUSet assignment" podUID="5d6b2b54-1eff-4308-91f0-9ce802a9136e" containerName="extract-utilities" Nov 28 07:07:56 crc kubenswrapper[4922]: I1128 07:07:56.337967 4922 memory_manager.go:354] "RemoveStaleState removing state" podUID="5d6b2b54-1eff-4308-91f0-9ce802a9136e" containerName="registry-server" Nov 28 07:07:56 crc kubenswrapper[4922]: I1128 07:07:56.338529 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-metrics-7f946cbc9-fsj5r" Nov 28 07:07:56 crc kubenswrapper[4922]: I1128 07:07:56.340682 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"nmstate-handler-dockercfg-ppl6g" Nov 28 07:07:56 crc kubenswrapper[4922]: I1128 07:07:56.354068 4922 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-webhook-5f6d4c5ccb-57bpj"] Nov 28 07:07:56 crc kubenswrapper[4922]: I1128 07:07:56.355264 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-webhook-5f6d4c5ccb-57bpj" Nov 28 07:07:56 crc kubenswrapper[4922]: I1128 07:07:56.356806 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"openshift-nmstate-webhook" Nov 28 07:07:56 crc kubenswrapper[4922]: I1128 07:07:56.359745 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-metrics-7f946cbc9-fsj5r"] Nov 28 07:07:56 crc kubenswrapper[4922]: I1128 07:07:56.386607 4922 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-handler-btcsz"] Nov 28 07:07:56 crc kubenswrapper[4922]: I1128 07:07:56.387555 4922 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-nmstate/nmstate-handler-btcsz" Nov 28 07:07:56 crc kubenswrapper[4922]: I1128 07:07:56.405344 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-webhook-5f6d4c5ccb-57bpj"] Nov 28 07:07:56 crc kubenswrapper[4922]: I1128 07:07:56.429069 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-l4wkn\" (UniqueName: \"kubernetes.io/projected/bda6cb3f-c411-41a1-8c49-338ac07f8193-kube-api-access-l4wkn\") pod \"nmstate-metrics-7f946cbc9-fsj5r\" (UID: \"bda6cb3f-c411-41a1-8c49-338ac07f8193\") " pod="openshift-nmstate/nmstate-metrics-7f946cbc9-fsj5r" Nov 28 07:07:56 crc kubenswrapper[4922]: I1128 07:07:56.473414 4922 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-console-plugin-7fbb5f6569-d7sl7"] Nov 28 07:07:56 crc kubenswrapper[4922]: I1128 07:07:56.479687 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-console-plugin-7fbb5f6569-d7sl7" Nov 28 07:07:56 crc kubenswrapper[4922]: I1128 07:07:56.481986 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"plugin-serving-cert" Nov 28 07:07:56 crc kubenswrapper[4922]: I1128 07:07:56.485417 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-console-plugin-7fbb5f6569-d7sl7"] Nov 28 07:07:56 crc kubenswrapper[4922]: I1128 07:07:56.485626 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"default-dockercfg-fhq2r" Nov 28 07:07:56 crc kubenswrapper[4922]: I1128 07:07:56.485631 4922 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-nmstate"/"nginx-conf" Nov 28 07:07:56 crc kubenswrapper[4922]: I1128 07:07:56.530188 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovs-socket\" (UniqueName: \"kubernetes.io/host-path/59fb61f1-8020-40e3-b397-d6ffa8fc74e4-ovs-socket\") pod \"nmstate-handler-btcsz\" (UID: \"59fb61f1-8020-40e3-b397-d6ffa8fc74e4\") " pod="openshift-nmstate/nmstate-handler-btcsz" Nov 28 07:07:56 crc kubenswrapper[4922]: I1128 07:07:56.530358 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-42zs5\" (UniqueName: \"kubernetes.io/projected/13075e30-8b1b-484b-9808-c2912780a009-kube-api-access-42zs5\") pod \"nmstate-webhook-5f6d4c5ccb-57bpj\" (UID: \"13075e30-8b1b-484b-9808-c2912780a009\") " pod="openshift-nmstate/nmstate-webhook-5f6d4c5ccb-57bpj" Nov 28 07:07:56 crc kubenswrapper[4922]: I1128 07:07:56.530405 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nmstate-lock\" (UniqueName: \"kubernetes.io/host-path/59fb61f1-8020-40e3-b397-d6ffa8fc74e4-nmstate-lock\") pod \"nmstate-handler-btcsz\" (UID: \"59fb61f1-8020-40e3-b397-d6ffa8fc74e4\") " pod="openshift-nmstate/nmstate-handler-btcsz" Nov 28 07:07:56 crc kubenswrapper[4922]: I1128 07:07:56.530848 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tls-key-pair\" (UniqueName: \"kubernetes.io/secret/13075e30-8b1b-484b-9808-c2912780a009-tls-key-pair\") pod \"nmstate-webhook-5f6d4c5ccb-57bpj\" (UID: \"13075e30-8b1b-484b-9808-c2912780a009\") " pod="openshift-nmstate/nmstate-webhook-5f6d4c5ccb-57bpj" Nov 28 07:07:56 crc kubenswrapper[4922]: I1128 07:07:56.530875 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume 
started for volume \"kube-api-access-9q2kn\" (UniqueName: \"kubernetes.io/projected/59fb61f1-8020-40e3-b397-d6ffa8fc74e4-kube-api-access-9q2kn\") pod \"nmstate-handler-btcsz\" (UID: \"59fb61f1-8020-40e3-b397-d6ffa8fc74e4\") " pod="openshift-nmstate/nmstate-handler-btcsz" Nov 28 07:07:56 crc kubenswrapper[4922]: I1128 07:07:56.531017 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-l4wkn\" (UniqueName: \"kubernetes.io/projected/bda6cb3f-c411-41a1-8c49-338ac07f8193-kube-api-access-l4wkn\") pod \"nmstate-metrics-7f946cbc9-fsj5r\" (UID: \"bda6cb3f-c411-41a1-8c49-338ac07f8193\") " pod="openshift-nmstate/nmstate-metrics-7f946cbc9-fsj5r" Nov 28 07:07:56 crc kubenswrapper[4922]: I1128 07:07:56.531088 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dbus-socket\" (UniqueName: \"kubernetes.io/host-path/59fb61f1-8020-40e3-b397-d6ffa8fc74e4-dbus-socket\") pod \"nmstate-handler-btcsz\" (UID: \"59fb61f1-8020-40e3-b397-d6ffa8fc74e4\") " pod="openshift-nmstate/nmstate-handler-btcsz" Nov 28 07:07:56 crc kubenswrapper[4922]: I1128 07:07:56.549310 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-l4wkn\" (UniqueName: \"kubernetes.io/projected/bda6cb3f-c411-41a1-8c49-338ac07f8193-kube-api-access-l4wkn\") pod \"nmstate-metrics-7f946cbc9-fsj5r\" (UID: \"bda6cb3f-c411-41a1-8c49-338ac07f8193\") " pod="openshift-nmstate/nmstate-metrics-7f946cbc9-fsj5r" Nov 28 07:07:56 crc kubenswrapper[4922]: I1128 07:07:56.632808 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9q2kn\" (UniqueName: \"kubernetes.io/projected/59fb61f1-8020-40e3-b397-d6ffa8fc74e4-kube-api-access-9q2kn\") pod \"nmstate-handler-btcsz\" (UID: \"59fb61f1-8020-40e3-b397-d6ffa8fc74e4\") " pod="openshift-nmstate/nmstate-handler-btcsz" Nov 28 07:07:56 crc kubenswrapper[4922]: I1128 07:07:56.632870 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dbus-socket\" (UniqueName: \"kubernetes.io/host-path/59fb61f1-8020-40e3-b397-d6ffa8fc74e4-dbus-socket\") pod \"nmstate-handler-btcsz\" (UID: \"59fb61f1-8020-40e3-b397-d6ffa8fc74e4\") " pod="openshift-nmstate/nmstate-handler-btcsz" Nov 28 07:07:56 crc kubenswrapper[4922]: I1128 07:07:56.632916 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jkwjm\" (UniqueName: \"kubernetes.io/projected/bb22bb64-4624-4718-9785-1dc7c3c125e4-kube-api-access-jkwjm\") pod \"nmstate-console-plugin-7fbb5f6569-d7sl7\" (UID: \"bb22bb64-4624-4718-9785-1dc7c3c125e4\") " pod="openshift-nmstate/nmstate-console-plugin-7fbb5f6569-d7sl7" Nov 28 07:07:56 crc kubenswrapper[4922]: I1128 07:07:56.632943 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovs-socket\" (UniqueName: \"kubernetes.io/host-path/59fb61f1-8020-40e3-b397-d6ffa8fc74e4-ovs-socket\") pod \"nmstate-handler-btcsz\" (UID: \"59fb61f1-8020-40e3-b397-d6ffa8fc74e4\") " pod="openshift-nmstate/nmstate-handler-btcsz" Nov 28 07:07:56 crc kubenswrapper[4922]: I1128 07:07:56.632972 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-42zs5\" (UniqueName: \"kubernetes.io/projected/13075e30-8b1b-484b-9808-c2912780a009-kube-api-access-42zs5\") pod \"nmstate-webhook-5f6d4c5ccb-57bpj\" (UID: \"13075e30-8b1b-484b-9808-c2912780a009\") " pod="openshift-nmstate/nmstate-webhook-5f6d4c5ccb-57bpj" Nov 28 07:07:56 crc kubenswrapper[4922]: 
I1128 07:07:56.632996 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/bb22bb64-4624-4718-9785-1dc7c3c125e4-nginx-conf\") pod \"nmstate-console-plugin-7fbb5f6569-d7sl7\" (UID: \"bb22bb64-4624-4718-9785-1dc7c3c125e4\") " pod="openshift-nmstate/nmstate-console-plugin-7fbb5f6569-d7sl7" Nov 28 07:07:56 crc kubenswrapper[4922]: I1128 07:07:56.633018 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugin-serving-cert\" (UniqueName: \"kubernetes.io/secret/bb22bb64-4624-4718-9785-1dc7c3c125e4-plugin-serving-cert\") pod \"nmstate-console-plugin-7fbb5f6569-d7sl7\" (UID: \"bb22bb64-4624-4718-9785-1dc7c3c125e4\") " pod="openshift-nmstate/nmstate-console-plugin-7fbb5f6569-d7sl7" Nov 28 07:07:56 crc kubenswrapper[4922]: I1128 07:07:56.633040 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovs-socket\" (UniqueName: \"kubernetes.io/host-path/59fb61f1-8020-40e3-b397-d6ffa8fc74e4-ovs-socket\") pod \"nmstate-handler-btcsz\" (UID: \"59fb61f1-8020-40e3-b397-d6ffa8fc74e4\") " pod="openshift-nmstate/nmstate-handler-btcsz" Nov 28 07:07:56 crc kubenswrapper[4922]: I1128 07:07:56.633055 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nmstate-lock\" (UniqueName: \"kubernetes.io/host-path/59fb61f1-8020-40e3-b397-d6ffa8fc74e4-nmstate-lock\") pod \"nmstate-handler-btcsz\" (UID: \"59fb61f1-8020-40e3-b397-d6ffa8fc74e4\") " pod="openshift-nmstate/nmstate-handler-btcsz" Nov 28 07:07:56 crc kubenswrapper[4922]: I1128 07:07:56.633099 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nmstate-lock\" (UniqueName: \"kubernetes.io/host-path/59fb61f1-8020-40e3-b397-d6ffa8fc74e4-nmstate-lock\") pod \"nmstate-handler-btcsz\" (UID: \"59fb61f1-8020-40e3-b397-d6ffa8fc74e4\") " pod="openshift-nmstate/nmstate-handler-btcsz" Nov 28 07:07:56 crc kubenswrapper[4922]: I1128 07:07:56.633203 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tls-key-pair\" (UniqueName: \"kubernetes.io/secret/13075e30-8b1b-484b-9808-c2912780a009-tls-key-pair\") pod \"nmstate-webhook-5f6d4c5ccb-57bpj\" (UID: \"13075e30-8b1b-484b-9808-c2912780a009\") " pod="openshift-nmstate/nmstate-webhook-5f6d4c5ccb-57bpj" Nov 28 07:07:56 crc kubenswrapper[4922]: E1128 07:07:56.633329 4922 secret.go:188] Couldn't get secret openshift-nmstate/openshift-nmstate-webhook: secret "openshift-nmstate-webhook" not found Nov 28 07:07:56 crc kubenswrapper[4922]: E1128 07:07:56.633382 4922 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/13075e30-8b1b-484b-9808-c2912780a009-tls-key-pair podName:13075e30-8b1b-484b-9808-c2912780a009 nodeName:}" failed. No retries permitted until 2025-11-28 07:07:57.133361247 +0000 UTC m=+922.053756839 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "tls-key-pair" (UniqueName: "kubernetes.io/secret/13075e30-8b1b-484b-9808-c2912780a009-tls-key-pair") pod "nmstate-webhook-5f6d4c5ccb-57bpj" (UID: "13075e30-8b1b-484b-9808-c2912780a009") : secret "openshift-nmstate-webhook" not found Nov 28 07:07:56 crc kubenswrapper[4922]: I1128 07:07:56.633323 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dbus-socket\" (UniqueName: \"kubernetes.io/host-path/59fb61f1-8020-40e3-b397-d6ffa8fc74e4-dbus-socket\") pod \"nmstate-handler-btcsz\" (UID: \"59fb61f1-8020-40e3-b397-d6ffa8fc74e4\") " pod="openshift-nmstate/nmstate-handler-btcsz" Nov 28 07:07:56 crc kubenswrapper[4922]: I1128 07:07:56.650258 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-42zs5\" (UniqueName: \"kubernetes.io/projected/13075e30-8b1b-484b-9808-c2912780a009-kube-api-access-42zs5\") pod \"nmstate-webhook-5f6d4c5ccb-57bpj\" (UID: \"13075e30-8b1b-484b-9808-c2912780a009\") " pod="openshift-nmstate/nmstate-webhook-5f6d4c5ccb-57bpj" Nov 28 07:07:56 crc kubenswrapper[4922]: I1128 07:07:56.664567 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-metrics-7f946cbc9-fsj5r" Nov 28 07:07:56 crc kubenswrapper[4922]: I1128 07:07:56.734026 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jkwjm\" (UniqueName: \"kubernetes.io/projected/bb22bb64-4624-4718-9785-1dc7c3c125e4-kube-api-access-jkwjm\") pod \"nmstate-console-plugin-7fbb5f6569-d7sl7\" (UID: \"bb22bb64-4624-4718-9785-1dc7c3c125e4\") " pod="openshift-nmstate/nmstate-console-plugin-7fbb5f6569-d7sl7" Nov 28 07:07:56 crc kubenswrapper[4922]: I1128 07:07:56.734095 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/bb22bb64-4624-4718-9785-1dc7c3c125e4-nginx-conf\") pod \"nmstate-console-plugin-7fbb5f6569-d7sl7\" (UID: \"bb22bb64-4624-4718-9785-1dc7c3c125e4\") " pod="openshift-nmstate/nmstate-console-plugin-7fbb5f6569-d7sl7" Nov 28 07:07:56 crc kubenswrapper[4922]: I1128 07:07:56.734123 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugin-serving-cert\" (UniqueName: \"kubernetes.io/secret/bb22bb64-4624-4718-9785-1dc7c3c125e4-plugin-serving-cert\") pod \"nmstate-console-plugin-7fbb5f6569-d7sl7\" (UID: \"bb22bb64-4624-4718-9785-1dc7c3c125e4\") " pod="openshift-nmstate/nmstate-console-plugin-7fbb5f6569-d7sl7" Nov 28 07:07:56 crc kubenswrapper[4922]: E1128 07:07:56.734330 4922 secret.go:188] Couldn't get secret openshift-nmstate/plugin-serving-cert: secret "plugin-serving-cert" not found Nov 28 07:07:56 crc kubenswrapper[4922]: E1128 07:07:56.734395 4922 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/bb22bb64-4624-4718-9785-1dc7c3c125e4-plugin-serving-cert podName:bb22bb64-4624-4718-9785-1dc7c3c125e4 nodeName:}" failed. No retries permitted until 2025-11-28 07:07:57.234369422 +0000 UTC m=+922.154765004 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "plugin-serving-cert" (UniqueName: "kubernetes.io/secret/bb22bb64-4624-4718-9785-1dc7c3c125e4-plugin-serving-cert") pod "nmstate-console-plugin-7fbb5f6569-d7sl7" (UID: "bb22bb64-4624-4718-9785-1dc7c3c125e4") : secret "plugin-serving-cert" not found Nov 28 07:07:56 crc kubenswrapper[4922]: I1128 07:07:56.735334 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/bb22bb64-4624-4718-9785-1dc7c3c125e4-nginx-conf\") pod \"nmstate-console-plugin-7fbb5f6569-d7sl7\" (UID: \"bb22bb64-4624-4718-9785-1dc7c3c125e4\") " pod="openshift-nmstate/nmstate-console-plugin-7fbb5f6569-d7sl7" Nov 28 07:07:56 crc kubenswrapper[4922]: I1128 07:07:56.756182 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jkwjm\" (UniqueName: \"kubernetes.io/projected/bb22bb64-4624-4718-9785-1dc7c3c125e4-kube-api-access-jkwjm\") pod \"nmstate-console-plugin-7fbb5f6569-d7sl7\" (UID: \"bb22bb64-4624-4718-9785-1dc7c3c125e4\") " pod="openshift-nmstate/nmstate-console-plugin-7fbb5f6569-d7sl7" Nov 28 07:07:56 crc kubenswrapper[4922]: I1128 07:07:56.859096 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-metrics-7f946cbc9-fsj5r"] Nov 28 07:07:57 crc kubenswrapper[4922]: I1128 07:07:57.069363 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-metrics-7f946cbc9-fsj5r" event={"ID":"bda6cb3f-c411-41a1-8c49-338ac07f8193","Type":"ContainerStarted","Data":"93c6503aed18a5d407e74a029f7c97ad10c4ae95c2b0031a15605e16f9e210c4"} Nov 28 07:07:57 crc kubenswrapper[4922]: I1128 07:07:57.140084 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tls-key-pair\" (UniqueName: \"kubernetes.io/secret/13075e30-8b1b-484b-9808-c2912780a009-tls-key-pair\") pod \"nmstate-webhook-5f6d4c5ccb-57bpj\" (UID: \"13075e30-8b1b-484b-9808-c2912780a009\") " pod="openshift-nmstate/nmstate-webhook-5f6d4c5ccb-57bpj" Nov 28 07:07:57 crc kubenswrapper[4922]: E1128 07:07:57.140447 4922 secret.go:188] Couldn't get secret openshift-nmstate/openshift-nmstate-webhook: secret "openshift-nmstate-webhook" not found Nov 28 07:07:57 crc kubenswrapper[4922]: E1128 07:07:57.140542 4922 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/13075e30-8b1b-484b-9808-c2912780a009-tls-key-pair podName:13075e30-8b1b-484b-9808-c2912780a009 nodeName:}" failed. No retries permitted until 2025-11-28 07:07:58.140521702 +0000 UTC m=+923.060917294 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "tls-key-pair" (UniqueName: "kubernetes.io/secret/13075e30-8b1b-484b-9808-c2912780a009-tls-key-pair") pod "nmstate-webhook-5f6d4c5ccb-57bpj" (UID: "13075e30-8b1b-484b-9808-c2912780a009") : secret "openshift-nmstate-webhook" not found Nov 28 07:07:57 crc kubenswrapper[4922]: I1128 07:07:57.241735 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugin-serving-cert\" (UniqueName: \"kubernetes.io/secret/bb22bb64-4624-4718-9785-1dc7c3c125e4-plugin-serving-cert\") pod \"nmstate-console-plugin-7fbb5f6569-d7sl7\" (UID: \"bb22bb64-4624-4718-9785-1dc7c3c125e4\") " pod="openshift-nmstate/nmstate-console-plugin-7fbb5f6569-d7sl7" Nov 28 07:07:57 crc kubenswrapper[4922]: E1128 07:07:57.241988 4922 secret.go:188] Couldn't get secret openshift-nmstate/plugin-serving-cert: secret "plugin-serving-cert" not found Nov 28 07:07:57 crc kubenswrapper[4922]: E1128 07:07:57.242098 4922 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/bb22bb64-4624-4718-9785-1dc7c3c125e4-plugin-serving-cert podName:bb22bb64-4624-4718-9785-1dc7c3c125e4 nodeName:}" failed. No retries permitted until 2025-11-28 07:07:58.242072203 +0000 UTC m=+923.162467815 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "plugin-serving-cert" (UniqueName: "kubernetes.io/secret/bb22bb64-4624-4718-9785-1dc7c3c125e4-plugin-serving-cert") pod "nmstate-console-plugin-7fbb5f6569-d7sl7" (UID: "bb22bb64-4624-4718-9785-1dc7c3c125e4") : secret "plugin-serving-cert" not found Nov 28 07:07:57 crc kubenswrapper[4922]: I1128 07:07:57.914725 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9q2kn\" (UniqueName: \"kubernetes.io/projected/59fb61f1-8020-40e3-b397-d6ffa8fc74e4-kube-api-access-9q2kn\") pod \"nmstate-handler-btcsz\" (UID: \"59fb61f1-8020-40e3-b397-d6ffa8fc74e4\") " pod="openshift-nmstate/nmstate-handler-btcsz" Nov 28 07:07:57 crc kubenswrapper[4922]: I1128 07:07:57.947462 4922 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-console/console-57d9cc5497-klmfx"] Nov 28 07:07:57 crc kubenswrapper[4922]: I1128 07:07:57.948278 4922 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-console/console-57d9cc5497-klmfx" Nov 28 07:07:57 crc kubenswrapper[4922]: I1128 07:07:57.979351 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-57d9cc5497-klmfx"] Nov 28 07:07:58 crc kubenswrapper[4922]: I1128 07:07:58.003624 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/b49052da-024e-4938-a416-13a73ba80347-oauth-serving-cert\") pod \"console-57d9cc5497-klmfx\" (UID: \"b49052da-024e-4938-a416-13a73ba80347\") " pod="openshift-console/console-57d9cc5497-klmfx" Nov 28 07:07:58 crc kubenswrapper[4922]: I1128 07:07:58.004050 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-s4h8j\" (UniqueName: \"kubernetes.io/projected/b49052da-024e-4938-a416-13a73ba80347-kube-api-access-s4h8j\") pod \"console-57d9cc5497-klmfx\" (UID: \"b49052da-024e-4938-a416-13a73ba80347\") " pod="openshift-console/console-57d9cc5497-klmfx" Nov 28 07:07:58 crc kubenswrapper[4922]: I1128 07:07:58.004104 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/b49052da-024e-4938-a416-13a73ba80347-console-oauth-config\") pod \"console-57d9cc5497-klmfx\" (UID: \"b49052da-024e-4938-a416-13a73ba80347\") " pod="openshift-console/console-57d9cc5497-klmfx" Nov 28 07:07:58 crc kubenswrapper[4922]: I1128 07:07:58.004137 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/b49052da-024e-4938-a416-13a73ba80347-service-ca\") pod \"console-57d9cc5497-klmfx\" (UID: \"b49052da-024e-4938-a416-13a73ba80347\") " pod="openshift-console/console-57d9cc5497-klmfx" Nov 28 07:07:58 crc kubenswrapper[4922]: I1128 07:07:58.004242 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/b49052da-024e-4938-a416-13a73ba80347-console-serving-cert\") pod \"console-57d9cc5497-klmfx\" (UID: \"b49052da-024e-4938-a416-13a73ba80347\") " pod="openshift-console/console-57d9cc5497-klmfx" Nov 28 07:07:58 crc kubenswrapper[4922]: I1128 07:07:58.004287 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/b49052da-024e-4938-a416-13a73ba80347-console-config\") pod \"console-57d9cc5497-klmfx\" (UID: \"b49052da-024e-4938-a416-13a73ba80347\") " pod="openshift-console/console-57d9cc5497-klmfx" Nov 28 07:07:58 crc kubenswrapper[4922]: I1128 07:07:58.004361 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/b49052da-024e-4938-a416-13a73ba80347-trusted-ca-bundle\") pod \"console-57d9cc5497-klmfx\" (UID: \"b49052da-024e-4938-a416-13a73ba80347\") " pod="openshift-console/console-57d9cc5497-klmfx" Nov 28 07:07:58 crc kubenswrapper[4922]: I1128 07:07:58.105499 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/b49052da-024e-4938-a416-13a73ba80347-oauth-serving-cert\") pod \"console-57d9cc5497-klmfx\" (UID: \"b49052da-024e-4938-a416-13a73ba80347\") " pod="openshift-console/console-57d9cc5497-klmfx" Nov 28 07:07:58 crc 
kubenswrapper[4922]: I1128 07:07:58.105584 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s4h8j\" (UniqueName: \"kubernetes.io/projected/b49052da-024e-4938-a416-13a73ba80347-kube-api-access-s4h8j\") pod \"console-57d9cc5497-klmfx\" (UID: \"b49052da-024e-4938-a416-13a73ba80347\") " pod="openshift-console/console-57d9cc5497-klmfx" Nov 28 07:07:58 crc kubenswrapper[4922]: I1128 07:07:58.105631 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/b49052da-024e-4938-a416-13a73ba80347-console-oauth-config\") pod \"console-57d9cc5497-klmfx\" (UID: \"b49052da-024e-4938-a416-13a73ba80347\") " pod="openshift-console/console-57d9cc5497-klmfx" Nov 28 07:07:58 crc kubenswrapper[4922]: I1128 07:07:58.105721 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/b49052da-024e-4938-a416-13a73ba80347-service-ca\") pod \"console-57d9cc5497-klmfx\" (UID: \"b49052da-024e-4938-a416-13a73ba80347\") " pod="openshift-console/console-57d9cc5497-klmfx" Nov 28 07:07:58 crc kubenswrapper[4922]: I1128 07:07:58.106007 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/b49052da-024e-4938-a416-13a73ba80347-console-serving-cert\") pod \"console-57d9cc5497-klmfx\" (UID: \"b49052da-024e-4938-a416-13a73ba80347\") " pod="openshift-console/console-57d9cc5497-klmfx" Nov 28 07:07:58 crc kubenswrapper[4922]: I1128 07:07:58.106934 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/b49052da-024e-4938-a416-13a73ba80347-console-config\") pod \"console-57d9cc5497-klmfx\" (UID: \"b49052da-024e-4938-a416-13a73ba80347\") " pod="openshift-console/console-57d9cc5497-klmfx" Nov 28 07:07:58 crc kubenswrapper[4922]: I1128 07:07:58.107025 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/b49052da-024e-4938-a416-13a73ba80347-trusted-ca-bundle\") pod \"console-57d9cc5497-klmfx\" (UID: \"b49052da-024e-4938-a416-13a73ba80347\") " pod="openshift-console/console-57d9cc5497-klmfx" Nov 28 07:07:58 crc kubenswrapper[4922]: I1128 07:07:58.107412 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/b49052da-024e-4938-a416-13a73ba80347-service-ca\") pod \"console-57d9cc5497-klmfx\" (UID: \"b49052da-024e-4938-a416-13a73ba80347\") " pod="openshift-console/console-57d9cc5497-klmfx" Nov 28 07:07:58 crc kubenswrapper[4922]: I1128 07:07:58.107725 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/b49052da-024e-4938-a416-13a73ba80347-console-config\") pod \"console-57d9cc5497-klmfx\" (UID: \"b49052da-024e-4938-a416-13a73ba80347\") " pod="openshift-console/console-57d9cc5497-klmfx" Nov 28 07:07:58 crc kubenswrapper[4922]: I1128 07:07:58.108432 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/b49052da-024e-4938-a416-13a73ba80347-oauth-serving-cert\") pod \"console-57d9cc5497-klmfx\" (UID: \"b49052da-024e-4938-a416-13a73ba80347\") " pod="openshift-console/console-57d9cc5497-klmfx" Nov 28 07:07:58 crc kubenswrapper[4922]: I1128 07:07:58.109126 4922 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/b49052da-024e-4938-a416-13a73ba80347-trusted-ca-bundle\") pod \"console-57d9cc5497-klmfx\" (UID: \"b49052da-024e-4938-a416-13a73ba80347\") " pod="openshift-console/console-57d9cc5497-klmfx" Nov 28 07:07:58 crc kubenswrapper[4922]: I1128 07:07:58.110380 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/b49052da-024e-4938-a416-13a73ba80347-console-oauth-config\") pod \"console-57d9cc5497-klmfx\" (UID: \"b49052da-024e-4938-a416-13a73ba80347\") " pod="openshift-console/console-57d9cc5497-klmfx" Nov 28 07:07:58 crc kubenswrapper[4922]: I1128 07:07:58.112635 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/b49052da-024e-4938-a416-13a73ba80347-console-serving-cert\") pod \"console-57d9cc5497-klmfx\" (UID: \"b49052da-024e-4938-a416-13a73ba80347\") " pod="openshift-console/console-57d9cc5497-klmfx" Nov 28 07:07:58 crc kubenswrapper[4922]: I1128 07:07:58.119745 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s4h8j\" (UniqueName: \"kubernetes.io/projected/b49052da-024e-4938-a416-13a73ba80347-kube-api-access-s4h8j\") pod \"console-57d9cc5497-klmfx\" (UID: \"b49052da-024e-4938-a416-13a73ba80347\") " pod="openshift-console/console-57d9cc5497-klmfx" Nov 28 07:07:58 crc kubenswrapper[4922]: I1128 07:07:58.205293 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-handler-btcsz" Nov 28 07:07:58 crc kubenswrapper[4922]: I1128 07:07:58.207681 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tls-key-pair\" (UniqueName: \"kubernetes.io/secret/13075e30-8b1b-484b-9808-c2912780a009-tls-key-pair\") pod \"nmstate-webhook-5f6d4c5ccb-57bpj\" (UID: \"13075e30-8b1b-484b-9808-c2912780a009\") " pod="openshift-nmstate/nmstate-webhook-5f6d4c5ccb-57bpj" Nov 28 07:07:58 crc kubenswrapper[4922]: I1128 07:07:58.213628 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tls-key-pair\" (UniqueName: \"kubernetes.io/secret/13075e30-8b1b-484b-9808-c2912780a009-tls-key-pair\") pod \"nmstate-webhook-5f6d4c5ccb-57bpj\" (UID: \"13075e30-8b1b-484b-9808-c2912780a009\") " pod="openshift-nmstate/nmstate-webhook-5f6d4c5ccb-57bpj" Nov 28 07:07:58 crc kubenswrapper[4922]: W1128 07:07:58.234066 4922 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod59fb61f1_8020_40e3_b397_d6ffa8fc74e4.slice/crio-e4e93df240b37ded6eabec04d871650667eb1b7b82e38c813c1f033f62dac430 WatchSource:0}: Error finding container e4e93df240b37ded6eabec04d871650667eb1b7b82e38c813c1f033f62dac430: Status 404 returned error can't find the container with id e4e93df240b37ded6eabec04d871650667eb1b7b82e38c813c1f033f62dac430 Nov 28 07:07:58 crc kubenswrapper[4922]: I1128 07:07:58.242039 4922 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-gxd22"] Nov 28 07:07:58 crc kubenswrapper[4922]: I1128 07:07:58.243123 4922 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-gxd22" Nov 28 07:07:58 crc kubenswrapper[4922]: I1128 07:07:58.246429 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-gxd22"] Nov 28 07:07:58 crc kubenswrapper[4922]: I1128 07:07:58.274488 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-57d9cc5497-klmfx" Nov 28 07:07:58 crc kubenswrapper[4922]: I1128 07:07:58.308610 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nllk4\" (UniqueName: \"kubernetes.io/projected/c6515822-5a2f-406a-bfb9-8e98c01b4a1e-kube-api-access-nllk4\") pod \"certified-operators-gxd22\" (UID: \"c6515822-5a2f-406a-bfb9-8e98c01b4a1e\") " pod="openshift-marketplace/certified-operators-gxd22" Nov 28 07:07:58 crc kubenswrapper[4922]: I1128 07:07:58.308705 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c6515822-5a2f-406a-bfb9-8e98c01b4a1e-utilities\") pod \"certified-operators-gxd22\" (UID: \"c6515822-5a2f-406a-bfb9-8e98c01b4a1e\") " pod="openshift-marketplace/certified-operators-gxd22" Nov 28 07:07:58 crc kubenswrapper[4922]: I1128 07:07:58.308773 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugin-serving-cert\" (UniqueName: \"kubernetes.io/secret/bb22bb64-4624-4718-9785-1dc7c3c125e4-plugin-serving-cert\") pod \"nmstate-console-plugin-7fbb5f6569-d7sl7\" (UID: \"bb22bb64-4624-4718-9785-1dc7c3c125e4\") " pod="openshift-nmstate/nmstate-console-plugin-7fbb5f6569-d7sl7" Nov 28 07:07:58 crc kubenswrapper[4922]: I1128 07:07:58.308815 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c6515822-5a2f-406a-bfb9-8e98c01b4a1e-catalog-content\") pod \"certified-operators-gxd22\" (UID: \"c6515822-5a2f-406a-bfb9-8e98c01b4a1e\") " pod="openshift-marketplace/certified-operators-gxd22" Nov 28 07:07:58 crc kubenswrapper[4922]: I1128 07:07:58.311850 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugin-serving-cert\" (UniqueName: \"kubernetes.io/secret/bb22bb64-4624-4718-9785-1dc7c3c125e4-plugin-serving-cert\") pod \"nmstate-console-plugin-7fbb5f6569-d7sl7\" (UID: \"bb22bb64-4624-4718-9785-1dc7c3c125e4\") " pod="openshift-nmstate/nmstate-console-plugin-7fbb5f6569-d7sl7" Nov 28 07:07:58 crc kubenswrapper[4922]: I1128 07:07:58.410524 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c6515822-5a2f-406a-bfb9-8e98c01b4a1e-catalog-content\") pod \"certified-operators-gxd22\" (UID: \"c6515822-5a2f-406a-bfb9-8e98c01b4a1e\") " pod="openshift-marketplace/certified-operators-gxd22" Nov 28 07:07:58 crc kubenswrapper[4922]: I1128 07:07:58.410837 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nllk4\" (UniqueName: \"kubernetes.io/projected/c6515822-5a2f-406a-bfb9-8e98c01b4a1e-kube-api-access-nllk4\") pod \"certified-operators-gxd22\" (UID: \"c6515822-5a2f-406a-bfb9-8e98c01b4a1e\") " pod="openshift-marketplace/certified-operators-gxd22" Nov 28 07:07:58 crc kubenswrapper[4922]: I1128 07:07:58.410863 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: 
\"kubernetes.io/empty-dir/c6515822-5a2f-406a-bfb9-8e98c01b4a1e-utilities\") pod \"certified-operators-gxd22\" (UID: \"c6515822-5a2f-406a-bfb9-8e98c01b4a1e\") " pod="openshift-marketplace/certified-operators-gxd22" Nov 28 07:07:58 crc kubenswrapper[4922]: I1128 07:07:58.411140 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c6515822-5a2f-406a-bfb9-8e98c01b4a1e-catalog-content\") pod \"certified-operators-gxd22\" (UID: \"c6515822-5a2f-406a-bfb9-8e98c01b4a1e\") " pod="openshift-marketplace/certified-operators-gxd22" Nov 28 07:07:58 crc kubenswrapper[4922]: I1128 07:07:58.411902 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c6515822-5a2f-406a-bfb9-8e98c01b4a1e-utilities\") pod \"certified-operators-gxd22\" (UID: \"c6515822-5a2f-406a-bfb9-8e98c01b4a1e\") " pod="openshift-marketplace/certified-operators-gxd22" Nov 28 07:07:58 crc kubenswrapper[4922]: I1128 07:07:58.433235 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nllk4\" (UniqueName: \"kubernetes.io/projected/c6515822-5a2f-406a-bfb9-8e98c01b4a1e-kube-api-access-nllk4\") pod \"certified-operators-gxd22\" (UID: \"c6515822-5a2f-406a-bfb9-8e98c01b4a1e\") " pod="openshift-marketplace/certified-operators-gxd22" Nov 28 07:07:58 crc kubenswrapper[4922]: I1128 07:07:58.471131 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-57d9cc5497-klmfx"] Nov 28 07:07:58 crc kubenswrapper[4922]: I1128 07:07:58.488343 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-webhook-5f6d4c5ccb-57bpj" Nov 28 07:07:58 crc kubenswrapper[4922]: W1128 07:07:58.493791 4922 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podb49052da_024e_4938_a416_13a73ba80347.slice/crio-b9d0faca4e745d2209badcfc2b6c68d00345ce9a2b591cc03f28f091e1e86ac7 WatchSource:0}: Error finding container b9d0faca4e745d2209badcfc2b6c68d00345ce9a2b591cc03f28f091e1e86ac7: Status 404 returned error can't find the container with id b9d0faca4e745d2209badcfc2b6c68d00345ce9a2b591cc03f28f091e1e86ac7 Nov 28 07:07:58 crc kubenswrapper[4922]: I1128 07:07:58.569522 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-gxd22" Nov 28 07:07:58 crc kubenswrapper[4922]: I1128 07:07:58.589360 4922 generic.go:334] "Generic (PLEG): container finished" podID="e0550718-6eeb-42e1-890f-2ff3144a2185" containerID="a29406c6ef58a2f4587518ee10a51367b47bac4571513338c494c76b30ddc3ea" exitCode=0 Nov 28 07:07:58 crc kubenswrapper[4922]: I1128 07:07:58.589398 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-x4k7x" event={"ID":"e0550718-6eeb-42e1-890f-2ff3144a2185","Type":"ContainerDied","Data":"a29406c6ef58a2f4587518ee10a51367b47bac4571513338c494c76b30ddc3ea"} Nov 28 07:07:58 crc kubenswrapper[4922]: I1128 07:07:58.599483 4922 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-nmstate/nmstate-console-plugin-7fbb5f6569-d7sl7" Nov 28 07:07:58 crc kubenswrapper[4922]: I1128 07:07:58.731591 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-webhook-5f6d4c5ccb-57bpj"] Nov 28 07:07:58 crc kubenswrapper[4922]: I1128 07:07:58.887465 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-gxd22"] Nov 28 07:07:58 crc kubenswrapper[4922]: W1128 07:07:58.891978 4922 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podc6515822_5a2f_406a_bfb9_8e98c01b4a1e.slice/crio-69d0fb3bdfd3328472f62adf52aa8e00c890e18c58b3d04002cd3e3154b64881 WatchSource:0}: Error finding container 69d0fb3bdfd3328472f62adf52aa8e00c890e18c58b3d04002cd3e3154b64881: Status 404 returned error can't find the container with id 69d0fb3bdfd3328472f62adf52aa8e00c890e18c58b3d04002cd3e3154b64881 Nov 28 07:07:58 crc kubenswrapper[4922]: I1128 07:07:58.906183 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-console-plugin-7fbb5f6569-d7sl7"] Nov 28 07:07:58 crc kubenswrapper[4922]: W1128 07:07:58.916399 4922 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podbb22bb64_4624_4718_9785_1dc7c3c125e4.slice/crio-264e55e79b464bee59b7ecdf8753b00cc495b4b28c97bc4cfbf47332bad91697 WatchSource:0}: Error finding container 264e55e79b464bee59b7ecdf8753b00cc495b4b28c97bc4cfbf47332bad91697: Status 404 returned error can't find the container with id 264e55e79b464bee59b7ecdf8753b00cc495b4b28c97bc4cfbf47332bad91697 Nov 28 07:07:59 crc kubenswrapper[4922]: I1128 07:07:59.135344 4922 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-x4k7x" Nov 28 07:07:59 crc kubenswrapper[4922]: I1128 07:07:59.223296 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e0550718-6eeb-42e1-890f-2ff3144a2185-catalog-content\") pod \"e0550718-6eeb-42e1-890f-2ff3144a2185\" (UID: \"e0550718-6eeb-42e1-890f-2ff3144a2185\") " Nov 28 07:07:59 crc kubenswrapper[4922]: I1128 07:07:59.223414 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e0550718-6eeb-42e1-890f-2ff3144a2185-utilities\") pod \"e0550718-6eeb-42e1-890f-2ff3144a2185\" (UID: \"e0550718-6eeb-42e1-890f-2ff3144a2185\") " Nov 28 07:07:59 crc kubenswrapper[4922]: I1128 07:07:59.223441 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-982nq\" (UniqueName: \"kubernetes.io/projected/e0550718-6eeb-42e1-890f-2ff3144a2185-kube-api-access-982nq\") pod \"e0550718-6eeb-42e1-890f-2ff3144a2185\" (UID: \"e0550718-6eeb-42e1-890f-2ff3144a2185\") " Nov 28 07:07:59 crc kubenswrapper[4922]: I1128 07:07:59.224500 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e0550718-6eeb-42e1-890f-2ff3144a2185-utilities" (OuterVolumeSpecName: "utilities") pod "e0550718-6eeb-42e1-890f-2ff3144a2185" (UID: "e0550718-6eeb-42e1-890f-2ff3144a2185"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 07:07:59 crc kubenswrapper[4922]: I1128 07:07:59.258870 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e0550718-6eeb-42e1-890f-2ff3144a2185-kube-api-access-982nq" (OuterVolumeSpecName: "kube-api-access-982nq") pod "e0550718-6eeb-42e1-890f-2ff3144a2185" (UID: "e0550718-6eeb-42e1-890f-2ff3144a2185"). InnerVolumeSpecName "kube-api-access-982nq". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 07:07:59 crc kubenswrapper[4922]: I1128 07:07:59.324053 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e0550718-6eeb-42e1-890f-2ff3144a2185-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "e0550718-6eeb-42e1-890f-2ff3144a2185" (UID: "e0550718-6eeb-42e1-890f-2ff3144a2185"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 07:07:59 crc kubenswrapper[4922]: I1128 07:07:59.324289 4922 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e0550718-6eeb-42e1-890f-2ff3144a2185-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 28 07:07:59 crc kubenswrapper[4922]: I1128 07:07:59.324311 4922 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e0550718-6eeb-42e1-890f-2ff3144a2185-utilities\") on node \"crc\" DevicePath \"\"" Nov 28 07:07:59 crc kubenswrapper[4922]: I1128 07:07:59.324323 4922 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-982nq\" (UniqueName: \"kubernetes.io/projected/e0550718-6eeb-42e1-890f-2ff3144a2185-kube-api-access-982nq\") on node \"crc\" DevicePath \"\"" Nov 28 07:07:59 crc kubenswrapper[4922]: I1128 07:07:59.605131 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-x4k7x" event={"ID":"e0550718-6eeb-42e1-890f-2ff3144a2185","Type":"ContainerDied","Data":"1d234fcc791f3b0df599657063468da9bf33604ab3708011c5a8f60ff73b7682"} Nov 28 07:07:59 crc kubenswrapper[4922]: I1128 07:07:59.605142 4922 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-x4k7x" Nov 28 07:07:59 crc kubenswrapper[4922]: I1128 07:07:59.605396 4922 scope.go:117] "RemoveContainer" containerID="a29406c6ef58a2f4587518ee10a51367b47bac4571513338c494c76b30ddc3ea" Nov 28 07:07:59 crc kubenswrapper[4922]: I1128 07:07:59.609581 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-handler-btcsz" event={"ID":"59fb61f1-8020-40e3-b397-d6ffa8fc74e4","Type":"ContainerStarted","Data":"e4e93df240b37ded6eabec04d871650667eb1b7b82e38c813c1f033f62dac430"} Nov 28 07:07:59 crc kubenswrapper[4922]: I1128 07:07:59.611484 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-gxd22" event={"ID":"c6515822-5a2f-406a-bfb9-8e98c01b4a1e","Type":"ContainerStarted","Data":"69d0fb3bdfd3328472f62adf52aa8e00c890e18c58b3d04002cd3e3154b64881"} Nov 28 07:07:59 crc kubenswrapper[4922]: I1128 07:07:59.614431 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-webhook-5f6d4c5ccb-57bpj" event={"ID":"13075e30-8b1b-484b-9808-c2912780a009","Type":"ContainerStarted","Data":"1bd32718f7e5c1aa190c69cc747640efe62ffa0084d20ea3721dedfc502914bc"} Nov 28 07:07:59 crc kubenswrapper[4922]: I1128 07:07:59.622616 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-57d9cc5497-klmfx" event={"ID":"b49052da-024e-4938-a416-13a73ba80347","Type":"ContainerStarted","Data":"6f41394e9f0c97deacbd43762db7c5482beeee07115a04e8fd12139905f0448d"} Nov 28 07:07:59 crc kubenswrapper[4922]: I1128 07:07:59.622671 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-57d9cc5497-klmfx" event={"ID":"b49052da-024e-4938-a416-13a73ba80347","Type":"ContainerStarted","Data":"b9d0faca4e745d2209badcfc2b6c68d00345ce9a2b591cc03f28f091e1e86ac7"} Nov 28 07:07:59 crc kubenswrapper[4922]: I1128 07:07:59.623669 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-console-plugin-7fbb5f6569-d7sl7" event={"ID":"bb22bb64-4624-4718-9785-1dc7c3c125e4","Type":"ContainerStarted","Data":"264e55e79b464bee59b7ecdf8753b00cc495b4b28c97bc4cfbf47332bad91697"} Nov 28 07:07:59 crc kubenswrapper[4922]: I1128 07:07:59.629944 4922 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-x4k7x"] Nov 28 07:07:59 crc kubenswrapper[4922]: I1128 07:07:59.631912 4922 scope.go:117] "RemoveContainer" containerID="58a9688ec44f04f27b831605b25c08464f68c2db1207d1e6a317edc15072e9b0" Nov 28 07:07:59 crc kubenswrapper[4922]: I1128 07:07:59.634833 4922 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-x4k7x"] Nov 28 07:07:59 crc kubenswrapper[4922]: I1128 07:07:59.651400 4922 scope.go:117] "RemoveContainer" containerID="1da1c5486074eba6c9d4924e23685b960435df3e0a8695e11d1ccb3551ad74a7" Nov 28 07:08:00 crc kubenswrapper[4922]: I1128 07:08:00.636179 4922 generic.go:334] "Generic (PLEG): container finished" podID="c6515822-5a2f-406a-bfb9-8e98c01b4a1e" containerID="48b030e65b068811666dd72e5b806c7d2834490c3e07abe662eba526fc7bda65" exitCode=0 Nov 28 07:08:00 crc kubenswrapper[4922]: I1128 07:08:00.636341 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-gxd22" event={"ID":"c6515822-5a2f-406a-bfb9-8e98c01b4a1e","Type":"ContainerDied","Data":"48b030e65b068811666dd72e5b806c7d2834490c3e07abe662eba526fc7bda65"} Nov 28 07:08:00 crc kubenswrapper[4922]: I1128 07:08:00.665718 4922 
pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-console/console-57d9cc5497-klmfx" podStartSLOduration=3.6656977299999998 podStartE2EDuration="3.66569773s" podCreationTimestamp="2025-11-28 07:07:57 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 07:07:59.651733839 +0000 UTC m=+924.572129441" watchObservedRunningTime="2025-11-28 07:08:00.66569773 +0000 UTC m=+925.586093342" Nov 28 07:08:01 crc kubenswrapper[4922]: I1128 07:08:01.405673 4922 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e0550718-6eeb-42e1-890f-2ff3144a2185" path="/var/lib/kubelet/pods/e0550718-6eeb-42e1-890f-2ff3144a2185/volumes" Nov 28 07:08:06 crc kubenswrapper[4922]: I1128 07:08:06.677619 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-handler-btcsz" event={"ID":"59fb61f1-8020-40e3-b397-d6ffa8fc74e4","Type":"ContainerStarted","Data":"b481525fe320119acfe237e2906ddf7858550a55067a433ddba9a33884eaaf93"} Nov 28 07:08:06 crc kubenswrapper[4922]: I1128 07:08:06.678128 4922 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-nmstate/nmstate-handler-btcsz" Nov 28 07:08:06 crc kubenswrapper[4922]: I1128 07:08:06.679327 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-metrics-7f946cbc9-fsj5r" event={"ID":"bda6cb3f-c411-41a1-8c49-338ac07f8193","Type":"ContainerStarted","Data":"3060255dbc547ffe8a399ab00b952dbbdffc41d8c82068dfacbd172ee1fff32f"} Nov 28 07:08:06 crc kubenswrapper[4922]: I1128 07:08:06.682160 4922 generic.go:334] "Generic (PLEG): container finished" podID="c6515822-5a2f-406a-bfb9-8e98c01b4a1e" containerID="f1f5e82df45e32741cfb275c36628b8f0a4998417bf0d1c355a5c20639c0ae3c" exitCode=0 Nov 28 07:08:06 crc kubenswrapper[4922]: I1128 07:08:06.682194 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-gxd22" event={"ID":"c6515822-5a2f-406a-bfb9-8e98c01b4a1e","Type":"ContainerDied","Data":"f1f5e82df45e32741cfb275c36628b8f0a4998417bf0d1c355a5c20639c0ae3c"} Nov 28 07:08:06 crc kubenswrapper[4922]: I1128 07:08:06.683785 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-webhook-5f6d4c5ccb-57bpj" event={"ID":"13075e30-8b1b-484b-9808-c2912780a009","Type":"ContainerStarted","Data":"206953bc915e5f7898ec5a3f3c558d5622c81748f0ca8d70fe34b929439b29e2"} Nov 28 07:08:06 crc kubenswrapper[4922]: I1128 07:08:06.683936 4922 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-nmstate/nmstate-webhook-5f6d4c5ccb-57bpj" Nov 28 07:08:06 crc kubenswrapper[4922]: I1128 07:08:06.685869 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-console-plugin-7fbb5f6569-d7sl7" event={"ID":"bb22bb64-4624-4718-9785-1dc7c3c125e4","Type":"ContainerStarted","Data":"5887227f6c893c63927846fc837b4080bc31bbbcd03e829efd6d6d71577bb022"} Nov 28 07:08:06 crc kubenswrapper[4922]: I1128 07:08:06.700962 4922 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-handler-btcsz" podStartSLOduration=3.829697536 podStartE2EDuration="10.700938011s" podCreationTimestamp="2025-11-28 07:07:56 +0000 UTC" firstStartedPulling="2025-11-28 07:07:58.23789855 +0000 UTC m=+923.158294162" lastFinishedPulling="2025-11-28 07:08:05.109139055 +0000 UTC m=+930.029534637" observedRunningTime="2025-11-28 07:08:06.69389559 +0000 UTC m=+931.614291182" 
Nov 28 07:08:06 crc kubenswrapper[4922]: I1128 07:08:06.710917 4922 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-webhook-5f6d4c5ccb-57bpj" podStartSLOduration=4.351644933 podStartE2EDuration="10.710897952s" podCreationTimestamp="2025-11-28 07:07:56 +0000 UTC" firstStartedPulling="2025-11-28 07:07:58.749410463 +0000 UTC m=+923.669806045" lastFinishedPulling="2025-11-28 07:08:05.108663482 +0000 UTC m=+930.029059064" observedRunningTime="2025-11-28 07:08:06.710064499 +0000 UTC m=+931.630460121" watchObservedRunningTime="2025-11-28 07:08:06.710897952 +0000 UTC m=+931.631293564"
Nov 28 07:08:06 crc kubenswrapper[4922]: I1128 07:08:06.725036 4922 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-console-plugin-7fbb5f6569-d7sl7" podStartSLOduration=3.781325001 podStartE2EDuration="10.725021676s" podCreationTimestamp="2025-11-28 07:07:56 +0000 UTC" firstStartedPulling="2025-11-28 07:07:58.919432804 +0000 UTC m=+923.839828426" lastFinishedPulling="2025-11-28 07:08:05.863129519 +0000 UTC m=+930.783525101" observedRunningTime="2025-11-28 07:08:06.721085108 +0000 UTC m=+931.641480690" watchObservedRunningTime="2025-11-28 07:08:06.725021676 +0000 UTC m=+931.645417258"
Nov 28 07:08:08 crc kubenswrapper[4922]: I1128 07:08:08.275275 4922 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console/console-57d9cc5497-klmfx"
Nov 28 07:08:08 crc kubenswrapper[4922]: I1128 07:08:08.275562 4922 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-console/console-57d9cc5497-klmfx"
Nov 28 07:08:08 crc kubenswrapper[4922]: I1128 07:08:08.280993 4922 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-console/console-57d9cc5497-klmfx"
Nov 28 07:08:08 crc kubenswrapper[4922]: I1128 07:08:08.706104 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-gxd22" event={"ID":"c6515822-5a2f-406a-bfb9-8e98c01b4a1e","Type":"ContainerStarted","Data":"a81a41a86cb996033eaa424da6ad46af82cf59eca2571a32af47fb6475e71b2c"}
Nov 28 07:08:08 crc kubenswrapper[4922]: I1128 07:08:08.711142 4922 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console/console-57d9cc5497-klmfx"
Nov 28 07:08:08 crc kubenswrapper[4922]: I1128 07:08:08.784406 4922 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-console/console-f9d7485db-h297g"]
Nov 28 07:08:09 crc kubenswrapper[4922]: I1128 07:08:09.753671 4922 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-gxd22" podStartSLOduration=4.03397755 podStartE2EDuration="11.753639416s" podCreationTimestamp="2025-11-28 07:07:58 +0000 UTC" firstStartedPulling="2025-11-28 07:08:00.638832679 +0000 UTC m=+925.559228301" lastFinishedPulling="2025-11-28 07:08:08.358494585 +0000 UTC m=+933.278890167" observedRunningTime="2025-11-28 07:08:09.751099707 +0000 UTC m=+934.671495299" watchObservedRunningTime="2025-11-28 07:08:09.753639416 +0000 UTC m=+934.674035058"
Nov 28 07:08:13 crc kubenswrapper[4922]: I1128 07:08:13.232739 4922 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-nmstate/nmstate-handler-btcsz"
Nov 28 07:08:13 crc kubenswrapper[4922]: I1128 07:08:13.753688 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-metrics-7f946cbc9-fsj5r" event={"ID":"bda6cb3f-c411-41a1-8c49-338ac07f8193","Type":"ContainerStarted","Data":"7aa7e5f411a383c21ac44d4d5de4216461d141d78bf7436e29b69b5c77b8668a"}
Nov 28 07:08:13 crc kubenswrapper[4922]: I1128 07:08:13.794310 4922 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-metrics-7f946cbc9-fsj5r" podStartSLOduration=1.6414742580000001 podStartE2EDuration="17.794284173s" podCreationTimestamp="2025-11-28 07:07:56 +0000 UTC" firstStartedPulling="2025-11-28 07:07:56.870263186 +0000 UTC m=+921.790658778" lastFinishedPulling="2025-11-28 07:08:13.023073111 +0000 UTC m=+937.943468693" observedRunningTime="2025-11-28 07:08:13.780627642 +0000 UTC m=+938.701023244" watchObservedRunningTime="2025-11-28 07:08:13.794284173 +0000 UTC m=+938.714679765"
Nov 28 07:08:15 crc kubenswrapper[4922]: I1128 07:08:15.649575 4922 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-qcs6d"]
Nov 28 07:08:15 crc kubenswrapper[4922]: E1128 07:08:15.650120 4922 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e0550718-6eeb-42e1-890f-2ff3144a2185" containerName="extract-utilities"
Nov 28 07:08:15 crc kubenswrapper[4922]: I1128 07:08:15.650135 4922 state_mem.go:107] "Deleted CPUSet assignment" podUID="e0550718-6eeb-42e1-890f-2ff3144a2185" containerName="extract-utilities"
Nov 28 07:08:15 crc kubenswrapper[4922]: E1128 07:08:15.650156 4922 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e0550718-6eeb-42e1-890f-2ff3144a2185" containerName="extract-content"
Nov 28 07:08:15 crc kubenswrapper[4922]: I1128 07:08:15.650165 4922 state_mem.go:107] "Deleted CPUSet assignment" podUID="e0550718-6eeb-42e1-890f-2ff3144a2185" containerName="extract-content"
Nov 28 07:08:15 crc kubenswrapper[4922]: E1128 07:08:15.650181 4922 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e0550718-6eeb-42e1-890f-2ff3144a2185" containerName="registry-server"
Nov 28 07:08:15 crc kubenswrapper[4922]: I1128 07:08:15.650191 4922 state_mem.go:107] "Deleted CPUSet assignment" podUID="e0550718-6eeb-42e1-890f-2ff3144a2185" containerName="registry-server"
Nov 28 07:08:15 crc kubenswrapper[4922]: I1128 07:08:15.650348 4922 memory_manager.go:354] "RemoveStaleState removing state" podUID="e0550718-6eeb-42e1-890f-2ff3144a2185" containerName="registry-server"
Nov 28 07:08:15 crc kubenswrapper[4922]: I1128 07:08:15.651259 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-qcs6d"
Nov 28 07:08:15 crc kubenswrapper[4922]: I1128 07:08:15.668154 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-qcs6d"]
Nov 28 07:08:15 crc kubenswrapper[4922]: I1128 07:08:15.794713 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wtgjf\" (UniqueName: \"kubernetes.io/projected/3148a6c1-4db9-426f-84d1-02016ce27571-kube-api-access-wtgjf\") pod \"community-operators-qcs6d\" (UID: \"3148a6c1-4db9-426f-84d1-02016ce27571\") " pod="openshift-marketplace/community-operators-qcs6d"
Nov 28 07:08:15 crc kubenswrapper[4922]: I1128 07:08:15.794763 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3148a6c1-4db9-426f-84d1-02016ce27571-utilities\") pod \"community-operators-qcs6d\" (UID: \"3148a6c1-4db9-426f-84d1-02016ce27571\") " pod="openshift-marketplace/community-operators-qcs6d"
Nov 28 07:08:15 crc kubenswrapper[4922]: I1128 07:08:15.794809 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3148a6c1-4db9-426f-84d1-02016ce27571-catalog-content\") pod \"community-operators-qcs6d\" (UID: \"3148a6c1-4db9-426f-84d1-02016ce27571\") " pod="openshift-marketplace/community-operators-qcs6d"
Nov 28 07:08:15 crc kubenswrapper[4922]: I1128 07:08:15.896569 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3148a6c1-4db9-426f-84d1-02016ce27571-catalog-content\") pod \"community-operators-qcs6d\" (UID: \"3148a6c1-4db9-426f-84d1-02016ce27571\") " pod="openshift-marketplace/community-operators-qcs6d"
Nov 28 07:08:15 crc kubenswrapper[4922]: I1128 07:08:15.896712 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wtgjf\" (UniqueName: \"kubernetes.io/projected/3148a6c1-4db9-426f-84d1-02016ce27571-kube-api-access-wtgjf\") pod \"community-operators-qcs6d\" (UID: \"3148a6c1-4db9-426f-84d1-02016ce27571\") " pod="openshift-marketplace/community-operators-qcs6d"
Nov 28 07:08:15 crc kubenswrapper[4922]: I1128 07:08:15.896776 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3148a6c1-4db9-426f-84d1-02016ce27571-utilities\") pod \"community-operators-qcs6d\" (UID: \"3148a6c1-4db9-426f-84d1-02016ce27571\") " pod="openshift-marketplace/community-operators-qcs6d"
Nov 28 07:08:15 crc kubenswrapper[4922]: I1128 07:08:15.897102 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3148a6c1-4db9-426f-84d1-02016ce27571-catalog-content\") pod \"community-operators-qcs6d\" (UID: \"3148a6c1-4db9-426f-84d1-02016ce27571\") " pod="openshift-marketplace/community-operators-qcs6d"
Nov 28 07:08:15 crc kubenswrapper[4922]: I1128 07:08:15.897134 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3148a6c1-4db9-426f-84d1-02016ce27571-utilities\") pod \"community-operators-qcs6d\" (UID: \"3148a6c1-4db9-426f-84d1-02016ce27571\") " pod="openshift-marketplace/community-operators-qcs6d"
Nov 28 07:08:15 crc kubenswrapper[4922]: I1128 07:08:15.930628 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wtgjf\" (UniqueName: \"kubernetes.io/projected/3148a6c1-4db9-426f-84d1-02016ce27571-kube-api-access-wtgjf\") pod \"community-operators-qcs6d\" (UID: \"3148a6c1-4db9-426f-84d1-02016ce27571\") " pod="openshift-marketplace/community-operators-qcs6d"
"MountVolume.SetUp succeeded for volume \"kube-api-access-wtgjf\" (UniqueName: \"kubernetes.io/projected/3148a6c1-4db9-426f-84d1-02016ce27571-kube-api-access-wtgjf\") pod \"community-operators-qcs6d\" (UID: \"3148a6c1-4db9-426f-84d1-02016ce27571\") " pod="openshift-marketplace/community-operators-qcs6d" Nov 28 07:08:15 crc kubenswrapper[4922]: I1128 07:08:15.983410 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-qcs6d" Nov 28 07:08:16 crc kubenswrapper[4922]: I1128 07:08:16.411037 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-qcs6d"] Nov 28 07:08:16 crc kubenswrapper[4922]: W1128 07:08:16.422531 4922 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod3148a6c1_4db9_426f_84d1_02016ce27571.slice/crio-21847257c256ead5ac4a7e52262b6cd9636cccbf5a2dbc21d70db56bf2539f51 WatchSource:0}: Error finding container 21847257c256ead5ac4a7e52262b6cd9636cccbf5a2dbc21d70db56bf2539f51: Status 404 returned error can't find the container with id 21847257c256ead5ac4a7e52262b6cd9636cccbf5a2dbc21d70db56bf2539f51 Nov 28 07:08:16 crc kubenswrapper[4922]: I1128 07:08:16.771872 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-qcs6d" event={"ID":"3148a6c1-4db9-426f-84d1-02016ce27571","Type":"ContainerStarted","Data":"21847257c256ead5ac4a7e52262b6cd9636cccbf5a2dbc21d70db56bf2539f51"} Nov 28 07:08:17 crc kubenswrapper[4922]: I1128 07:08:17.778075 4922 generic.go:334] "Generic (PLEG): container finished" podID="3148a6c1-4db9-426f-84d1-02016ce27571" containerID="aabaf1c1bc87736b48b36d9d97368c379a4ae9c5deb3ef714a3dc55dc158832b" exitCode=0 Nov 28 07:08:17 crc kubenswrapper[4922]: I1128 07:08:17.778175 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-qcs6d" event={"ID":"3148a6c1-4db9-426f-84d1-02016ce27571","Type":"ContainerDied","Data":"aabaf1c1bc87736b48b36d9d97368c379a4ae9c5deb3ef714a3dc55dc158832b"} Nov 28 07:08:18 crc kubenswrapper[4922]: I1128 07:08:18.495397 4922 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-nmstate/nmstate-webhook-5f6d4c5ccb-57bpj" Nov 28 07:08:18 crc kubenswrapper[4922]: I1128 07:08:18.570537 4922 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-gxd22" Nov 28 07:08:18 crc kubenswrapper[4922]: I1128 07:08:18.570605 4922 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-gxd22" Nov 28 07:08:18 crc kubenswrapper[4922]: I1128 07:08:18.614867 4922 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-gxd22" Nov 28 07:08:18 crc kubenswrapper[4922]: I1128 07:08:18.843798 4922 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-gxd22" Nov 28 07:08:19 crc kubenswrapper[4922]: I1128 07:08:19.800365 4922 generic.go:334] "Generic (PLEG): container finished" podID="3148a6c1-4db9-426f-84d1-02016ce27571" containerID="a610792a5a631bb9253cb766596276b1b7f26ce6f22e8196de8e95743b03d9b2" exitCode=0 Nov 28 07:08:19 crc kubenswrapper[4922]: I1128 07:08:19.800418 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-qcs6d" 
event={"ID":"3148a6c1-4db9-426f-84d1-02016ce27571","Type":"ContainerDied","Data":"a610792a5a631bb9253cb766596276b1b7f26ce6f22e8196de8e95743b03d9b2"} Nov 28 07:08:20 crc kubenswrapper[4922]: I1128 07:08:20.025062 4922 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-gxd22"] Nov 28 07:08:20 crc kubenswrapper[4922]: I1128 07:08:20.810582 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-qcs6d" event={"ID":"3148a6c1-4db9-426f-84d1-02016ce27571","Type":"ContainerStarted","Data":"78d02210180381e9f63a28d1432913c3f1ea4d258214398a279e7a9178738039"} Nov 28 07:08:20 crc kubenswrapper[4922]: I1128 07:08:20.810706 4922 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-gxd22" podUID="c6515822-5a2f-406a-bfb9-8e98c01b4a1e" containerName="registry-server" containerID="cri-o://a81a41a86cb996033eaa424da6ad46af82cf59eca2571a32af47fb6475e71b2c" gracePeriod=2 Nov 28 07:08:20 crc kubenswrapper[4922]: I1128 07:08:20.845456 4922 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-qcs6d" podStartSLOduration=3.250645921 podStartE2EDuration="5.845431148s" podCreationTimestamp="2025-11-28 07:08:15 +0000 UTC" firstStartedPulling="2025-11-28 07:08:17.779835204 +0000 UTC m=+942.700230786" lastFinishedPulling="2025-11-28 07:08:20.374620431 +0000 UTC m=+945.295016013" observedRunningTime="2025-11-28 07:08:20.84292438 +0000 UTC m=+945.763319972" watchObservedRunningTime="2025-11-28 07:08:20.845431148 +0000 UTC m=+945.765826740" Nov 28 07:08:21 crc kubenswrapper[4922]: I1128 07:08:21.217282 4922 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-gxd22" Nov 28 07:08:21 crc kubenswrapper[4922]: I1128 07:08:21.370509 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c6515822-5a2f-406a-bfb9-8e98c01b4a1e-catalog-content\") pod \"c6515822-5a2f-406a-bfb9-8e98c01b4a1e\" (UID: \"c6515822-5a2f-406a-bfb9-8e98c01b4a1e\") " Nov 28 07:08:21 crc kubenswrapper[4922]: I1128 07:08:21.370828 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nllk4\" (UniqueName: \"kubernetes.io/projected/c6515822-5a2f-406a-bfb9-8e98c01b4a1e-kube-api-access-nllk4\") pod \"c6515822-5a2f-406a-bfb9-8e98c01b4a1e\" (UID: \"c6515822-5a2f-406a-bfb9-8e98c01b4a1e\") " Nov 28 07:08:21 crc kubenswrapper[4922]: I1128 07:08:21.370923 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c6515822-5a2f-406a-bfb9-8e98c01b4a1e-utilities\") pod \"c6515822-5a2f-406a-bfb9-8e98c01b4a1e\" (UID: \"c6515822-5a2f-406a-bfb9-8e98c01b4a1e\") " Nov 28 07:08:21 crc kubenswrapper[4922]: I1128 07:08:21.371836 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c6515822-5a2f-406a-bfb9-8e98c01b4a1e-utilities" (OuterVolumeSpecName: "utilities") pod "c6515822-5a2f-406a-bfb9-8e98c01b4a1e" (UID: "c6515822-5a2f-406a-bfb9-8e98c01b4a1e"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 07:08:21 crc kubenswrapper[4922]: I1128 07:08:21.378551 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c6515822-5a2f-406a-bfb9-8e98c01b4a1e-kube-api-access-nllk4" (OuterVolumeSpecName: "kube-api-access-nllk4") pod "c6515822-5a2f-406a-bfb9-8e98c01b4a1e" (UID: "c6515822-5a2f-406a-bfb9-8e98c01b4a1e"). InnerVolumeSpecName "kube-api-access-nllk4". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 07:08:21 crc kubenswrapper[4922]: I1128 07:08:21.414141 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c6515822-5a2f-406a-bfb9-8e98c01b4a1e-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "c6515822-5a2f-406a-bfb9-8e98c01b4a1e" (UID: "c6515822-5a2f-406a-bfb9-8e98c01b4a1e"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 07:08:21 crc kubenswrapper[4922]: I1128 07:08:21.473976 4922 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c6515822-5a2f-406a-bfb9-8e98c01b4a1e-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 28 07:08:21 crc kubenswrapper[4922]: I1128 07:08:21.474012 4922 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nllk4\" (UniqueName: \"kubernetes.io/projected/c6515822-5a2f-406a-bfb9-8e98c01b4a1e-kube-api-access-nllk4\") on node \"crc\" DevicePath \"\"" Nov 28 07:08:21 crc kubenswrapper[4922]: I1128 07:08:21.474030 4922 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c6515822-5a2f-406a-bfb9-8e98c01b4a1e-utilities\") on node \"crc\" DevicePath \"\"" Nov 28 07:08:21 crc kubenswrapper[4922]: I1128 07:08:21.819953 4922 generic.go:334] "Generic (PLEG): container finished" podID="c6515822-5a2f-406a-bfb9-8e98c01b4a1e" containerID="a81a41a86cb996033eaa424da6ad46af82cf59eca2571a32af47fb6475e71b2c" exitCode=0 Nov 28 07:08:21 crc kubenswrapper[4922]: I1128 07:08:21.820027 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-gxd22" event={"ID":"c6515822-5a2f-406a-bfb9-8e98c01b4a1e","Type":"ContainerDied","Data":"a81a41a86cb996033eaa424da6ad46af82cf59eca2571a32af47fb6475e71b2c"} Nov 28 07:08:21 crc kubenswrapper[4922]: I1128 07:08:21.820329 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-gxd22" event={"ID":"c6515822-5a2f-406a-bfb9-8e98c01b4a1e","Type":"ContainerDied","Data":"69d0fb3bdfd3328472f62adf52aa8e00c890e18c58b3d04002cd3e3154b64881"} Nov 28 07:08:21 crc kubenswrapper[4922]: I1128 07:08:21.820362 4922 scope.go:117] "RemoveContainer" containerID="a81a41a86cb996033eaa424da6ad46af82cf59eca2571a32af47fb6475e71b2c" Nov 28 07:08:21 crc kubenswrapper[4922]: I1128 07:08:21.820089 4922 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-gxd22" Nov 28 07:08:21 crc kubenswrapper[4922]: I1128 07:08:21.849919 4922 scope.go:117] "RemoveContainer" containerID="f1f5e82df45e32741cfb275c36628b8f0a4998417bf0d1c355a5c20639c0ae3c" Nov 28 07:08:21 crc kubenswrapper[4922]: I1128 07:08:21.855478 4922 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-gxd22"] Nov 28 07:08:21 crc kubenswrapper[4922]: I1128 07:08:21.861402 4922 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-gxd22"] Nov 28 07:08:21 crc kubenswrapper[4922]: I1128 07:08:21.881707 4922 scope.go:117] "RemoveContainer" containerID="48b030e65b068811666dd72e5b806c7d2834490c3e07abe662eba526fc7bda65" Nov 28 07:08:21 crc kubenswrapper[4922]: I1128 07:08:21.901600 4922 scope.go:117] "RemoveContainer" containerID="a81a41a86cb996033eaa424da6ad46af82cf59eca2571a32af47fb6475e71b2c" Nov 28 07:08:21 crc kubenswrapper[4922]: E1128 07:08:21.902141 4922 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a81a41a86cb996033eaa424da6ad46af82cf59eca2571a32af47fb6475e71b2c\": container with ID starting with a81a41a86cb996033eaa424da6ad46af82cf59eca2571a32af47fb6475e71b2c not found: ID does not exist" containerID="a81a41a86cb996033eaa424da6ad46af82cf59eca2571a32af47fb6475e71b2c" Nov 28 07:08:21 crc kubenswrapper[4922]: I1128 07:08:21.902181 4922 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a81a41a86cb996033eaa424da6ad46af82cf59eca2571a32af47fb6475e71b2c"} err="failed to get container status \"a81a41a86cb996033eaa424da6ad46af82cf59eca2571a32af47fb6475e71b2c\": rpc error: code = NotFound desc = could not find container \"a81a41a86cb996033eaa424da6ad46af82cf59eca2571a32af47fb6475e71b2c\": container with ID starting with a81a41a86cb996033eaa424da6ad46af82cf59eca2571a32af47fb6475e71b2c not found: ID does not exist" Nov 28 07:08:21 crc kubenswrapper[4922]: I1128 07:08:21.902209 4922 scope.go:117] "RemoveContainer" containerID="f1f5e82df45e32741cfb275c36628b8f0a4998417bf0d1c355a5c20639c0ae3c" Nov 28 07:08:21 crc kubenswrapper[4922]: E1128 07:08:21.910585 4922 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f1f5e82df45e32741cfb275c36628b8f0a4998417bf0d1c355a5c20639c0ae3c\": container with ID starting with f1f5e82df45e32741cfb275c36628b8f0a4998417bf0d1c355a5c20639c0ae3c not found: ID does not exist" containerID="f1f5e82df45e32741cfb275c36628b8f0a4998417bf0d1c355a5c20639c0ae3c" Nov 28 07:08:21 crc kubenswrapper[4922]: I1128 07:08:21.910617 4922 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f1f5e82df45e32741cfb275c36628b8f0a4998417bf0d1c355a5c20639c0ae3c"} err="failed to get container status \"f1f5e82df45e32741cfb275c36628b8f0a4998417bf0d1c355a5c20639c0ae3c\": rpc error: code = NotFound desc = could not find container \"f1f5e82df45e32741cfb275c36628b8f0a4998417bf0d1c355a5c20639c0ae3c\": container with ID starting with f1f5e82df45e32741cfb275c36628b8f0a4998417bf0d1c355a5c20639c0ae3c not found: ID does not exist" Nov 28 07:08:21 crc kubenswrapper[4922]: I1128 07:08:21.910635 4922 scope.go:117] "RemoveContainer" containerID="48b030e65b068811666dd72e5b806c7d2834490c3e07abe662eba526fc7bda65" Nov 28 07:08:21 crc kubenswrapper[4922]: E1128 07:08:21.910937 4922 log.go:32] "ContainerStatus from runtime service 
failed" err="rpc error: code = NotFound desc = could not find container \"48b030e65b068811666dd72e5b806c7d2834490c3e07abe662eba526fc7bda65\": container with ID starting with 48b030e65b068811666dd72e5b806c7d2834490c3e07abe662eba526fc7bda65 not found: ID does not exist" containerID="48b030e65b068811666dd72e5b806c7d2834490c3e07abe662eba526fc7bda65" Nov 28 07:08:21 crc kubenswrapper[4922]: I1128 07:08:21.910962 4922 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"48b030e65b068811666dd72e5b806c7d2834490c3e07abe662eba526fc7bda65"} err="failed to get container status \"48b030e65b068811666dd72e5b806c7d2834490c3e07abe662eba526fc7bda65\": rpc error: code = NotFound desc = could not find container \"48b030e65b068811666dd72e5b806c7d2834490c3e07abe662eba526fc7bda65\": container with ID starting with 48b030e65b068811666dd72e5b806c7d2834490c3e07abe662eba526fc7bda65 not found: ID does not exist" Nov 28 07:08:23 crc kubenswrapper[4922]: I1128 07:08:23.410974 4922 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c6515822-5a2f-406a-bfb9-8e98c01b4a1e" path="/var/lib/kubelet/pods/c6515822-5a2f-406a-bfb9-8e98c01b4a1e/volumes" Nov 28 07:08:25 crc kubenswrapper[4922]: I1128 07:08:25.984262 4922 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-qcs6d" Nov 28 07:08:25 crc kubenswrapper[4922]: I1128 07:08:25.984606 4922 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-qcs6d" Nov 28 07:08:26 crc kubenswrapper[4922]: I1128 07:08:26.035911 4922 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-qcs6d" Nov 28 07:08:26 crc kubenswrapper[4922]: I1128 07:08:26.912529 4922 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-qcs6d" Nov 28 07:08:26 crc kubenswrapper[4922]: I1128 07:08:26.962703 4922 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-qcs6d"] Nov 28 07:08:27 crc kubenswrapper[4922]: I1128 07:08:27.311743 4922 patch_prober.go:28] interesting pod/machine-config-daemon-h8wk6 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 07:08:27 crc kubenswrapper[4922]: I1128 07:08:27.311826 4922 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-h8wk6" podUID="0498340a-5e95-42bf-a0a6-8ac89a6b8858" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 07:08:28 crc kubenswrapper[4922]: I1128 07:08:28.867946 4922 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-qcs6d" podUID="3148a6c1-4db9-426f-84d1-02016ce27571" containerName="registry-server" containerID="cri-o://78d02210180381e9f63a28d1432913c3f1ea4d258214398a279e7a9178738039" gracePeriod=2 Nov 28 07:08:29 crc kubenswrapper[4922]: I1128 07:08:29.880834 4922 generic.go:334] "Generic (PLEG): container finished" podID="3148a6c1-4db9-426f-84d1-02016ce27571" containerID="78d02210180381e9f63a28d1432913c3f1ea4d258214398a279e7a9178738039" exitCode=0 Nov 28 07:08:29 crc kubenswrapper[4922]: I1128 
07:08:29.880923 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-qcs6d" event={"ID":"3148a6c1-4db9-426f-84d1-02016ce27571","Type":"ContainerDied","Data":"78d02210180381e9f63a28d1432913c3f1ea4d258214398a279e7a9178738039"} Nov 28 07:08:30 crc kubenswrapper[4922]: I1128 07:08:30.407599 4922 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-qcs6d" Nov 28 07:08:30 crc kubenswrapper[4922]: I1128 07:08:30.501586 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3148a6c1-4db9-426f-84d1-02016ce27571-catalog-content\") pod \"3148a6c1-4db9-426f-84d1-02016ce27571\" (UID: \"3148a6c1-4db9-426f-84d1-02016ce27571\") " Nov 28 07:08:30 crc kubenswrapper[4922]: I1128 07:08:30.501683 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wtgjf\" (UniqueName: \"kubernetes.io/projected/3148a6c1-4db9-426f-84d1-02016ce27571-kube-api-access-wtgjf\") pod \"3148a6c1-4db9-426f-84d1-02016ce27571\" (UID: \"3148a6c1-4db9-426f-84d1-02016ce27571\") " Nov 28 07:08:30 crc kubenswrapper[4922]: I1128 07:08:30.501743 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3148a6c1-4db9-426f-84d1-02016ce27571-utilities\") pod \"3148a6c1-4db9-426f-84d1-02016ce27571\" (UID: \"3148a6c1-4db9-426f-84d1-02016ce27571\") " Nov 28 07:08:30 crc kubenswrapper[4922]: I1128 07:08:30.503753 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/3148a6c1-4db9-426f-84d1-02016ce27571-utilities" (OuterVolumeSpecName: "utilities") pod "3148a6c1-4db9-426f-84d1-02016ce27571" (UID: "3148a6c1-4db9-426f-84d1-02016ce27571"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 07:08:30 crc kubenswrapper[4922]: I1128 07:08:30.503976 4922 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3148a6c1-4db9-426f-84d1-02016ce27571-utilities\") on node \"crc\" DevicePath \"\"" Nov 28 07:08:30 crc kubenswrapper[4922]: I1128 07:08:30.518356 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3148a6c1-4db9-426f-84d1-02016ce27571-kube-api-access-wtgjf" (OuterVolumeSpecName: "kube-api-access-wtgjf") pod "3148a6c1-4db9-426f-84d1-02016ce27571" (UID: "3148a6c1-4db9-426f-84d1-02016ce27571"). InnerVolumeSpecName "kube-api-access-wtgjf". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 07:08:30 crc kubenswrapper[4922]: I1128 07:08:30.566628 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/3148a6c1-4db9-426f-84d1-02016ce27571-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "3148a6c1-4db9-426f-84d1-02016ce27571" (UID: "3148a6c1-4db9-426f-84d1-02016ce27571"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 07:08:30 crc kubenswrapper[4922]: I1128 07:08:30.605082 4922 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3148a6c1-4db9-426f-84d1-02016ce27571-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 28 07:08:30 crc kubenswrapper[4922]: I1128 07:08:30.605121 4922 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wtgjf\" (UniqueName: \"kubernetes.io/projected/3148a6c1-4db9-426f-84d1-02016ce27571-kube-api-access-wtgjf\") on node \"crc\" DevicePath \"\"" Nov 28 07:08:30 crc kubenswrapper[4922]: I1128 07:08:30.891079 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-qcs6d" event={"ID":"3148a6c1-4db9-426f-84d1-02016ce27571","Type":"ContainerDied","Data":"21847257c256ead5ac4a7e52262b6cd9636cccbf5a2dbc21d70db56bf2539f51"} Nov 28 07:08:30 crc kubenswrapper[4922]: I1128 07:08:30.891177 4922 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-qcs6d" Nov 28 07:08:30 crc kubenswrapper[4922]: I1128 07:08:30.891176 4922 scope.go:117] "RemoveContainer" containerID="78d02210180381e9f63a28d1432913c3f1ea4d258214398a279e7a9178738039" Nov 28 07:08:30 crc kubenswrapper[4922]: I1128 07:08:30.917398 4922 scope.go:117] "RemoveContainer" containerID="a610792a5a631bb9253cb766596276b1b7f26ce6f22e8196de8e95743b03d9b2" Nov 28 07:08:30 crc kubenswrapper[4922]: I1128 07:08:30.938123 4922 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-qcs6d"] Nov 28 07:08:30 crc kubenswrapper[4922]: I1128 07:08:30.946834 4922 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-qcs6d"] Nov 28 07:08:30 crc kubenswrapper[4922]: I1128 07:08:30.992934 4922 scope.go:117] "RemoveContainer" containerID="aabaf1c1bc87736b48b36d9d97368c379a4ae9c5deb3ef714a3dc55dc158832b" Nov 28 07:08:31 crc kubenswrapper[4922]: I1128 07:08:31.405157 4922 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3148a6c1-4db9-426f-84d1-02016ce27571" path="/var/lib/kubelet/pods/3148a6c1-4db9-426f-84d1-02016ce27571/volumes" Nov 28 07:08:32 crc kubenswrapper[4922]: I1128 07:08:32.559695 4922 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83jmktf"] Nov 28 07:08:32 crc kubenswrapper[4922]: E1128 07:08:32.559977 4922 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c6515822-5a2f-406a-bfb9-8e98c01b4a1e" containerName="registry-server" Nov 28 07:08:32 crc kubenswrapper[4922]: I1128 07:08:32.559996 4922 state_mem.go:107] "Deleted CPUSet assignment" podUID="c6515822-5a2f-406a-bfb9-8e98c01b4a1e" containerName="registry-server" Nov 28 07:08:32 crc kubenswrapper[4922]: E1128 07:08:32.560014 4922 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3148a6c1-4db9-426f-84d1-02016ce27571" containerName="registry-server" Nov 28 07:08:32 crc kubenswrapper[4922]: I1128 07:08:32.560027 4922 state_mem.go:107] "Deleted CPUSet assignment" podUID="3148a6c1-4db9-426f-84d1-02016ce27571" containerName="registry-server" Nov 28 07:08:32 crc kubenswrapper[4922]: E1128 07:08:32.560045 4922 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c6515822-5a2f-406a-bfb9-8e98c01b4a1e" containerName="extract-utilities" Nov 28 07:08:32 crc kubenswrapper[4922]: I1128 07:08:32.560059 4922 
state_mem.go:107] "Deleted CPUSet assignment" podUID="c6515822-5a2f-406a-bfb9-8e98c01b4a1e" containerName="extract-utilities" Nov 28 07:08:32 crc kubenswrapper[4922]: E1128 07:08:32.560078 4922 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3148a6c1-4db9-426f-84d1-02016ce27571" containerName="extract-utilities" Nov 28 07:08:32 crc kubenswrapper[4922]: I1128 07:08:32.560091 4922 state_mem.go:107] "Deleted CPUSet assignment" podUID="3148a6c1-4db9-426f-84d1-02016ce27571" containerName="extract-utilities" Nov 28 07:08:32 crc kubenswrapper[4922]: E1128 07:08:32.560110 4922 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c6515822-5a2f-406a-bfb9-8e98c01b4a1e" containerName="extract-content" Nov 28 07:08:32 crc kubenswrapper[4922]: I1128 07:08:32.560121 4922 state_mem.go:107] "Deleted CPUSet assignment" podUID="c6515822-5a2f-406a-bfb9-8e98c01b4a1e" containerName="extract-content" Nov 28 07:08:32 crc kubenswrapper[4922]: E1128 07:08:32.560135 4922 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3148a6c1-4db9-426f-84d1-02016ce27571" containerName="extract-content" Nov 28 07:08:32 crc kubenswrapper[4922]: I1128 07:08:32.560148 4922 state_mem.go:107] "Deleted CPUSet assignment" podUID="3148a6c1-4db9-426f-84d1-02016ce27571" containerName="extract-content" Nov 28 07:08:32 crc kubenswrapper[4922]: I1128 07:08:32.560336 4922 memory_manager.go:354] "RemoveStaleState removing state" podUID="c6515822-5a2f-406a-bfb9-8e98c01b4a1e" containerName="registry-server" Nov 28 07:08:32 crc kubenswrapper[4922]: I1128 07:08:32.560351 4922 memory_manager.go:354] "RemoveStaleState removing state" podUID="3148a6c1-4db9-426f-84d1-02016ce27571" containerName="registry-server" Nov 28 07:08:32 crc kubenswrapper[4922]: I1128 07:08:32.561572 4922 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83jmktf" Nov 28 07:08:32 crc kubenswrapper[4922]: I1128 07:08:32.563892 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"default-dockercfg-vmwhc" Nov 28 07:08:32 crc kubenswrapper[4922]: I1128 07:08:32.576545 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83jmktf"] Nov 28 07:08:32 crc kubenswrapper[4922]: I1128 07:08:32.734968 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/57ced773-180b-4ccd-a494-a78a39e66083-util\") pod \"af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83jmktf\" (UID: \"57ced773-180b-4ccd-a494-a78a39e66083\") " pod="openshift-marketplace/af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83jmktf" Nov 28 07:08:32 crc kubenswrapper[4922]: I1128 07:08:32.735173 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/57ced773-180b-4ccd-a494-a78a39e66083-bundle\") pod \"af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83jmktf\" (UID: \"57ced773-180b-4ccd-a494-a78a39e66083\") " pod="openshift-marketplace/af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83jmktf" Nov 28 07:08:32 crc kubenswrapper[4922]: I1128 07:08:32.735261 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bqnm8\" (UniqueName: \"kubernetes.io/projected/57ced773-180b-4ccd-a494-a78a39e66083-kube-api-access-bqnm8\") pod \"af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83jmktf\" (UID: \"57ced773-180b-4ccd-a494-a78a39e66083\") " pod="openshift-marketplace/af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83jmktf" Nov 28 07:08:32 crc kubenswrapper[4922]: I1128 07:08:32.837244 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bqnm8\" (UniqueName: \"kubernetes.io/projected/57ced773-180b-4ccd-a494-a78a39e66083-kube-api-access-bqnm8\") pod \"af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83jmktf\" (UID: \"57ced773-180b-4ccd-a494-a78a39e66083\") " pod="openshift-marketplace/af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83jmktf" Nov 28 07:08:32 crc kubenswrapper[4922]: I1128 07:08:32.837919 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/57ced773-180b-4ccd-a494-a78a39e66083-bundle\") pod \"af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83jmktf\" (UID: \"57ced773-180b-4ccd-a494-a78a39e66083\") " pod="openshift-marketplace/af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83jmktf" Nov 28 07:08:32 crc kubenswrapper[4922]: I1128 07:08:32.838025 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/57ced773-180b-4ccd-a494-a78a39e66083-util\") pod \"af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83jmktf\" (UID: \"57ced773-180b-4ccd-a494-a78a39e66083\") " pod="openshift-marketplace/af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83jmktf" Nov 28 07:08:32 crc kubenswrapper[4922]: I1128 07:08:32.838959 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bundle\" (UniqueName: 
\"kubernetes.io/empty-dir/57ced773-180b-4ccd-a494-a78a39e66083-bundle\") pod \"af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83jmktf\" (UID: \"57ced773-180b-4ccd-a494-a78a39e66083\") " pod="openshift-marketplace/af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83jmktf" Nov 28 07:08:32 crc kubenswrapper[4922]: I1128 07:08:32.838995 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/57ced773-180b-4ccd-a494-a78a39e66083-util\") pod \"af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83jmktf\" (UID: \"57ced773-180b-4ccd-a494-a78a39e66083\") " pod="openshift-marketplace/af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83jmktf" Nov 28 07:08:32 crc kubenswrapper[4922]: I1128 07:08:32.871268 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bqnm8\" (UniqueName: \"kubernetes.io/projected/57ced773-180b-4ccd-a494-a78a39e66083-kube-api-access-bqnm8\") pod \"af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83jmktf\" (UID: \"57ced773-180b-4ccd-a494-a78a39e66083\") " pod="openshift-marketplace/af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83jmktf" Nov 28 07:08:32 crc kubenswrapper[4922]: I1128 07:08:32.879020 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83jmktf" Nov 28 07:08:33 crc kubenswrapper[4922]: I1128 07:08:33.331450 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83jmktf"] Nov 28 07:08:33 crc kubenswrapper[4922]: W1128 07:08:33.342658 4922 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod57ced773_180b_4ccd_a494_a78a39e66083.slice/crio-2bfde0b86c22c3e5365f6578eba7b216e63d943fce4e928a697d49c14d37265b WatchSource:0}: Error finding container 2bfde0b86c22c3e5365f6578eba7b216e63d943fce4e928a697d49c14d37265b: Status 404 returned error can't find the container with id 2bfde0b86c22c3e5365f6578eba7b216e63d943fce4e928a697d49c14d37265b Nov 28 07:08:33 crc kubenswrapper[4922]: I1128 07:08:33.828179 4922 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-console/console-f9d7485db-h297g" podUID="7de83d0f-3269-4343-b2a9-398b8d4af4fc" containerName="console" containerID="cri-o://500c7612909a34326f6a36b7737a0dd0b0b321c52d175bc83973037b802e9332" gracePeriod=15 Nov 28 07:08:33 crc kubenswrapper[4922]: I1128 07:08:33.911891 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83jmktf" event={"ID":"57ced773-180b-4ccd-a494-a78a39e66083","Type":"ContainerStarted","Data":"2bfde0b86c22c3e5365f6578eba7b216e63d943fce4e928a697d49c14d37265b"} Nov 28 07:08:34 crc kubenswrapper[4922]: I1128 07:08:34.408271 4922 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-console_console-f9d7485db-h297g_7de83d0f-3269-4343-b2a9-398b8d4af4fc/console/0.log" Nov 28 07:08:34 crc kubenswrapper[4922]: I1128 07:08:34.408614 4922 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-console/console-f9d7485db-h297g" Nov 28 07:08:34 crc kubenswrapper[4922]: I1128 07:08:34.558074 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/7de83d0f-3269-4343-b2a9-398b8d4af4fc-trusted-ca-bundle\") pod \"7de83d0f-3269-4343-b2a9-398b8d4af4fc\" (UID: \"7de83d0f-3269-4343-b2a9-398b8d4af4fc\") " Nov 28 07:08:34 crc kubenswrapper[4922]: I1128 07:08:34.558129 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/7de83d0f-3269-4343-b2a9-398b8d4af4fc-service-ca\") pod \"7de83d0f-3269-4343-b2a9-398b8d4af4fc\" (UID: \"7de83d0f-3269-4343-b2a9-398b8d4af4fc\") " Nov 28 07:08:34 crc kubenswrapper[4922]: I1128 07:08:34.558194 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/7de83d0f-3269-4343-b2a9-398b8d4af4fc-console-oauth-config\") pod \"7de83d0f-3269-4343-b2a9-398b8d4af4fc\" (UID: \"7de83d0f-3269-4343-b2a9-398b8d4af4fc\") " Nov 28 07:08:34 crc kubenswrapper[4922]: I1128 07:08:34.558236 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/7de83d0f-3269-4343-b2a9-398b8d4af4fc-console-serving-cert\") pod \"7de83d0f-3269-4343-b2a9-398b8d4af4fc\" (UID: \"7de83d0f-3269-4343-b2a9-398b8d4af4fc\") " Nov 28 07:08:34 crc kubenswrapper[4922]: I1128 07:08:34.558280 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/7de83d0f-3269-4343-b2a9-398b8d4af4fc-oauth-serving-cert\") pod \"7de83d0f-3269-4343-b2a9-398b8d4af4fc\" (UID: \"7de83d0f-3269-4343-b2a9-398b8d4af4fc\") " Nov 28 07:08:34 crc kubenswrapper[4922]: I1128 07:08:34.558329 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/7de83d0f-3269-4343-b2a9-398b8d4af4fc-console-config\") pod \"7de83d0f-3269-4343-b2a9-398b8d4af4fc\" (UID: \"7de83d0f-3269-4343-b2a9-398b8d4af4fc\") " Nov 28 07:08:34 crc kubenswrapper[4922]: I1128 07:08:34.558351 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-g8552\" (UniqueName: \"kubernetes.io/projected/7de83d0f-3269-4343-b2a9-398b8d4af4fc-kube-api-access-g8552\") pod \"7de83d0f-3269-4343-b2a9-398b8d4af4fc\" (UID: \"7de83d0f-3269-4343-b2a9-398b8d4af4fc\") " Nov 28 07:08:34 crc kubenswrapper[4922]: I1128 07:08:34.559583 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7de83d0f-3269-4343-b2a9-398b8d4af4fc-console-config" (OuterVolumeSpecName: "console-config") pod "7de83d0f-3269-4343-b2a9-398b8d4af4fc" (UID: "7de83d0f-3269-4343-b2a9-398b8d4af4fc"). InnerVolumeSpecName "console-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 07:08:34 crc kubenswrapper[4922]: I1128 07:08:34.559668 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7de83d0f-3269-4343-b2a9-398b8d4af4fc-service-ca" (OuterVolumeSpecName: "service-ca") pod "7de83d0f-3269-4343-b2a9-398b8d4af4fc" (UID: "7de83d0f-3269-4343-b2a9-398b8d4af4fc"). InnerVolumeSpecName "service-ca". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 07:08:34 crc kubenswrapper[4922]: I1128 07:08:34.559680 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7de83d0f-3269-4343-b2a9-398b8d4af4fc-oauth-serving-cert" (OuterVolumeSpecName: "oauth-serving-cert") pod "7de83d0f-3269-4343-b2a9-398b8d4af4fc" (UID: "7de83d0f-3269-4343-b2a9-398b8d4af4fc"). InnerVolumeSpecName "oauth-serving-cert". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 07:08:34 crc kubenswrapper[4922]: I1128 07:08:34.560924 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7de83d0f-3269-4343-b2a9-398b8d4af4fc-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "7de83d0f-3269-4343-b2a9-398b8d4af4fc" (UID: "7de83d0f-3269-4343-b2a9-398b8d4af4fc"). InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 07:08:34 crc kubenswrapper[4922]: I1128 07:08:34.564553 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7de83d0f-3269-4343-b2a9-398b8d4af4fc-kube-api-access-g8552" (OuterVolumeSpecName: "kube-api-access-g8552") pod "7de83d0f-3269-4343-b2a9-398b8d4af4fc" (UID: "7de83d0f-3269-4343-b2a9-398b8d4af4fc"). InnerVolumeSpecName "kube-api-access-g8552". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 07:08:34 crc kubenswrapper[4922]: I1128 07:08:34.564952 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7de83d0f-3269-4343-b2a9-398b8d4af4fc-console-oauth-config" (OuterVolumeSpecName: "console-oauth-config") pod "7de83d0f-3269-4343-b2a9-398b8d4af4fc" (UID: "7de83d0f-3269-4343-b2a9-398b8d4af4fc"). InnerVolumeSpecName "console-oauth-config". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 07:08:34 crc kubenswrapper[4922]: I1128 07:08:34.566044 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7de83d0f-3269-4343-b2a9-398b8d4af4fc-console-serving-cert" (OuterVolumeSpecName: "console-serving-cert") pod "7de83d0f-3269-4343-b2a9-398b8d4af4fc" (UID: "7de83d0f-3269-4343-b2a9-398b8d4af4fc"). InnerVolumeSpecName "console-serving-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 07:08:34 crc kubenswrapper[4922]: I1128 07:08:34.660380 4922 reconciler_common.go:293] "Volume detached for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/7de83d0f-3269-4343-b2a9-398b8d4af4fc-console-config\") on node \"crc\" DevicePath \"\"" Nov 28 07:08:34 crc kubenswrapper[4922]: I1128 07:08:34.660436 4922 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-g8552\" (UniqueName: \"kubernetes.io/projected/7de83d0f-3269-4343-b2a9-398b8d4af4fc-kube-api-access-g8552\") on node \"crc\" DevicePath \"\"" Nov 28 07:08:34 crc kubenswrapper[4922]: I1128 07:08:34.660450 4922 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/7de83d0f-3269-4343-b2a9-398b8d4af4fc-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 07:08:34 crc kubenswrapper[4922]: I1128 07:08:34.660462 4922 reconciler_common.go:293] "Volume detached for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/7de83d0f-3269-4343-b2a9-398b8d4af4fc-service-ca\") on node \"crc\" DevicePath \"\"" Nov 28 07:08:34 crc kubenswrapper[4922]: I1128 07:08:34.660473 4922 reconciler_common.go:293] "Volume detached for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/7de83d0f-3269-4343-b2a9-398b8d4af4fc-console-oauth-config\") on node \"crc\" DevicePath \"\"" Nov 28 07:08:34 crc kubenswrapper[4922]: I1128 07:08:34.660485 4922 reconciler_common.go:293] "Volume detached for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/7de83d0f-3269-4343-b2a9-398b8d4af4fc-console-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 28 07:08:34 crc kubenswrapper[4922]: I1128 07:08:34.660497 4922 reconciler_common.go:293] "Volume detached for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/7de83d0f-3269-4343-b2a9-398b8d4af4fc-oauth-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 28 07:08:34 crc kubenswrapper[4922]: I1128 07:08:34.922161 4922 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-console_console-f9d7485db-h297g_7de83d0f-3269-4343-b2a9-398b8d4af4fc/console/0.log" Nov 28 07:08:34 crc kubenswrapper[4922]: I1128 07:08:34.922277 4922 generic.go:334] "Generic (PLEG): container finished" podID="7de83d0f-3269-4343-b2a9-398b8d4af4fc" containerID="500c7612909a34326f6a36b7737a0dd0b0b321c52d175bc83973037b802e9332" exitCode=2 Nov 28 07:08:34 crc kubenswrapper[4922]: I1128 07:08:34.922375 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-f9d7485db-h297g" event={"ID":"7de83d0f-3269-4343-b2a9-398b8d4af4fc","Type":"ContainerDied","Data":"500c7612909a34326f6a36b7737a0dd0b0b321c52d175bc83973037b802e9332"} Nov 28 07:08:34 crc kubenswrapper[4922]: I1128 07:08:34.922414 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-f9d7485db-h297g" event={"ID":"7de83d0f-3269-4343-b2a9-398b8d4af4fc","Type":"ContainerDied","Data":"8e889a92d7a0a02a2add9de486cce47ce6ab541fcc50cda3e48aa3cddbb0e595"} Nov 28 07:08:34 crc kubenswrapper[4922]: I1128 07:08:34.922430 4922 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-console/console-f9d7485db-h297g" Nov 28 07:08:34 crc kubenswrapper[4922]: I1128 07:08:34.922454 4922 scope.go:117] "RemoveContainer" containerID="500c7612909a34326f6a36b7737a0dd0b0b321c52d175bc83973037b802e9332" Nov 28 07:08:34 crc kubenswrapper[4922]: I1128 07:08:34.924816 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83jmktf" event={"ID":"57ced773-180b-4ccd-a494-a78a39e66083","Type":"ContainerStarted","Data":"15c6a8b25a83ea2d3bbb099c0ae74144fa82f94651d4538f6ddef537dacdd9da"} Nov 28 07:08:34 crc kubenswrapper[4922]: I1128 07:08:34.957596 4922 scope.go:117] "RemoveContainer" containerID="500c7612909a34326f6a36b7737a0dd0b0b321c52d175bc83973037b802e9332" Nov 28 07:08:34 crc kubenswrapper[4922]: E1128 07:08:34.958183 4922 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"500c7612909a34326f6a36b7737a0dd0b0b321c52d175bc83973037b802e9332\": container with ID starting with 500c7612909a34326f6a36b7737a0dd0b0b321c52d175bc83973037b802e9332 not found: ID does not exist" containerID="500c7612909a34326f6a36b7737a0dd0b0b321c52d175bc83973037b802e9332" Nov 28 07:08:34 crc kubenswrapper[4922]: I1128 07:08:34.958385 4922 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"500c7612909a34326f6a36b7737a0dd0b0b321c52d175bc83973037b802e9332"} err="failed to get container status \"500c7612909a34326f6a36b7737a0dd0b0b321c52d175bc83973037b802e9332\": rpc error: code = NotFound desc = could not find container \"500c7612909a34326f6a36b7737a0dd0b0b321c52d175bc83973037b802e9332\": container with ID starting with 500c7612909a34326f6a36b7737a0dd0b0b321c52d175bc83973037b802e9332 not found: ID does not exist" Nov 28 07:08:34 crc kubenswrapper[4922]: I1128 07:08:34.974328 4922 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-console/console-f9d7485db-h297g"] Nov 28 07:08:34 crc kubenswrapper[4922]: I1128 07:08:34.977161 4922 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-console/console-f9d7485db-h297g"] Nov 28 07:08:35 crc kubenswrapper[4922]: I1128 07:08:35.411056 4922 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7de83d0f-3269-4343-b2a9-398b8d4af4fc" path="/var/lib/kubelet/pods/7de83d0f-3269-4343-b2a9-398b8d4af4fc/volumes" Nov 28 07:08:35 crc kubenswrapper[4922]: I1128 07:08:35.954710 4922 generic.go:334] "Generic (PLEG): container finished" podID="57ced773-180b-4ccd-a494-a78a39e66083" containerID="15c6a8b25a83ea2d3bbb099c0ae74144fa82f94651d4538f6ddef537dacdd9da" exitCode=0 Nov 28 07:08:35 crc kubenswrapper[4922]: I1128 07:08:35.955185 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83jmktf" event={"ID":"57ced773-180b-4ccd-a494-a78a39e66083","Type":"ContainerDied","Data":"15c6a8b25a83ea2d3bbb099c0ae74144fa82f94651d4538f6ddef537dacdd9da"} Nov 28 07:08:51 crc kubenswrapper[4922]: I1128 07:08:51.058927 4922 generic.go:334] "Generic (PLEG): container finished" podID="57ced773-180b-4ccd-a494-a78a39e66083" containerID="92a31e88c7833c3d1dc707eb0c50bce9c70a19d19adf87f20c415c67ab4b01b5" exitCode=0 Nov 28 07:08:51 crc kubenswrapper[4922]: I1128 07:08:51.059146 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83jmktf" 
event={"ID":"57ced773-180b-4ccd-a494-a78a39e66083","Type":"ContainerDied","Data":"92a31e88c7833c3d1dc707eb0c50bce9c70a19d19adf87f20c415c67ab4b01b5"} Nov 28 07:08:52 crc kubenswrapper[4922]: I1128 07:08:52.069083 4922 generic.go:334] "Generic (PLEG): container finished" podID="57ced773-180b-4ccd-a494-a78a39e66083" containerID="3e862b6aa5aaa0acf8e1978d87c67ea9a56fb27432b9ec47b719d3ded645affc" exitCode=0 Nov 28 07:08:52 crc kubenswrapper[4922]: I1128 07:08:52.069299 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83jmktf" event={"ID":"57ced773-180b-4ccd-a494-a78a39e66083","Type":"ContainerDied","Data":"3e862b6aa5aaa0acf8e1978d87c67ea9a56fb27432b9ec47b719d3ded645affc"} Nov 28 07:08:53 crc kubenswrapper[4922]: I1128 07:08:53.384103 4922 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83jmktf" Nov 28 07:08:53 crc kubenswrapper[4922]: I1128 07:08:53.415987 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/57ced773-180b-4ccd-a494-a78a39e66083-bundle\") pod \"57ced773-180b-4ccd-a494-a78a39e66083\" (UID: \"57ced773-180b-4ccd-a494-a78a39e66083\") " Nov 28 07:08:53 crc kubenswrapper[4922]: I1128 07:08:53.416126 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bqnm8\" (UniqueName: \"kubernetes.io/projected/57ced773-180b-4ccd-a494-a78a39e66083-kube-api-access-bqnm8\") pod \"57ced773-180b-4ccd-a494-a78a39e66083\" (UID: \"57ced773-180b-4ccd-a494-a78a39e66083\") " Nov 28 07:08:53 crc kubenswrapper[4922]: I1128 07:08:53.416174 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/57ced773-180b-4ccd-a494-a78a39e66083-util\") pod \"57ced773-180b-4ccd-a494-a78a39e66083\" (UID: \"57ced773-180b-4ccd-a494-a78a39e66083\") " Nov 28 07:08:53 crc kubenswrapper[4922]: I1128 07:08:53.417928 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/57ced773-180b-4ccd-a494-a78a39e66083-bundle" (OuterVolumeSpecName: "bundle") pod "57ced773-180b-4ccd-a494-a78a39e66083" (UID: "57ced773-180b-4ccd-a494-a78a39e66083"). InnerVolumeSpecName "bundle". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 07:08:53 crc kubenswrapper[4922]: I1128 07:08:53.424261 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/57ced773-180b-4ccd-a494-a78a39e66083-kube-api-access-bqnm8" (OuterVolumeSpecName: "kube-api-access-bqnm8") pod "57ced773-180b-4ccd-a494-a78a39e66083" (UID: "57ced773-180b-4ccd-a494-a78a39e66083"). InnerVolumeSpecName "kube-api-access-bqnm8". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 07:08:53 crc kubenswrapper[4922]: I1128 07:08:53.430806 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/57ced773-180b-4ccd-a494-a78a39e66083-util" (OuterVolumeSpecName: "util") pod "57ced773-180b-4ccd-a494-a78a39e66083" (UID: "57ced773-180b-4ccd-a494-a78a39e66083"). InnerVolumeSpecName "util". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 07:08:53 crc kubenswrapper[4922]: I1128 07:08:53.519449 4922 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bqnm8\" (UniqueName: \"kubernetes.io/projected/57ced773-180b-4ccd-a494-a78a39e66083-kube-api-access-bqnm8\") on node \"crc\" DevicePath \"\"" Nov 28 07:08:53 crc kubenswrapper[4922]: I1128 07:08:53.519489 4922 reconciler_common.go:293] "Volume detached for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/57ced773-180b-4ccd-a494-a78a39e66083-util\") on node \"crc\" DevicePath \"\"" Nov 28 07:08:53 crc kubenswrapper[4922]: I1128 07:08:53.519504 4922 reconciler_common.go:293] "Volume detached for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/57ced773-180b-4ccd-a494-a78a39e66083-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 07:08:54 crc kubenswrapper[4922]: I1128 07:08:54.090370 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83jmktf" event={"ID":"57ced773-180b-4ccd-a494-a78a39e66083","Type":"ContainerDied","Data":"2bfde0b86c22c3e5365f6578eba7b216e63d943fce4e928a697d49c14d37265b"} Nov 28 07:08:54 crc kubenswrapper[4922]: I1128 07:08:54.090795 4922 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="2bfde0b86c22c3e5365f6578eba7b216e63d943fce4e928a697d49c14d37265b" Nov 28 07:08:54 crc kubenswrapper[4922]: I1128 07:08:54.090474 4922 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83jmktf" Nov 28 07:08:57 crc kubenswrapper[4922]: I1128 07:08:57.311697 4922 patch_prober.go:28] interesting pod/machine-config-daemon-h8wk6 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 07:08:57 crc kubenswrapper[4922]: I1128 07:08:57.312193 4922 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-h8wk6" podUID="0498340a-5e95-42bf-a0a6-8ac89a6b8858" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 07:09:06 crc kubenswrapper[4922]: I1128 07:09:06.332674 4922 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/metallb-operator-controller-manager-5cdbbf65c9-tnz7w"] Nov 28 07:09:06 crc kubenswrapper[4922]: E1128 07:09:06.333348 4922 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7de83d0f-3269-4343-b2a9-398b8d4af4fc" containerName="console" Nov 28 07:09:06 crc kubenswrapper[4922]: I1128 07:09:06.333378 4922 state_mem.go:107] "Deleted CPUSet assignment" podUID="7de83d0f-3269-4343-b2a9-398b8d4af4fc" containerName="console" Nov 28 07:09:06 crc kubenswrapper[4922]: E1128 07:09:06.333398 4922 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="57ced773-180b-4ccd-a494-a78a39e66083" containerName="util" Nov 28 07:09:06 crc kubenswrapper[4922]: I1128 07:09:06.333411 4922 state_mem.go:107] "Deleted CPUSet assignment" podUID="57ced773-180b-4ccd-a494-a78a39e66083" containerName="util" Nov 28 07:09:06 crc kubenswrapper[4922]: E1128 07:09:06.333432 4922 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="57ced773-180b-4ccd-a494-a78a39e66083" containerName="extract" Nov 28 
07:09:06 crc kubenswrapper[4922]: I1128 07:09:06.333458 4922 state_mem.go:107] "Deleted CPUSet assignment" podUID="57ced773-180b-4ccd-a494-a78a39e66083" containerName="extract" Nov 28 07:09:06 crc kubenswrapper[4922]: E1128 07:09:06.333483 4922 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="57ced773-180b-4ccd-a494-a78a39e66083" containerName="pull" Nov 28 07:09:06 crc kubenswrapper[4922]: I1128 07:09:06.333495 4922 state_mem.go:107] "Deleted CPUSet assignment" podUID="57ced773-180b-4ccd-a494-a78a39e66083" containerName="pull" Nov 28 07:09:06 crc kubenswrapper[4922]: I1128 07:09:06.333677 4922 memory_manager.go:354] "RemoveStaleState removing state" podUID="7de83d0f-3269-4343-b2a9-398b8d4af4fc" containerName="console" Nov 28 07:09:06 crc kubenswrapper[4922]: I1128 07:09:06.333702 4922 memory_manager.go:354] "RemoveStaleState removing state" podUID="57ced773-180b-4ccd-a494-a78a39e66083" containerName="extract" Nov 28 07:09:06 crc kubenswrapper[4922]: I1128 07:09:06.334310 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/metallb-operator-controller-manager-5cdbbf65c9-tnz7w" Nov 28 07:09:06 crc kubenswrapper[4922]: I1128 07:09:06.336299 4922 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-operator-controller-manager-service-cert" Nov 28 07:09:06 crc kubenswrapper[4922]: I1128 07:09:06.336340 4922 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-operator-webhook-server-cert" Nov 28 07:09:06 crc kubenswrapper[4922]: I1128 07:09:06.336522 4922 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"manager-account-dockercfg-6tm6f" Nov 28 07:09:06 crc kubenswrapper[4922]: I1128 07:09:06.336563 4922 reflector.go:368] Caches populated for *v1.ConfigMap from object-"metallb-system"/"openshift-service-ca.crt" Nov 28 07:09:06 crc kubenswrapper[4922]: I1128 07:09:06.336802 4922 reflector.go:368] Caches populated for *v1.ConfigMap from object-"metallb-system"/"kube-root-ca.crt" Nov 28 07:09:06 crc kubenswrapper[4922]: I1128 07:09:06.355621 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/metallb-operator-controller-manager-5cdbbf65c9-tnz7w"] Nov 28 07:09:06 crc kubenswrapper[4922]: I1128 07:09:06.444493 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/60818a80-77e8-4256-a958-a6333c763453-apiservice-cert\") pod \"metallb-operator-controller-manager-5cdbbf65c9-tnz7w\" (UID: \"60818a80-77e8-4256-a958-a6333c763453\") " pod="metallb-system/metallb-operator-controller-manager-5cdbbf65c9-tnz7w" Nov 28 07:09:06 crc kubenswrapper[4922]: I1128 07:09:06.444595 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/60818a80-77e8-4256-a958-a6333c763453-webhook-cert\") pod \"metallb-operator-controller-manager-5cdbbf65c9-tnz7w\" (UID: \"60818a80-77e8-4256-a958-a6333c763453\") " pod="metallb-system/metallb-operator-controller-manager-5cdbbf65c9-tnz7w" Nov 28 07:09:06 crc kubenswrapper[4922]: I1128 07:09:06.444615 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-crgwg\" (UniqueName: \"kubernetes.io/projected/60818a80-77e8-4256-a958-a6333c763453-kube-api-access-crgwg\") pod \"metallb-operator-controller-manager-5cdbbf65c9-tnz7w\" (UID: 
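
The RemoveStaleState entries above drop CPU-manager and memory-manager state for containers of pods that no longer exist (console, util, extract, pull) before admitting the new metallb pod. A toy sketch of that pruning under the assumption of a simple in-memory map keyed by pod UID and container name; the types and cpuset strings are invented for illustration.

package main

import "fmt"

type key struct{ podUID, container string }

// pruneStale deletes resource-manager assignments whose pod is no
// longer in the active set.
func pruneStale(assignments map[key]string, activePods map[string]bool) {
	for k := range assignments {
		if !activePods[k.podUID] {
			fmt.Printf("removing stale assignment %v\n", k)
			delete(assignments, k)
		}
	}
}

func main() {
	assignments := map[key]string{
		{"7de83d0f", "console"}: "0-3",
		{"57ced773", "extract"}: "0-3",
		{"60818a80", "manager"}: "0-3",
	}
	active := map[string]bool{"60818a80": true} // only the new pod remains
	pruneStale(assignments, active)
	fmt.Println(len(assignments), "assignment(s) left")
}
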
\"60818a80-77e8-4256-a958-a6333c763453\") " pod="metallb-system/metallb-operator-controller-manager-5cdbbf65c9-tnz7w" Nov 28 07:09:06 crc kubenswrapper[4922]: I1128 07:09:06.546142 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/60818a80-77e8-4256-a958-a6333c763453-apiservice-cert\") pod \"metallb-operator-controller-manager-5cdbbf65c9-tnz7w\" (UID: \"60818a80-77e8-4256-a958-a6333c763453\") " pod="metallb-system/metallb-operator-controller-manager-5cdbbf65c9-tnz7w" Nov 28 07:09:06 crc kubenswrapper[4922]: I1128 07:09:06.546244 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/60818a80-77e8-4256-a958-a6333c763453-webhook-cert\") pod \"metallb-operator-controller-manager-5cdbbf65c9-tnz7w\" (UID: \"60818a80-77e8-4256-a958-a6333c763453\") " pod="metallb-system/metallb-operator-controller-manager-5cdbbf65c9-tnz7w" Nov 28 07:09:06 crc kubenswrapper[4922]: I1128 07:09:06.546274 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-crgwg\" (UniqueName: \"kubernetes.io/projected/60818a80-77e8-4256-a958-a6333c763453-kube-api-access-crgwg\") pod \"metallb-operator-controller-manager-5cdbbf65c9-tnz7w\" (UID: \"60818a80-77e8-4256-a958-a6333c763453\") " pod="metallb-system/metallb-operator-controller-manager-5cdbbf65c9-tnz7w" Nov 28 07:09:06 crc kubenswrapper[4922]: I1128 07:09:06.552786 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/60818a80-77e8-4256-a958-a6333c763453-apiservice-cert\") pod \"metallb-operator-controller-manager-5cdbbf65c9-tnz7w\" (UID: \"60818a80-77e8-4256-a958-a6333c763453\") " pod="metallb-system/metallb-operator-controller-manager-5cdbbf65c9-tnz7w" Nov 28 07:09:06 crc kubenswrapper[4922]: I1128 07:09:06.561909 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-crgwg\" (UniqueName: \"kubernetes.io/projected/60818a80-77e8-4256-a958-a6333c763453-kube-api-access-crgwg\") pod \"metallb-operator-controller-manager-5cdbbf65c9-tnz7w\" (UID: \"60818a80-77e8-4256-a958-a6333c763453\") " pod="metallb-system/metallb-operator-controller-manager-5cdbbf65c9-tnz7w" Nov 28 07:09:06 crc kubenswrapper[4922]: I1128 07:09:06.561948 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/60818a80-77e8-4256-a958-a6333c763453-webhook-cert\") pod \"metallb-operator-controller-manager-5cdbbf65c9-tnz7w\" (UID: \"60818a80-77e8-4256-a958-a6333c763453\") " pod="metallb-system/metallb-operator-controller-manager-5cdbbf65c9-tnz7w" Nov 28 07:09:06 crc kubenswrapper[4922]: I1128 07:09:06.580391 4922 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/metallb-operator-webhook-server-74d65d85cd-qs7c6"] Nov 28 07:09:06 crc kubenswrapper[4922]: I1128 07:09:06.582519 4922 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="metallb-system/metallb-operator-webhook-server-74d65d85cd-qs7c6" Nov 28 07:09:06 crc kubenswrapper[4922]: I1128 07:09:06.630874 4922 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-webhook-cert" Nov 28 07:09:06 crc kubenswrapper[4922]: I1128 07:09:06.631176 4922 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"controller-dockercfg-4wglx" Nov 28 07:09:06 crc kubenswrapper[4922]: I1128 07:09:06.631337 4922 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-operator-webhook-server-service-cert" Nov 28 07:09:06 crc kubenswrapper[4922]: I1128 07:09:06.635535 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/metallb-operator-webhook-server-74d65d85cd-qs7c6"] Nov 28 07:09:06 crc kubenswrapper[4922]: I1128 07:09:06.648067 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-z9hps\" (UniqueName: \"kubernetes.io/projected/5de70ea0-722a-4efb-8f49-f005ef4f661c-kube-api-access-z9hps\") pod \"metallb-operator-webhook-server-74d65d85cd-qs7c6\" (UID: \"5de70ea0-722a-4efb-8f49-f005ef4f661c\") " pod="metallb-system/metallb-operator-webhook-server-74d65d85cd-qs7c6" Nov 28 07:09:06 crc kubenswrapper[4922]: I1128 07:09:06.648560 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/5de70ea0-722a-4efb-8f49-f005ef4f661c-webhook-cert\") pod \"metallb-operator-webhook-server-74d65d85cd-qs7c6\" (UID: \"5de70ea0-722a-4efb-8f49-f005ef4f661c\") " pod="metallb-system/metallb-operator-webhook-server-74d65d85cd-qs7c6" Nov 28 07:09:06 crc kubenswrapper[4922]: I1128 07:09:06.648642 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/5de70ea0-722a-4efb-8f49-f005ef4f661c-apiservice-cert\") pod \"metallb-operator-webhook-server-74d65d85cd-qs7c6\" (UID: \"5de70ea0-722a-4efb-8f49-f005ef4f661c\") " pod="metallb-system/metallb-operator-webhook-server-74d65d85cd-qs7c6" Nov 28 07:09:06 crc kubenswrapper[4922]: I1128 07:09:06.649200 4922 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="metallb-system/metallb-operator-controller-manager-5cdbbf65c9-tnz7w" Nov 28 07:09:06 crc kubenswrapper[4922]: I1128 07:09:06.752142 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/5de70ea0-722a-4efb-8f49-f005ef4f661c-webhook-cert\") pod \"metallb-operator-webhook-server-74d65d85cd-qs7c6\" (UID: \"5de70ea0-722a-4efb-8f49-f005ef4f661c\") " pod="metallb-system/metallb-operator-webhook-server-74d65d85cd-qs7c6" Nov 28 07:09:06 crc kubenswrapper[4922]: I1128 07:09:06.752512 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/5de70ea0-722a-4efb-8f49-f005ef4f661c-apiservice-cert\") pod \"metallb-operator-webhook-server-74d65d85cd-qs7c6\" (UID: \"5de70ea0-722a-4efb-8f49-f005ef4f661c\") " pod="metallb-system/metallb-operator-webhook-server-74d65d85cd-qs7c6" Nov 28 07:09:06 crc kubenswrapper[4922]: I1128 07:09:06.752569 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-z9hps\" (UniqueName: \"kubernetes.io/projected/5de70ea0-722a-4efb-8f49-f005ef4f661c-kube-api-access-z9hps\") pod \"metallb-operator-webhook-server-74d65d85cd-qs7c6\" (UID: \"5de70ea0-722a-4efb-8f49-f005ef4f661c\") " pod="metallb-system/metallb-operator-webhook-server-74d65d85cd-qs7c6" Nov 28 07:09:06 crc kubenswrapper[4922]: I1128 07:09:06.759005 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/5de70ea0-722a-4efb-8f49-f005ef4f661c-webhook-cert\") pod \"metallb-operator-webhook-server-74d65d85cd-qs7c6\" (UID: \"5de70ea0-722a-4efb-8f49-f005ef4f661c\") " pod="metallb-system/metallb-operator-webhook-server-74d65d85cd-qs7c6" Nov 28 07:09:06 crc kubenswrapper[4922]: I1128 07:09:06.759528 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/5de70ea0-722a-4efb-8f49-f005ef4f661c-apiservice-cert\") pod \"metallb-operator-webhook-server-74d65d85cd-qs7c6\" (UID: \"5de70ea0-722a-4efb-8f49-f005ef4f661c\") " pod="metallb-system/metallb-operator-webhook-server-74d65d85cd-qs7c6" Nov 28 07:09:06 crc kubenswrapper[4922]: I1128 07:09:06.791239 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-z9hps\" (UniqueName: \"kubernetes.io/projected/5de70ea0-722a-4efb-8f49-f005ef4f661c-kube-api-access-z9hps\") pod \"metallb-operator-webhook-server-74d65d85cd-qs7c6\" (UID: \"5de70ea0-722a-4efb-8f49-f005ef4f661c\") " pod="metallb-system/metallb-operator-webhook-server-74d65d85cd-qs7c6" Nov 28 07:09:06 crc kubenswrapper[4922]: I1128 07:09:06.949738 4922 util.go:30] "No sandbox for pod can be found. 
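
The volume entries above follow a two-phase pattern: VerifyControllerAttachedVolume first records the volume as attached, then MountVolume runs SetUp to materialize it for the pod. A toy reconciler sketch under that assumption; the types below are invented for illustration and are not the kubelet's operation executor.

package main

import "fmt"

type volume struct{ name string }

type reconciler struct {
	attached map[string]bool
	mounted  map[string]bool
}

// sync walks the desired volumes: verify attachment first, then mount.
func (r *reconciler) sync(desired []volume) {
	for _, v := range desired {
		if !r.attached[v.name] {
			fmt.Printf("VerifyControllerAttachedVolume started for %q\n", v.name)
			r.attached[v.name] = true
		}
		if !r.mounted[v.name] {
			fmt.Printf("MountVolume.SetUp succeeded for %q\n", v.name)
			r.mounted[v.name] = true
		}
	}
}

func main() {
	r := &reconciler{attached: map[string]bool{}, mounted: map[string]bool{}}
	r.sync([]volume{{"webhook-cert"}, {"apiservice-cert"}, {"kube-api-access-z9hps"}})
}
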
Need to start a new one" pod="metallb-system/metallb-operator-webhook-server-74d65d85cd-qs7c6" Nov 28 07:09:07 crc kubenswrapper[4922]: I1128 07:09:07.102064 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/metallb-operator-controller-manager-5cdbbf65c9-tnz7w"] Nov 28 07:09:07 crc kubenswrapper[4922]: W1128 07:09:07.111990 4922 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod60818a80_77e8_4256_a958_a6333c763453.slice/crio-58bb3d8433a432ceea545af14d02fe35377f51dce814b362004981b28f37127a WatchSource:0}: Error finding container 58bb3d8433a432ceea545af14d02fe35377f51dce814b362004981b28f37127a: Status 404 returned error can't find the container with id 58bb3d8433a432ceea545af14d02fe35377f51dce814b362004981b28f37127a Nov 28 07:09:07 crc kubenswrapper[4922]: I1128 07:09:07.160522 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/metallb-operator-webhook-server-74d65d85cd-qs7c6"] Nov 28 07:09:07 crc kubenswrapper[4922]: I1128 07:09:07.176486 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-webhook-server-74d65d85cd-qs7c6" event={"ID":"5de70ea0-722a-4efb-8f49-f005ef4f661c","Type":"ContainerStarted","Data":"35d25c05e1a1e075d3439f5a612dfe7f179e18f9ac756f285aed2f5ea3b9bd20"} Nov 28 07:09:07 crc kubenswrapper[4922]: I1128 07:09:07.177775 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-controller-manager-5cdbbf65c9-tnz7w" event={"ID":"60818a80-77e8-4256-a958-a6333c763453","Type":"ContainerStarted","Data":"58bb3d8433a432ceea545af14d02fe35377f51dce814b362004981b28f37127a"} Nov 28 07:09:14 crc kubenswrapper[4922]: I1128 07:09:14.237729 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-webhook-server-74d65d85cd-qs7c6" event={"ID":"5de70ea0-722a-4efb-8f49-f005ef4f661c","Type":"ContainerStarted","Data":"4e61f078b8e0a2dafc0f280045187a4730e2f6ddbc38ae77f15da9b382b7d82b"} Nov 28 07:09:14 crc kubenswrapper[4922]: I1128 07:09:14.238335 4922 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/metallb-operator-webhook-server-74d65d85cd-qs7c6" Nov 28 07:09:14 crc kubenswrapper[4922]: I1128 07:09:14.240828 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-controller-manager-5cdbbf65c9-tnz7w" event={"ID":"60818a80-77e8-4256-a958-a6333c763453","Type":"ContainerStarted","Data":"6ef26d287f0edca677686249b784c4ec469a4a8ac7c55f30aef12078036c24df"} Nov 28 07:09:14 crc kubenswrapper[4922]: I1128 07:09:14.240957 4922 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/metallb-operator-controller-manager-5cdbbf65c9-tnz7w" Nov 28 07:09:14 crc kubenswrapper[4922]: I1128 07:09:14.269313 4922 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/metallb-operator-webhook-server-74d65d85cd-qs7c6" podStartSLOduration=2.310186983 podStartE2EDuration="8.269292881s" podCreationTimestamp="2025-11-28 07:09:06 +0000 UTC" firstStartedPulling="2025-11-28 07:09:07.166311925 +0000 UTC m=+992.086707507" lastFinishedPulling="2025-11-28 07:09:13.125417813 +0000 UTC m=+998.045813405" observedRunningTime="2025-11-28 07:09:14.264272985 +0000 UTC m=+999.184668587" watchObservedRunningTime="2025-11-28 07:09:14.269292881 +0000 UTC m=+999.189688463" Nov 28 07:09:14 crc kubenswrapper[4922]: I1128 07:09:14.293897 4922 pod_startup_latency_tracker.go:104] "Observed 
pod startup duration" pod="metallb-system/metallb-operator-controller-manager-5cdbbf65c9-tnz7w" podStartSLOduration=2.300751907 podStartE2EDuration="8.293880616s" podCreationTimestamp="2025-11-28 07:09:06 +0000 UTC" firstStartedPulling="2025-11-28 07:09:07.114720628 +0000 UTC m=+992.035116210" lastFinishedPulling="2025-11-28 07:09:13.107849327 +0000 UTC m=+998.028244919" observedRunningTime="2025-11-28 07:09:14.288702026 +0000 UTC m=+999.209097618" watchObservedRunningTime="2025-11-28 07:09:14.293880616 +0000 UTC m=+999.214276198" Nov 28 07:09:26 crc kubenswrapper[4922]: I1128 07:09:26.959320 4922 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/metallb-operator-webhook-server-74d65d85cd-qs7c6" Nov 28 07:09:27 crc kubenswrapper[4922]: I1128 07:09:27.312109 4922 patch_prober.go:28] interesting pod/machine-config-daemon-h8wk6 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 07:09:27 crc kubenswrapper[4922]: I1128 07:09:27.312207 4922 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-h8wk6" podUID="0498340a-5e95-42bf-a0a6-8ac89a6b8858" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 07:09:27 crc kubenswrapper[4922]: I1128 07:09:27.312328 4922 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-h8wk6" Nov 28 07:09:27 crc kubenswrapper[4922]: I1128 07:09:27.313147 4922 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"e4c215f7c16b1b6ee789152f5bfa304df0b7e2d633a6748eb5b815f0448ea2e7"} pod="openshift-machine-config-operator/machine-config-daemon-h8wk6" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 28 07:09:27 crc kubenswrapper[4922]: I1128 07:09:27.313291 4922 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-h8wk6" podUID="0498340a-5e95-42bf-a0a6-8ac89a6b8858" containerName="machine-config-daemon" containerID="cri-o://e4c215f7c16b1b6ee789152f5bfa304df0b7e2d633a6748eb5b815f0448ea2e7" gracePeriod=600 Nov 28 07:09:28 crc kubenswrapper[4922]: I1128 07:09:28.319253 4922 generic.go:334] "Generic (PLEG): container finished" podID="0498340a-5e95-42bf-a0a6-8ac89a6b8858" containerID="e4c215f7c16b1b6ee789152f5bfa304df0b7e2d633a6748eb5b815f0448ea2e7" exitCode=0 Nov 28 07:09:28 crc kubenswrapper[4922]: I1128 07:09:28.319263 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-h8wk6" event={"ID":"0498340a-5e95-42bf-a0a6-8ac89a6b8858","Type":"ContainerDied","Data":"e4c215f7c16b1b6ee789152f5bfa304df0b7e2d633a6748eb5b815f0448ea2e7"} Nov 28 07:09:28 crc kubenswrapper[4922]: I1128 07:09:28.319636 4922 scope.go:117] "RemoveContainer" containerID="9242a10d6f42a0f0e383f361e9eb3616225ad2f2836bf6798d0eb7b0a3a3d7b4" Nov 28 07:09:29 crc kubenswrapper[4922]: I1128 07:09:29.329497 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-h8wk6" 
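
The machine-config-daemon entries above show a liveness probe failing with connection refused, after which the container is killed with a 600s grace period and restarted. A small, self-contained sketch of an HTTP liveness check with a failure threshold; the threshold of 3 is an assumed default (it is not stated in the log), and the port mirrors the logged endpoint, where nothing listens here either.

package main

import (
	"fmt"
	"net/http"
	"time"
)

// probe returns nil on a 2xx response, an error otherwise.
func probe(url string) error {
	c := http.Client{Timeout: time.Second}
	resp, err := c.Get(url)
	if err != nil {
		return err
	}
	defer resp.Body.Close()
	if resp.StatusCode < 200 || resp.StatusCode >= 300 {
		return fmt.Errorf("unexpected status %d", resp.StatusCode)
	}
	return nil
}

func main() {
	const failureThreshold = 3
	failures := 0
	for i := 0; i < failureThreshold; i++ {
		if err := probe("http://127.0.0.1:8798/health"); err != nil {
			failures++
			fmt.Println("Probe failed:", err)
		}
	}
	if failures >= failureThreshold {
		fmt.Println("Container failed liveness probe, will be restarted (gracePeriod=600s)")
		// a real kubelet sends SIGTERM, waits up to the grace period,
		// then SIGKILLs; this sketch only reports the decision
	}
}
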
event={"ID":"0498340a-5e95-42bf-a0a6-8ac89a6b8858","Type":"ContainerStarted","Data":"74644ed2805eab754b767a063d8b9fa8b033ceca1db2f16aed9a8b2d915a2091"} Nov 28 07:09:46 crc kubenswrapper[4922]: I1128 07:09:46.652434 4922 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/metallb-operator-controller-manager-5cdbbf65c9-tnz7w" Nov 28 07:09:47 crc kubenswrapper[4922]: I1128 07:09:47.460861 4922 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/frr-k8s-qgjm8"] Nov 28 07:09:47 crc kubenswrapper[4922]: I1128 07:09:47.463768 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/frr-k8s-qgjm8" Nov 28 07:09:47 crc kubenswrapper[4922]: I1128 07:09:47.467818 4922 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"frr-k8s-daemon-dockercfg-n6fn8" Nov 28 07:09:47 crc kubenswrapper[4922]: I1128 07:09:47.468293 4922 reflector.go:368] Caches populated for *v1.ConfigMap from object-"metallb-system"/"frr-startup" Nov 28 07:09:47 crc kubenswrapper[4922]: I1128 07:09:47.468492 4922 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"frr-k8s-certs-secret" Nov 28 07:09:47 crc kubenswrapper[4922]: I1128 07:09:47.476869 4922 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/frr-k8s-webhook-server-7fcb986d4-g5vcm"] Nov 28 07:09:47 crc kubenswrapper[4922]: I1128 07:09:47.477967 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/frr-k8s-webhook-server-7fcb986d4-g5vcm" Nov 28 07:09:47 crc kubenswrapper[4922]: I1128 07:09:47.481566 4922 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"frr-k8s-webhook-server-cert" Nov 28 07:09:47 crc kubenswrapper[4922]: I1128 07:09:47.505471 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/frr-k8s-webhook-server-7fcb986d4-g5vcm"] Nov 28 07:09:47 crc kubenswrapper[4922]: I1128 07:09:47.543928 4922 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/speaker-ncff2"] Nov 28 07:09:47 crc kubenswrapper[4922]: I1128 07:09:47.544987 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/speaker-ncff2" Nov 28 07:09:47 crc kubenswrapper[4922]: I1128 07:09:47.548868 4922 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"speaker-dockercfg-l2klp" Nov 28 07:09:47 crc kubenswrapper[4922]: I1128 07:09:47.548978 4922 reflector.go:368] Caches populated for *v1.ConfigMap from object-"metallb-system"/"metallb-excludel2" Nov 28 07:09:47 crc kubenswrapper[4922]: I1128 07:09:47.549045 4922 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-memberlist" Nov 28 07:09:47 crc kubenswrapper[4922]: I1128 07:09:47.549700 4922 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"speaker-certs-secret" Nov 28 07:09:47 crc kubenswrapper[4922]: I1128 07:09:47.569570 4922 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/controller-f8648f98b-5zhcz"] Nov 28 07:09:47 crc kubenswrapper[4922]: I1128 07:09:47.570741 4922 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="metallb-system/controller-f8648f98b-5zhcz" Nov 28 07:09:47 crc kubenswrapper[4922]: I1128 07:09:47.573674 4922 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"controller-certs-secret" Nov 28 07:09:47 crc kubenswrapper[4922]: I1128 07:09:47.580210 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/controller-f8648f98b-5zhcz"] Nov 28 07:09:47 crc kubenswrapper[4922]: I1128 07:09:47.632273 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"reloader\" (UniqueName: \"kubernetes.io/empty-dir/72bbf902-847b-45f2-9bb3-57de7a0a88ce-reloader\") pod \"frr-k8s-qgjm8\" (UID: \"72bbf902-847b-45f2-9bb3-57de7a0a88ce\") " pod="metallb-system/frr-k8s-qgjm8" Nov 28 07:09:47 crc kubenswrapper[4922]: I1128 07:09:47.632329 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"frr-conf\" (UniqueName: \"kubernetes.io/empty-dir/72bbf902-847b-45f2-9bb3-57de7a0a88ce-frr-conf\") pod \"frr-k8s-qgjm8\" (UID: \"72bbf902-847b-45f2-9bb3-57de7a0a88ce\") " pod="metallb-system/frr-k8s-qgjm8" Nov 28 07:09:47 crc kubenswrapper[4922]: I1128 07:09:47.632351 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics\" (UniqueName: \"kubernetes.io/empty-dir/72bbf902-847b-45f2-9bb3-57de7a0a88ce-metrics\") pod \"frr-k8s-qgjm8\" (UID: \"72bbf902-847b-45f2-9bb3-57de7a0a88ce\") " pod="metallb-system/frr-k8s-qgjm8" Nov 28 07:09:47 crc kubenswrapper[4922]: I1128 07:09:47.632378 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/70214104-7571-4422-9e5d-85d690d8469f-cert\") pod \"frr-k8s-webhook-server-7fcb986d4-g5vcm\" (UID: \"70214104-7571-4422-9e5d-85d690d8469f\") " pod="metallb-system/frr-k8s-webhook-server-7fcb986d4-g5vcm" Nov 28 07:09:47 crc kubenswrapper[4922]: I1128 07:09:47.632437 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/72bbf902-847b-45f2-9bb3-57de7a0a88ce-metrics-certs\") pod \"frr-k8s-qgjm8\" (UID: \"72bbf902-847b-45f2-9bb3-57de7a0a88ce\") " pod="metallb-system/frr-k8s-qgjm8" Nov 28 07:09:47 crc kubenswrapper[4922]: I1128 07:09:47.632467 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metallb-excludel2\" (UniqueName: \"kubernetes.io/configmap/33fadc85-85d3-48cc-977c-babb047b9a0c-metallb-excludel2\") pod \"speaker-ncff2\" (UID: \"33fadc85-85d3-48cc-977c-babb047b9a0c\") " pod="metallb-system/speaker-ncff2" Nov 28 07:09:47 crc kubenswrapper[4922]: I1128 07:09:47.632498 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-62nmp\" (UniqueName: \"kubernetes.io/projected/72bbf902-847b-45f2-9bb3-57de7a0a88ce-kube-api-access-62nmp\") pod \"frr-k8s-qgjm8\" (UID: \"72bbf902-847b-45f2-9bb3-57de7a0a88ce\") " pod="metallb-system/frr-k8s-qgjm8" Nov 28 07:09:47 crc kubenswrapper[4922]: I1128 07:09:47.632524 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"frr-startup\" (UniqueName: \"kubernetes.io/configmap/72bbf902-847b-45f2-9bb3-57de7a0a88ce-frr-startup\") pod \"frr-k8s-qgjm8\" (UID: \"72bbf902-847b-45f2-9bb3-57de7a0a88ce\") " pod="metallb-system/frr-k8s-qgjm8" Nov 28 07:09:47 crc kubenswrapper[4922]: I1128 07:09:47.632565 4922 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"frr-sockets\" (UniqueName: \"kubernetes.io/empty-dir/72bbf902-847b-45f2-9bb3-57de7a0a88ce-frr-sockets\") pod \"frr-k8s-qgjm8\" (UID: \"72bbf902-847b-45f2-9bb3-57de7a0a88ce\") " pod="metallb-system/frr-k8s-qgjm8" Nov 28 07:09:47 crc kubenswrapper[4922]: I1128 07:09:47.632591 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-d8tzp\" (UniqueName: \"kubernetes.io/projected/33fadc85-85d3-48cc-977c-babb047b9a0c-kube-api-access-d8tzp\") pod \"speaker-ncff2\" (UID: \"33fadc85-85d3-48cc-977c-babb047b9a0c\") " pod="metallb-system/speaker-ncff2" Nov 28 07:09:47 crc kubenswrapper[4922]: I1128 07:09:47.632621 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/33fadc85-85d3-48cc-977c-babb047b9a0c-memberlist\") pod \"speaker-ncff2\" (UID: \"33fadc85-85d3-48cc-977c-babb047b9a0c\") " pod="metallb-system/speaker-ncff2" Nov 28 07:09:47 crc kubenswrapper[4922]: I1128 07:09:47.632650 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/33fadc85-85d3-48cc-977c-babb047b9a0c-metrics-certs\") pod \"speaker-ncff2\" (UID: \"33fadc85-85d3-48cc-977c-babb047b9a0c\") " pod="metallb-system/speaker-ncff2" Nov 28 07:09:47 crc kubenswrapper[4922]: I1128 07:09:47.632679 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pkg4j\" (UniqueName: \"kubernetes.io/projected/70214104-7571-4422-9e5d-85d690d8469f-kube-api-access-pkg4j\") pod \"frr-k8s-webhook-server-7fcb986d4-g5vcm\" (UID: \"70214104-7571-4422-9e5d-85d690d8469f\") " pod="metallb-system/frr-k8s-webhook-server-7fcb986d4-g5vcm" Nov 28 07:09:47 crc kubenswrapper[4922]: I1128 07:09:47.734975 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/70214104-7571-4422-9e5d-85d690d8469f-cert\") pod \"frr-k8s-webhook-server-7fcb986d4-g5vcm\" (UID: \"70214104-7571-4422-9e5d-85d690d8469f\") " pod="metallb-system/frr-k8s-webhook-server-7fcb986d4-g5vcm" Nov 28 07:09:47 crc kubenswrapper[4922]: I1128 07:09:47.735090 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/72bbf902-847b-45f2-9bb3-57de7a0a88ce-metrics-certs\") pod \"frr-k8s-qgjm8\" (UID: \"72bbf902-847b-45f2-9bb3-57de7a0a88ce\") " pod="metallb-system/frr-k8s-qgjm8" Nov 28 07:09:47 crc kubenswrapper[4922]: I1128 07:09:47.735124 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metallb-excludel2\" (UniqueName: \"kubernetes.io/configmap/33fadc85-85d3-48cc-977c-babb047b9a0c-metallb-excludel2\") pod \"speaker-ncff2\" (UID: \"33fadc85-85d3-48cc-977c-babb047b9a0c\") " pod="metallb-system/speaker-ncff2" Nov 28 07:09:47 crc kubenswrapper[4922]: I1128 07:09:47.735148 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-62nmp\" (UniqueName: \"kubernetes.io/projected/72bbf902-847b-45f2-9bb3-57de7a0a88ce-kube-api-access-62nmp\") pod \"frr-k8s-qgjm8\" (UID: \"72bbf902-847b-45f2-9bb3-57de7a0a88ce\") " pod="metallb-system/frr-k8s-qgjm8" Nov 28 07:09:47 crc kubenswrapper[4922]: I1128 07:09:47.735174 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"frr-startup\" (UniqueName: \"kubernetes.io/configmap/72bbf902-847b-45f2-9bb3-57de7a0a88ce-frr-startup\") pod \"frr-k8s-qgjm8\" (UID: \"72bbf902-847b-45f2-9bb3-57de7a0a88ce\") " pod="metallb-system/frr-k8s-qgjm8" Nov 28 07:09:47 crc kubenswrapper[4922]: I1128 07:09:47.735281 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/20cf8473-8d3f-479b-8126-ecf9370b3b75-cert\") pod \"controller-f8648f98b-5zhcz\" (UID: \"20cf8473-8d3f-479b-8126-ecf9370b3b75\") " pod="metallb-system/controller-f8648f98b-5zhcz" Nov 28 07:09:47 crc kubenswrapper[4922]: I1128 07:09:47.735311 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"frr-sockets\" (UniqueName: \"kubernetes.io/empty-dir/72bbf902-847b-45f2-9bb3-57de7a0a88ce-frr-sockets\") pod \"frr-k8s-qgjm8\" (UID: \"72bbf902-847b-45f2-9bb3-57de7a0a88ce\") " pod="metallb-system/frr-k8s-qgjm8" Nov 28 07:09:47 crc kubenswrapper[4922]: I1128 07:09:47.735339 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-d8tzp\" (UniqueName: \"kubernetes.io/projected/33fadc85-85d3-48cc-977c-babb047b9a0c-kube-api-access-d8tzp\") pod \"speaker-ncff2\" (UID: \"33fadc85-85d3-48cc-977c-babb047b9a0c\") " pod="metallb-system/speaker-ncff2" Nov 28 07:09:47 crc kubenswrapper[4922]: I1128 07:09:47.735364 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/33fadc85-85d3-48cc-977c-babb047b9a0c-memberlist\") pod \"speaker-ncff2\" (UID: \"33fadc85-85d3-48cc-977c-babb047b9a0c\") " pod="metallb-system/speaker-ncff2" Nov 28 07:09:47 crc kubenswrapper[4922]: I1128 07:09:47.735388 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/20cf8473-8d3f-479b-8126-ecf9370b3b75-metrics-certs\") pod \"controller-f8648f98b-5zhcz\" (UID: \"20cf8473-8d3f-479b-8126-ecf9370b3b75\") " pod="metallb-system/controller-f8648f98b-5zhcz" Nov 28 07:09:47 crc kubenswrapper[4922]: I1128 07:09:47.735416 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/33fadc85-85d3-48cc-977c-babb047b9a0c-metrics-certs\") pod \"speaker-ncff2\" (UID: \"33fadc85-85d3-48cc-977c-babb047b9a0c\") " pod="metallb-system/speaker-ncff2" Nov 28 07:09:47 crc kubenswrapper[4922]: I1128 07:09:47.735449 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pkg4j\" (UniqueName: \"kubernetes.io/projected/70214104-7571-4422-9e5d-85d690d8469f-kube-api-access-pkg4j\") pod \"frr-k8s-webhook-server-7fcb986d4-g5vcm\" (UID: \"70214104-7571-4422-9e5d-85d690d8469f\") " pod="metallb-system/frr-k8s-webhook-server-7fcb986d4-g5vcm" Nov 28 07:09:47 crc kubenswrapper[4922]: I1128 07:09:47.735476 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"reloader\" (UniqueName: \"kubernetes.io/empty-dir/72bbf902-847b-45f2-9bb3-57de7a0a88ce-reloader\") pod \"frr-k8s-qgjm8\" (UID: \"72bbf902-847b-45f2-9bb3-57de7a0a88ce\") " pod="metallb-system/frr-k8s-qgjm8" Nov 28 07:09:47 crc kubenswrapper[4922]: I1128 07:09:47.735509 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4dskz\" (UniqueName: \"kubernetes.io/projected/20cf8473-8d3f-479b-8126-ecf9370b3b75-kube-api-access-4dskz\") pod 
\"controller-f8648f98b-5zhcz\" (UID: \"20cf8473-8d3f-479b-8126-ecf9370b3b75\") " pod="metallb-system/controller-f8648f98b-5zhcz" Nov 28 07:09:47 crc kubenswrapper[4922]: I1128 07:09:47.735544 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"frr-conf\" (UniqueName: \"kubernetes.io/empty-dir/72bbf902-847b-45f2-9bb3-57de7a0a88ce-frr-conf\") pod \"frr-k8s-qgjm8\" (UID: \"72bbf902-847b-45f2-9bb3-57de7a0a88ce\") " pod="metallb-system/frr-k8s-qgjm8" Nov 28 07:09:47 crc kubenswrapper[4922]: I1128 07:09:47.735566 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics\" (UniqueName: \"kubernetes.io/empty-dir/72bbf902-847b-45f2-9bb3-57de7a0a88ce-metrics\") pod \"frr-k8s-qgjm8\" (UID: \"72bbf902-847b-45f2-9bb3-57de7a0a88ce\") " pod="metallb-system/frr-k8s-qgjm8" Nov 28 07:09:47 crc kubenswrapper[4922]: I1128 07:09:47.736019 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics\" (UniqueName: \"kubernetes.io/empty-dir/72bbf902-847b-45f2-9bb3-57de7a0a88ce-metrics\") pod \"frr-k8s-qgjm8\" (UID: \"72bbf902-847b-45f2-9bb3-57de7a0a88ce\") " pod="metallb-system/frr-k8s-qgjm8" Nov 28 07:09:47 crc kubenswrapper[4922]: E1128 07:09:47.736135 4922 secret.go:188] Couldn't get secret metallb-system/metallb-memberlist: secret "metallb-memberlist" not found Nov 28 07:09:47 crc kubenswrapper[4922]: E1128 07:09:47.736215 4922 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/33fadc85-85d3-48cc-977c-babb047b9a0c-memberlist podName:33fadc85-85d3-48cc-977c-babb047b9a0c nodeName:}" failed. No retries permitted until 2025-11-28 07:09:48.236171764 +0000 UTC m=+1033.156567346 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "memberlist" (UniqueName: "kubernetes.io/secret/33fadc85-85d3-48cc-977c-babb047b9a0c-memberlist") pod "speaker-ncff2" (UID: "33fadc85-85d3-48cc-977c-babb047b9a0c") : secret "metallb-memberlist" not found Nov 28 07:09:47 crc kubenswrapper[4922]: I1128 07:09:47.737497 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metallb-excludel2\" (UniqueName: \"kubernetes.io/configmap/33fadc85-85d3-48cc-977c-babb047b9a0c-metallb-excludel2\") pod \"speaker-ncff2\" (UID: \"33fadc85-85d3-48cc-977c-babb047b9a0c\") " pod="metallb-system/speaker-ncff2" Nov 28 07:09:47 crc kubenswrapper[4922]: I1128 07:09:47.737582 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"frr-startup\" (UniqueName: \"kubernetes.io/configmap/72bbf902-847b-45f2-9bb3-57de7a0a88ce-frr-startup\") pod \"frr-k8s-qgjm8\" (UID: \"72bbf902-847b-45f2-9bb3-57de7a0a88ce\") " pod="metallb-system/frr-k8s-qgjm8" Nov 28 07:09:47 crc kubenswrapper[4922]: I1128 07:09:47.737888 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"reloader\" (UniqueName: \"kubernetes.io/empty-dir/72bbf902-847b-45f2-9bb3-57de7a0a88ce-reloader\") pod \"frr-k8s-qgjm8\" (UID: \"72bbf902-847b-45f2-9bb3-57de7a0a88ce\") " pod="metallb-system/frr-k8s-qgjm8" Nov 28 07:09:47 crc kubenswrapper[4922]: I1128 07:09:47.738252 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"frr-conf\" (UniqueName: \"kubernetes.io/empty-dir/72bbf902-847b-45f2-9bb3-57de7a0a88ce-frr-conf\") pod \"frr-k8s-qgjm8\" (UID: \"72bbf902-847b-45f2-9bb3-57de7a0a88ce\") " pod="metallb-system/frr-k8s-qgjm8" Nov 28 07:09:47 crc kubenswrapper[4922]: I1128 07:09:47.738216 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"frr-sockets\" (UniqueName: 
\"kubernetes.io/empty-dir/72bbf902-847b-45f2-9bb3-57de7a0a88ce-frr-sockets\") pod \"frr-k8s-qgjm8\" (UID: \"72bbf902-847b-45f2-9bb3-57de7a0a88ce\") " pod="metallb-system/frr-k8s-qgjm8" Nov 28 07:09:47 crc kubenswrapper[4922]: I1128 07:09:47.741595 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/70214104-7571-4422-9e5d-85d690d8469f-cert\") pod \"frr-k8s-webhook-server-7fcb986d4-g5vcm\" (UID: \"70214104-7571-4422-9e5d-85d690d8469f\") " pod="metallb-system/frr-k8s-webhook-server-7fcb986d4-g5vcm" Nov 28 07:09:47 crc kubenswrapper[4922]: I1128 07:09:47.741676 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/33fadc85-85d3-48cc-977c-babb047b9a0c-metrics-certs\") pod \"speaker-ncff2\" (UID: \"33fadc85-85d3-48cc-977c-babb047b9a0c\") " pod="metallb-system/speaker-ncff2" Nov 28 07:09:47 crc kubenswrapper[4922]: I1128 07:09:47.746803 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/72bbf902-847b-45f2-9bb3-57de7a0a88ce-metrics-certs\") pod \"frr-k8s-qgjm8\" (UID: \"72bbf902-847b-45f2-9bb3-57de7a0a88ce\") " pod="metallb-system/frr-k8s-qgjm8" Nov 28 07:09:47 crc kubenswrapper[4922]: I1128 07:09:47.754883 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-62nmp\" (UniqueName: \"kubernetes.io/projected/72bbf902-847b-45f2-9bb3-57de7a0a88ce-kube-api-access-62nmp\") pod \"frr-k8s-qgjm8\" (UID: \"72bbf902-847b-45f2-9bb3-57de7a0a88ce\") " pod="metallb-system/frr-k8s-qgjm8" Nov 28 07:09:47 crc kubenswrapper[4922]: I1128 07:09:47.756900 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-d8tzp\" (UniqueName: \"kubernetes.io/projected/33fadc85-85d3-48cc-977c-babb047b9a0c-kube-api-access-d8tzp\") pod \"speaker-ncff2\" (UID: \"33fadc85-85d3-48cc-977c-babb047b9a0c\") " pod="metallb-system/speaker-ncff2" Nov 28 07:09:47 crc kubenswrapper[4922]: I1128 07:09:47.760404 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pkg4j\" (UniqueName: \"kubernetes.io/projected/70214104-7571-4422-9e5d-85d690d8469f-kube-api-access-pkg4j\") pod \"frr-k8s-webhook-server-7fcb986d4-g5vcm\" (UID: \"70214104-7571-4422-9e5d-85d690d8469f\") " pod="metallb-system/frr-k8s-webhook-server-7fcb986d4-g5vcm" Nov 28 07:09:47 crc kubenswrapper[4922]: I1128 07:09:47.799540 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/frr-k8s-qgjm8" Nov 28 07:09:47 crc kubenswrapper[4922]: I1128 07:09:47.818963 4922 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="metallb-system/frr-k8s-webhook-server-7fcb986d4-g5vcm" Nov 28 07:09:47 crc kubenswrapper[4922]: I1128 07:09:47.836798 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/20cf8473-8d3f-479b-8126-ecf9370b3b75-cert\") pod \"controller-f8648f98b-5zhcz\" (UID: \"20cf8473-8d3f-479b-8126-ecf9370b3b75\") " pod="metallb-system/controller-f8648f98b-5zhcz" Nov 28 07:09:47 crc kubenswrapper[4922]: I1128 07:09:47.836849 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/20cf8473-8d3f-479b-8126-ecf9370b3b75-metrics-certs\") pod \"controller-f8648f98b-5zhcz\" (UID: \"20cf8473-8d3f-479b-8126-ecf9370b3b75\") " pod="metallb-system/controller-f8648f98b-5zhcz" Nov 28 07:09:47 crc kubenswrapper[4922]: I1128 07:09:47.836884 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4dskz\" (UniqueName: \"kubernetes.io/projected/20cf8473-8d3f-479b-8126-ecf9370b3b75-kube-api-access-4dskz\") pod \"controller-f8648f98b-5zhcz\" (UID: \"20cf8473-8d3f-479b-8126-ecf9370b3b75\") " pod="metallb-system/controller-f8648f98b-5zhcz" Nov 28 07:09:47 crc kubenswrapper[4922]: I1128 07:09:47.838593 4922 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-webhook-cert" Nov 28 07:09:47 crc kubenswrapper[4922]: I1128 07:09:47.841623 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/20cf8473-8d3f-479b-8126-ecf9370b3b75-metrics-certs\") pod \"controller-f8648f98b-5zhcz\" (UID: \"20cf8473-8d3f-479b-8126-ecf9370b3b75\") " pod="metallb-system/controller-f8648f98b-5zhcz" Nov 28 07:09:47 crc kubenswrapper[4922]: I1128 07:09:47.850126 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/20cf8473-8d3f-479b-8126-ecf9370b3b75-cert\") pod \"controller-f8648f98b-5zhcz\" (UID: \"20cf8473-8d3f-479b-8126-ecf9370b3b75\") " pod="metallb-system/controller-f8648f98b-5zhcz" Nov 28 07:09:47 crc kubenswrapper[4922]: I1128 07:09:47.857987 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4dskz\" (UniqueName: \"kubernetes.io/projected/20cf8473-8d3f-479b-8126-ecf9370b3b75-kube-api-access-4dskz\") pod \"controller-f8648f98b-5zhcz\" (UID: \"20cf8473-8d3f-479b-8126-ecf9370b3b75\") " pod="metallb-system/controller-f8648f98b-5zhcz" Nov 28 07:09:47 crc kubenswrapper[4922]: I1128 07:09:47.883583 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/controller-f8648f98b-5zhcz" Nov 28 07:09:48 crc kubenswrapper[4922]: I1128 07:09:48.250048 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/33fadc85-85d3-48cc-977c-babb047b9a0c-memberlist\") pod \"speaker-ncff2\" (UID: \"33fadc85-85d3-48cc-977c-babb047b9a0c\") " pod="metallb-system/speaker-ncff2" Nov 28 07:09:48 crc kubenswrapper[4922]: E1128 07:09:48.250522 4922 secret.go:188] Couldn't get secret metallb-system/metallb-memberlist: secret "metallb-memberlist" not found Nov 28 07:09:48 crc kubenswrapper[4922]: E1128 07:09:48.250582 4922 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/33fadc85-85d3-48cc-977c-babb047b9a0c-memberlist podName:33fadc85-85d3-48cc-977c-babb047b9a0c nodeName:}" failed. 
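
The memberlist mount above already failed once and was retried after 500ms; it has just failed again, and the next entry defers the following retry by 1s, i.e. the backoff doubles between attempts. A minimal sketch of that doubling-with-cap policy; the 2-minute cap is an assumption, not a value taken from the log.

package main

import (
	"fmt"
	"time"
)

type backoff struct {
	initial, max, next time.Duration
}

// durationBeforeRetry returns the current wait and doubles it for the
// next failure, up to the cap.
func (b *backoff) durationBeforeRetry() time.Duration {
	if b.next == 0 {
		b.next = b.initial
	}
	d := b.next
	if b.next < b.max {
		b.next *= 2
		if b.next > b.max {
			b.next = b.max
		}
	}
	return d
}

func main() {
	b := &backoff{initial: 500 * time.Millisecond, max: 2 * time.Minute}
	for i := 0; i < 4; i++ {
		fmt.Printf("attempt %d: no retries permitted for %v\n", i+1, b.durationBeforeRetry())
	}
	// prints 500ms, 1s, 2s, 4s — matching the 500ms → 1s pattern logged above
}
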
No retries permitted until 2025-11-28 07:09:49.250561885 +0000 UTC m=+1034.170957467 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "memberlist" (UniqueName: "kubernetes.io/secret/33fadc85-85d3-48cc-977c-babb047b9a0c-memberlist") pod "speaker-ncff2" (UID: "33fadc85-85d3-48cc-977c-babb047b9a0c") : secret "metallb-memberlist" not found Nov 28 07:09:48 crc kubenswrapper[4922]: I1128 07:09:48.250690 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/frr-k8s-webhook-server-7fcb986d4-g5vcm"] Nov 28 07:09:48 crc kubenswrapper[4922]: I1128 07:09:48.294575 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/controller-f8648f98b-5zhcz"] Nov 28 07:09:48 crc kubenswrapper[4922]: I1128 07:09:48.446143 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/controller-f8648f98b-5zhcz" event={"ID":"20cf8473-8d3f-479b-8126-ecf9370b3b75","Type":"ContainerStarted","Data":"01a12dba7067f94579db07dfcf4b3eda99b445232111dcf49bf47d13d336d343"} Nov 28 07:09:48 crc kubenswrapper[4922]: I1128 07:09:48.446192 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/controller-f8648f98b-5zhcz" event={"ID":"20cf8473-8d3f-479b-8126-ecf9370b3b75","Type":"ContainerStarted","Data":"16e8d4d4a3f46a1d4a4c989fabd825edff31b7933cbea36edd1244b462a89965"} Nov 28 07:09:48 crc kubenswrapper[4922]: I1128 07:09:48.447196 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-webhook-server-7fcb986d4-g5vcm" event={"ID":"70214104-7571-4422-9e5d-85d690d8469f","Type":"ContainerStarted","Data":"308697ea71d0f4bd2882dd1649ec234b825c5825803f1dc1ae843e6d2a6525a5"} Nov 28 07:09:48 crc kubenswrapper[4922]: I1128 07:09:48.448716 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-qgjm8" event={"ID":"72bbf902-847b-45f2-9bb3-57de7a0a88ce","Type":"ContainerStarted","Data":"53537306de917d95af14d7263dbd87afdd150796d08b759bea4a641b67fff2bb"} Nov 28 07:09:49 crc kubenswrapper[4922]: I1128 07:09:49.262316 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/33fadc85-85d3-48cc-977c-babb047b9a0c-memberlist\") pod \"speaker-ncff2\" (UID: \"33fadc85-85d3-48cc-977c-babb047b9a0c\") " pod="metallb-system/speaker-ncff2" Nov 28 07:09:49 crc kubenswrapper[4922]: I1128 07:09:49.269983 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/33fadc85-85d3-48cc-977c-babb047b9a0c-memberlist\") pod \"speaker-ncff2\" (UID: \"33fadc85-85d3-48cc-977c-babb047b9a0c\") " pod="metallb-system/speaker-ncff2" Nov 28 07:09:49 crc kubenswrapper[4922]: I1128 07:09:49.362300 4922 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="metallb-system/speaker-ncff2"
Nov 28 07:09:49 crc kubenswrapper[4922]: W1128 07:09:49.378727 4922 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod33fadc85_85d3_48cc_977c_babb047b9a0c.slice/crio-fee57925f8f9b80a83c1b2c9ec350f930f4834f844205a7748eb2bb974f5c4ad WatchSource:0}: Error finding container fee57925f8f9b80a83c1b2c9ec350f930f4834f844205a7748eb2bb974f5c4ad: Status 404 returned error can't find the container with id fee57925f8f9b80a83c1b2c9ec350f930f4834f844205a7748eb2bb974f5c4ad
Nov 28 07:09:49 crc kubenswrapper[4922]: I1128 07:09:49.455595 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/controller-f8648f98b-5zhcz" event={"ID":"20cf8473-8d3f-479b-8126-ecf9370b3b75","Type":"ContainerStarted","Data":"25ae7704bbc608572ac032feb5e088ea4e5f78fb9bd3c86d2f7af0cca8d0e7c5"}
Nov 28 07:09:49 crc kubenswrapper[4922]: I1128 07:09:49.455714 4922 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/controller-f8648f98b-5zhcz"
Nov 28 07:09:49 crc kubenswrapper[4922]: I1128 07:09:49.456885 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/speaker-ncff2" event={"ID":"33fadc85-85d3-48cc-977c-babb047b9a0c","Type":"ContainerStarted","Data":"fee57925f8f9b80a83c1b2c9ec350f930f4834f844205a7748eb2bb974f5c4ad"}
Nov 28 07:09:49 crc kubenswrapper[4922]: I1128 07:09:49.480080 4922 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/controller-f8648f98b-5zhcz" podStartSLOduration=2.480056991 podStartE2EDuration="2.480056991s" podCreationTimestamp="2025-11-28 07:09:47 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 07:09:49.472589058 +0000 UTC m=+1034.392984640" watchObservedRunningTime="2025-11-28 07:09:49.480056991 +0000 UTC m=+1034.400452593"
Nov 28 07:09:50 crc kubenswrapper[4922]: I1128 07:09:50.465823 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/speaker-ncff2" event={"ID":"33fadc85-85d3-48cc-977c-babb047b9a0c","Type":"ContainerStarted","Data":"eaa8578ac71405f19541f460d1305a4ed9a4b918ff6364038cc887655ac42071"}
Nov 28 07:09:50 crc kubenswrapper[4922]: I1128 07:09:50.466138 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/speaker-ncff2" event={"ID":"33fadc85-85d3-48cc-977c-babb047b9a0c","Type":"ContainerStarted","Data":"1370d9d2d3672f5e7486fdec075eb5a031303cd305bda0963ea6a0ddf5336df5"}
Nov 28 07:09:50 crc kubenswrapper[4922]: I1128 07:09:50.487557 4922 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/speaker-ncff2" podStartSLOduration=3.487538474 podStartE2EDuration="3.487538474s" podCreationTimestamp="2025-11-28 07:09:47 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 07:09:50.480171545 +0000 UTC m=+1035.400567147" watchObservedRunningTime="2025-11-28 07:09:50.487538474 +0000 UTC m=+1035.407934076"
Nov 28 07:09:51 crc kubenswrapper[4922]: I1128 07:09:51.470787 4922 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/speaker-ncff2"
Nov 28 07:09:55 crc kubenswrapper[4922]: I1128 07:09:55.509252 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-webhook-server-7fcb986d4-g5vcm" event={"ID":"70214104-7571-4422-9e5d-85d690d8469f","Type":"ContainerStarted","Data":"c0d078d125da23191ba8e2717aac8e6d8bbe8cb166e1b76271941bdfff5f3b04"}
Nov 28 07:09:55 crc kubenswrapper[4922]: I1128 07:09:55.509978 4922 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/frr-k8s-webhook-server-7fcb986d4-g5vcm"
Nov 28 07:09:55 crc kubenswrapper[4922]: I1128 07:09:55.511448 4922 generic.go:334] "Generic (PLEG): container finished" podID="72bbf902-847b-45f2-9bb3-57de7a0a88ce" containerID="486e7c6af0535aacd9fe53c175a23be6b3d28c6e097e45fea7774a8e2b349838" exitCode=0
Nov 28 07:09:55 crc kubenswrapper[4922]: I1128 07:09:55.511486 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-qgjm8" event={"ID":"72bbf902-847b-45f2-9bb3-57de7a0a88ce","Type":"ContainerDied","Data":"486e7c6af0535aacd9fe53c175a23be6b3d28c6e097e45fea7774a8e2b349838"}
Nov 28 07:09:55 crc kubenswrapper[4922]: I1128 07:09:55.534429 4922 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/frr-k8s-webhook-server-7fcb986d4-g5vcm" podStartSLOduration=2.155485191 podStartE2EDuration="8.534402969s" podCreationTimestamp="2025-11-28 07:09:47 +0000 UTC" firstStartedPulling="2025-11-28 07:09:48.253731681 +0000 UTC m=+1033.174127263" lastFinishedPulling="2025-11-28 07:09:54.632649439 +0000 UTC m=+1039.553045041" observedRunningTime="2025-11-28 07:09:55.5296443 +0000 UTC m=+1040.450039912" watchObservedRunningTime="2025-11-28 07:09:55.534402969 +0000 UTC m=+1040.454798571"
Nov 28 07:09:56 crc kubenswrapper[4922]: I1128 07:09:56.523648 4922 generic.go:334] "Generic (PLEG): container finished" podID="72bbf902-847b-45f2-9bb3-57de7a0a88ce" containerID="118c55f956fd711243e59fa3e7ad9e9903eb2243ba5984a0e481c248f2e7353a" exitCode=0
Nov 28 07:09:56 crc kubenswrapper[4922]: I1128 07:09:56.523757 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-qgjm8" event={"ID":"72bbf902-847b-45f2-9bb3-57de7a0a88ce","Type":"ContainerDied","Data":"118c55f956fd711243e59fa3e7ad9e9903eb2243ba5984a0e481c248f2e7353a"}
Nov 28 07:09:57 crc kubenswrapper[4922]: I1128 07:09:57.534760 4922 generic.go:334] "Generic (PLEG): container finished" podID="72bbf902-847b-45f2-9bb3-57de7a0a88ce" containerID="b4677933c08561cea7f31722e9dcd99948e26d259047927a41e5c6345c02665d" exitCode=0
Nov 28 07:09:57 crc kubenswrapper[4922]: I1128 07:09:57.534840 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-qgjm8" event={"ID":"72bbf902-847b-45f2-9bb3-57de7a0a88ce","Type":"ContainerDied","Data":"b4677933c08561cea7f31722e9dcd99948e26d259047927a41e5c6345c02665d"}
Nov 28 07:09:58 crc kubenswrapper[4922]: I1128 07:09:58.548660 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-qgjm8" event={"ID":"72bbf902-847b-45f2-9bb3-57de7a0a88ce","Type":"ContainerStarted","Data":"824a3b11bf1e900575006f1ef9aab7827b8535447f584605e8efbc8ea6698585"}
Nov 28 07:09:58 crc kubenswrapper[4922]: I1128 07:09:58.549047 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-qgjm8" event={"ID":"72bbf902-847b-45f2-9bb3-57de7a0a88ce","Type":"ContainerStarted","Data":"0abe0e9a146d841c456688dc1ed295d6ce8f404f16cbd2ae4618b75d85f2736e"}
Nov 28 07:09:58 crc kubenswrapper[4922]: I1128 07:09:58.549058 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-qgjm8" event={"ID":"72bbf902-847b-45f2-9bb3-57de7a0a88ce","Type":"ContainerStarted","Data":"575337b9888ad0378a19987f06b9b3712bc6e4a9d721835595c09af66b67cbaa"}
Nov 28 07:09:58 crc kubenswrapper[4922]: I1128 07:09:58.549089 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-qgjm8" event={"ID":"72bbf902-847b-45f2-9bb3-57de7a0a88ce","Type":"ContainerStarted","Data":"112dce305fdb895bf76ce1ff531c212c804b5ef5a858488ae9a35024511d79e1"}
Nov 28 07:09:58 crc kubenswrapper[4922]: I1128 07:09:58.549099 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-qgjm8" event={"ID":"72bbf902-847b-45f2-9bb3-57de7a0a88ce","Type":"ContainerStarted","Data":"bfbf2208a2ba63634f31dd6fdb59ae2617f0a469bfbbe8b5d007ebfba09a357b"}
Nov 28 07:09:59 crc kubenswrapper[4922]: I1128 07:09:59.373034 4922 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/speaker-ncff2"
Nov 28 07:10:00 crc kubenswrapper[4922]: I1128 07:10:00.569743 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-qgjm8" event={"ID":"72bbf902-847b-45f2-9bb3-57de7a0a88ce","Type":"ContainerStarted","Data":"30c34894d174585d6fb0201dd3952618c69bb8d1c4f683a3dd185ecdec6f6a50"}
Nov 28 07:10:00 crc kubenswrapper[4922]: I1128 07:10:00.570113 4922 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/frr-k8s-qgjm8"
Nov 28 07:10:00 crc kubenswrapper[4922]: I1128 07:10:00.593714 4922 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/frr-k8s-qgjm8" podStartSLOduration=6.959126927 podStartE2EDuration="13.593693789s" podCreationTimestamp="2025-11-28 07:09:47 +0000 UTC" firstStartedPulling="2025-11-28 07:09:47.971539818 +0000 UTC m=+1032.891935400" lastFinishedPulling="2025-11-28 07:09:54.60610667 +0000 UTC m=+1039.526502262" observedRunningTime="2025-11-28 07:10:00.591678304 +0000 UTC m=+1045.512073886" watchObservedRunningTime="2025-11-28 07:10:00.593693789 +0000 UTC m=+1045.514089391"
Nov 28 07:10:00 crc kubenswrapper[4922]: I1128 07:10:00.929871 4922 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931ah62vp"]
Nov 28 07:10:00 crc kubenswrapper[4922]: I1128 07:10:00.930880 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931ah62vp"
Nov 28 07:10:00 crc kubenswrapper[4922]: I1128 07:10:00.933712 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"default-dockercfg-vmwhc"
Nov 28 07:10:00 crc kubenswrapper[4922]: I1128 07:10:00.952635 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931ah62vp"]
Nov 28 07:10:01 crc kubenswrapper[4922]: I1128 07:10:01.043728 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/560410b8-61f1-465c-a05e-edc9b25f15c5-bundle\") pod \"1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931ah62vp\" (UID: \"560410b8-61f1-465c-a05e-edc9b25f15c5\") " pod="openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931ah62vp"
Nov 28 07:10:01 crc kubenswrapper[4922]: I1128 07:10:01.043803 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/560410b8-61f1-465c-a05e-edc9b25f15c5-util\") pod \"1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931ah62vp\" (UID: \"560410b8-61f1-465c-a05e-edc9b25f15c5\") " pod="openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931ah62vp"
Nov 28 07:10:01 crc kubenswrapper[4922]: I1128 07:10:01.043856 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-trkrk\" (UniqueName: \"kubernetes.io/projected/560410b8-61f1-465c-a05e-edc9b25f15c5-kube-api-access-trkrk\") pod \"1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931ah62vp\" (UID: \"560410b8-61f1-465c-a05e-edc9b25f15c5\") " pod="openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931ah62vp"
Nov 28 07:10:01 crc kubenswrapper[4922]: I1128 07:10:01.144961 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-trkrk\" (UniqueName: \"kubernetes.io/projected/560410b8-61f1-465c-a05e-edc9b25f15c5-kube-api-access-trkrk\") pod \"1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931ah62vp\" (UID: \"560410b8-61f1-465c-a05e-edc9b25f15c5\") " pod="openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931ah62vp"
Nov 28 07:10:01 crc kubenswrapper[4922]: I1128 07:10:01.145135 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/560410b8-61f1-465c-a05e-edc9b25f15c5-bundle\") pod \"1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931ah62vp\" (UID: \"560410b8-61f1-465c-a05e-edc9b25f15c5\") " pod="openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931ah62vp"
Nov 28 07:10:01 crc kubenswrapper[4922]: I1128 07:10:01.145266 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/560410b8-61f1-465c-a05e-edc9b25f15c5-util\") pod \"1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931ah62vp\" (UID: \"560410b8-61f1-465c-a05e-edc9b25f15c5\") " pod="openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931ah62vp"
Nov 28 07:10:01 crc kubenswrapper[4922]: I1128 07:10:01.145713 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/560410b8-61f1-465c-a05e-edc9b25f15c5-bundle\") pod \"1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931ah62vp\" (UID: \"560410b8-61f1-465c-a05e-edc9b25f15c5\") " pod="openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931ah62vp"
Nov 28 07:10:01 crc kubenswrapper[4922]: I1128 07:10:01.145871 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/560410b8-61f1-465c-a05e-edc9b25f15c5-util\") pod \"1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931ah62vp\" (UID: \"560410b8-61f1-465c-a05e-edc9b25f15c5\") " pod="openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931ah62vp"
Nov 28 07:10:01 crc kubenswrapper[4922]: I1128 07:10:01.171209 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-trkrk\" (UniqueName: \"kubernetes.io/projected/560410b8-61f1-465c-a05e-edc9b25f15c5-kube-api-access-trkrk\") pod \"1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931ah62vp\" (UID: \"560410b8-61f1-465c-a05e-edc9b25f15c5\") " pod="openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931ah62vp"
Nov 28 07:10:01 crc kubenswrapper[4922]: I1128 07:10:01.297480 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931ah62vp"
Nov 28 07:10:01 crc kubenswrapper[4922]: I1128 07:10:01.510588 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931ah62vp"]
Nov 28 07:10:01 crc kubenswrapper[4922]: I1128 07:10:01.578094 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931ah62vp" event={"ID":"560410b8-61f1-465c-a05e-edc9b25f15c5","Type":"ContainerStarted","Data":"563533f78ccaff9fa6accaf003e0aad69f11dc1b9c358471a8b91adeae15b069"}
Nov 28 07:10:02 crc kubenswrapper[4922]: I1128 07:10:02.586790 4922 generic.go:334] "Generic (PLEG): container finished" podID="560410b8-61f1-465c-a05e-edc9b25f15c5" containerID="0d7e24efbba6f0ba9a12e6851e011abe5e953daad3a77d38550f532f300112f9" exitCode=0
Nov 28 07:10:02 crc kubenswrapper[4922]: I1128 07:10:02.586849 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931ah62vp" event={"ID":"560410b8-61f1-465c-a05e-edc9b25f15c5","Type":"ContainerDied","Data":"0d7e24efbba6f0ba9a12e6851e011abe5e953daad3a77d38550f532f300112f9"}
Nov 28 07:10:02 crc kubenswrapper[4922]: I1128 07:10:02.800264 4922 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="metallb-system/frr-k8s-qgjm8"
Nov 28 07:10:02 crc kubenswrapper[4922]: I1128 07:10:02.840173 4922 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="metallb-system/frr-k8s-qgjm8"
Nov 28 07:10:07 crc kubenswrapper[4922]: I1128 07:10:07.631705 4922 generic.go:334] "Generic (PLEG): container finished" podID="560410b8-61f1-465c-a05e-edc9b25f15c5" containerID="e35470094b6cc081a21c1321e7750a6e2def546b34c09063927fc272f7d89100" exitCode=0
Nov 28 07:10:07 crc kubenswrapper[4922]: I1128 07:10:07.631785 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931ah62vp" event={"ID":"560410b8-61f1-465c-a05e-edc9b25f15c5","Type":"ContainerDied","Data":"e35470094b6cc081a21c1321e7750a6e2def546b34c09063927fc272f7d89100"}
Nov 28 07:10:07 crc kubenswrapper[4922]: I1128 07:10:07.807154 4922 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/frr-k8s-qgjm8"
Nov 28 07:10:07 crc kubenswrapper[4922]: I1128 07:10:07.831469 4922 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/frr-k8s-webhook-server-7fcb986d4-g5vcm"
Nov 28 07:10:07 crc kubenswrapper[4922]: I1128 07:10:07.918611 4922 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/controller-f8648f98b-5zhcz"
Nov 28 07:10:08 crc kubenswrapper[4922]: I1128 07:10:08.641385 4922 generic.go:334] "Generic (PLEG): container finished" podID="560410b8-61f1-465c-a05e-edc9b25f15c5" containerID="cb03ef061bb1899a2d29c26f3ee1e898d80e58d8914cd7ce02418013cc5ed9fd" exitCode=0
Nov 28 07:10:08 crc kubenswrapper[4922]: I1128 07:10:08.641424 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931ah62vp" event={"ID":"560410b8-61f1-465c-a05e-edc9b25f15c5","Type":"ContainerDied","Data":"cb03ef061bb1899a2d29c26f3ee1e898d80e58d8914cd7ce02418013cc5ed9fd"}
Nov 28 07:10:09 crc kubenswrapper[4922]: I1128 07:10:09.960530 4922 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931ah62vp"
Nov 28 07:10:10 crc kubenswrapper[4922]: I1128 07:10:10.112588 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/560410b8-61f1-465c-a05e-edc9b25f15c5-bundle\") pod \"560410b8-61f1-465c-a05e-edc9b25f15c5\" (UID: \"560410b8-61f1-465c-a05e-edc9b25f15c5\") "
Nov 28 07:10:10 crc kubenswrapper[4922]: I1128 07:10:10.112647 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-trkrk\" (UniqueName: \"kubernetes.io/projected/560410b8-61f1-465c-a05e-edc9b25f15c5-kube-api-access-trkrk\") pod \"560410b8-61f1-465c-a05e-edc9b25f15c5\" (UID: \"560410b8-61f1-465c-a05e-edc9b25f15c5\") "
Nov 28 07:10:10 crc kubenswrapper[4922]: I1128 07:10:10.112677 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/560410b8-61f1-465c-a05e-edc9b25f15c5-util\") pod \"560410b8-61f1-465c-a05e-edc9b25f15c5\" (UID: \"560410b8-61f1-465c-a05e-edc9b25f15c5\") "
Nov 28 07:10:10 crc kubenswrapper[4922]: I1128 07:10:10.114070 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/560410b8-61f1-465c-a05e-edc9b25f15c5-bundle" (OuterVolumeSpecName: "bundle") pod "560410b8-61f1-465c-a05e-edc9b25f15c5" (UID: "560410b8-61f1-465c-a05e-edc9b25f15c5"). InnerVolumeSpecName "bundle". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 28 07:10:10 crc kubenswrapper[4922]: I1128 07:10:10.123052 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/560410b8-61f1-465c-a05e-edc9b25f15c5-util" (OuterVolumeSpecName: "util") pod "560410b8-61f1-465c-a05e-edc9b25f15c5" (UID: "560410b8-61f1-465c-a05e-edc9b25f15c5"). InnerVolumeSpecName "util". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 28 07:10:10 crc kubenswrapper[4922]: I1128 07:10:10.124357 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/560410b8-61f1-465c-a05e-edc9b25f15c5-kube-api-access-trkrk" (OuterVolumeSpecName: "kube-api-access-trkrk") pod "560410b8-61f1-465c-a05e-edc9b25f15c5" (UID: "560410b8-61f1-465c-a05e-edc9b25f15c5"). InnerVolumeSpecName "kube-api-access-trkrk". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 28 07:10:10 crc kubenswrapper[4922]: I1128 07:10:10.214533 4922 reconciler_common.go:293] "Volume detached for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/560410b8-61f1-465c-a05e-edc9b25f15c5-bundle\") on node \"crc\" DevicePath \"\""
Nov 28 07:10:10 crc kubenswrapper[4922]: I1128 07:10:10.214566 4922 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-trkrk\" (UniqueName: \"kubernetes.io/projected/560410b8-61f1-465c-a05e-edc9b25f15c5-kube-api-access-trkrk\") on node \"crc\" DevicePath \"\""
Nov 28 07:10:10 crc kubenswrapper[4922]: I1128 07:10:10.214576 4922 reconciler_common.go:293] "Volume detached for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/560410b8-61f1-465c-a05e-edc9b25f15c5-util\") on node \"crc\" DevicePath \"\""
Nov 28 07:10:10 crc kubenswrapper[4922]: I1128 07:10:10.657699 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931ah62vp" event={"ID":"560410b8-61f1-465c-a05e-edc9b25f15c5","Type":"ContainerDied","Data":"563533f78ccaff9fa6accaf003e0aad69f11dc1b9c358471a8b91adeae15b069"}
Nov 28 07:10:10 crc kubenswrapper[4922]: I1128 07:10:10.657760 4922 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="563533f78ccaff9fa6accaf003e0aad69f11dc1b9c358471a8b91adeae15b069"
Nov 28 07:10:10 crc kubenswrapper[4922]: I1128 07:10:10.657775 4922 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931ah62vp"
Nov 28 07:10:14 crc kubenswrapper[4922]: I1128 07:10:14.377657 4922 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["cert-manager-operator/cert-manager-operator-controller-manager-64cf6dff88-q4lzt"]
Nov 28 07:10:14 crc kubenswrapper[4922]: E1128 07:10:14.378472 4922 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="560410b8-61f1-465c-a05e-edc9b25f15c5" containerName="extract"
Nov 28 07:10:14 crc kubenswrapper[4922]: I1128 07:10:14.378490 4922 state_mem.go:107] "Deleted CPUSet assignment" podUID="560410b8-61f1-465c-a05e-edc9b25f15c5" containerName="extract"
Nov 28 07:10:14 crc kubenswrapper[4922]: E1128 07:10:14.378512 4922 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="560410b8-61f1-465c-a05e-edc9b25f15c5" containerName="util"
Nov 28 07:10:14 crc kubenswrapper[4922]: I1128 07:10:14.378521 4922 state_mem.go:107] "Deleted CPUSet assignment" podUID="560410b8-61f1-465c-a05e-edc9b25f15c5" containerName="util"
Nov 28 07:10:14 crc kubenswrapper[4922]: E1128 07:10:14.378530 4922 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="560410b8-61f1-465c-a05e-edc9b25f15c5" containerName="pull"
Nov 28 07:10:14 crc kubenswrapper[4922]: I1128 07:10:14.378538 4922 state_mem.go:107] "Deleted CPUSet assignment" podUID="560410b8-61f1-465c-a05e-edc9b25f15c5" containerName="pull"
Nov 28 07:10:14 crc kubenswrapper[4922]: I1128 07:10:14.378652 4922 memory_manager.go:354] "RemoveStaleState removing state" podUID="560410b8-61f1-465c-a05e-edc9b25f15c5" containerName="extract"
Nov 28 07:10:14 crc kubenswrapper[4922]: I1128 07:10:14.379128 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager-operator/cert-manager-operator-controller-manager-64cf6dff88-q4lzt"
Nov 28 07:10:14 crc kubenswrapper[4922]: I1128 07:10:14.381775 4922 reflector.go:368] Caches populated for *v1.ConfigMap from object-"cert-manager-operator"/"openshift-service-ca.crt"
Nov 28 07:10:14 crc kubenswrapper[4922]: I1128 07:10:14.382282 4922 reflector.go:368] Caches populated for *v1.Secret from object-"cert-manager-operator"/"cert-manager-operator-controller-manager-dockercfg-d8js5"
Nov 28 07:10:14 crc kubenswrapper[4922]: I1128 07:10:14.382398 4922 reflector.go:368] Caches populated for *v1.ConfigMap from object-"cert-manager-operator"/"kube-root-ca.crt"
Nov 28 07:10:14 crc kubenswrapper[4922]: I1128 07:10:14.393273 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager-operator/cert-manager-operator-controller-manager-64cf6dff88-q4lzt"]
Nov 28 07:10:14 crc kubenswrapper[4922]: I1128 07:10:14.471072 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tmp\" (UniqueName: \"kubernetes.io/empty-dir/112a086a-8225-4b78-9006-2e40dde3268b-tmp\") pod \"cert-manager-operator-controller-manager-64cf6dff88-q4lzt\" (UID: \"112a086a-8225-4b78-9006-2e40dde3268b\") " pod="cert-manager-operator/cert-manager-operator-controller-manager-64cf6dff88-q4lzt"
Nov 28 07:10:14 crc kubenswrapper[4922]: I1128 07:10:14.471132 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tsdf4\" (UniqueName: \"kubernetes.io/projected/112a086a-8225-4b78-9006-2e40dde3268b-kube-api-access-tsdf4\") pod \"cert-manager-operator-controller-manager-64cf6dff88-q4lzt\" (UID: \"112a086a-8225-4b78-9006-2e40dde3268b\") " pod="cert-manager-operator/cert-manager-operator-controller-manager-64cf6dff88-q4lzt"
Nov 28 07:10:14 crc kubenswrapper[4922]: I1128 07:10:14.573747 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tsdf4\" (UniqueName: \"kubernetes.io/projected/112a086a-8225-4b78-9006-2e40dde3268b-kube-api-access-tsdf4\") pod \"cert-manager-operator-controller-manager-64cf6dff88-q4lzt\" (UID: \"112a086a-8225-4b78-9006-2e40dde3268b\") " pod="cert-manager-operator/cert-manager-operator-controller-manager-64cf6dff88-q4lzt"
Nov 28 07:10:14 crc kubenswrapper[4922]: I1128 07:10:14.573885 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tmp\" (UniqueName: \"kubernetes.io/empty-dir/112a086a-8225-4b78-9006-2e40dde3268b-tmp\") pod \"cert-manager-operator-controller-manager-64cf6dff88-q4lzt\" (UID: \"112a086a-8225-4b78-9006-2e40dde3268b\") " pod="cert-manager-operator/cert-manager-operator-controller-manager-64cf6dff88-q4lzt"
Nov 28 07:10:14 crc kubenswrapper[4922]: I1128 07:10:14.574369 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tmp\" (UniqueName: \"kubernetes.io/empty-dir/112a086a-8225-4b78-9006-2e40dde3268b-tmp\") pod \"cert-manager-operator-controller-manager-64cf6dff88-q4lzt\" (UID: \"112a086a-8225-4b78-9006-2e40dde3268b\") " pod="cert-manager-operator/cert-manager-operator-controller-manager-64cf6dff88-q4lzt"
Nov 28 07:10:14 crc kubenswrapper[4922]: I1128 07:10:14.603176 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tsdf4\" (UniqueName: \"kubernetes.io/projected/112a086a-8225-4b78-9006-2e40dde3268b-kube-api-access-tsdf4\") pod \"cert-manager-operator-controller-manager-64cf6dff88-q4lzt\" (UID: \"112a086a-8225-4b78-9006-2e40dde3268b\") " pod="cert-manager-operator/cert-manager-operator-controller-manager-64cf6dff88-q4lzt"
Nov 28 07:10:14 crc kubenswrapper[4922]: I1128 07:10:14.698317 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager-operator/cert-manager-operator-controller-manager-64cf6dff88-q4lzt"
Nov 28 07:10:15 crc kubenswrapper[4922]: I1128 07:10:15.095336 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager-operator/cert-manager-operator-controller-manager-64cf6dff88-q4lzt"]
Nov 28 07:10:15 crc kubenswrapper[4922]: W1128 07:10:15.104810 4922 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod112a086a_8225_4b78_9006_2e40dde3268b.slice/crio-3a8d3c825cb17789110f14b1ccf852de66696a487418d5f259fb452b14006b6e WatchSource:0}: Error finding container 3a8d3c825cb17789110f14b1ccf852de66696a487418d5f259fb452b14006b6e: Status 404 returned error can't find the container with id 3a8d3c825cb17789110f14b1ccf852de66696a487418d5f259fb452b14006b6e
Nov 28 07:10:15 crc kubenswrapper[4922]: I1128 07:10:15.741278 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager-operator/cert-manager-operator-controller-manager-64cf6dff88-q4lzt" event={"ID":"112a086a-8225-4b78-9006-2e40dde3268b","Type":"ContainerStarted","Data":"3a8d3c825cb17789110f14b1ccf852de66696a487418d5f259fb452b14006b6e"}
Nov 28 07:10:25 crc kubenswrapper[4922]: I1128 07:10:25.806382 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager-operator/cert-manager-operator-controller-manager-64cf6dff88-q4lzt" event={"ID":"112a086a-8225-4b78-9006-2e40dde3268b","Type":"ContainerStarted","Data":"711c26f7d782c30395929b1624c3107125cf6e60a78c82b728c7a54ec4ce9118"}
Nov 28 07:10:29 crc kubenswrapper[4922]: I1128 07:10:29.365740 4922 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="cert-manager-operator/cert-manager-operator-controller-manager-64cf6dff88-q4lzt" podStartSLOduration=5.203996934 podStartE2EDuration="15.365725562s" podCreationTimestamp="2025-11-28 07:10:14 +0000 UTC" firstStartedPulling="2025-11-28 07:10:15.109413038 +0000 UTC m=+1060.029808620" lastFinishedPulling="2025-11-28 07:10:25.271141626 +0000 UTC m=+1070.191537248" observedRunningTime="2025-11-28 07:10:25.835078378 +0000 UTC m=+1070.755474020" watchObservedRunningTime="2025-11-28 07:10:29.365725562 +0000 UTC m=+1074.286121144"
Nov 28 07:10:29 crc kubenswrapper[4922]: I1128 07:10:29.366145 4922 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["cert-manager/cert-manager-webhook-f4fb5df64-r8sg6"]
Nov 28 07:10:29 crc kubenswrapper[4922]: I1128 07:10:29.366944 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager/cert-manager-webhook-f4fb5df64-r8sg6"
Nov 28 07:10:29 crc kubenswrapper[4922]: I1128 07:10:29.369335 4922 reflector.go:368] Caches populated for *v1.ConfigMap from object-"cert-manager"/"kube-root-ca.crt"
Nov 28 07:10:29 crc kubenswrapper[4922]: I1128 07:10:29.370174 4922 reflector.go:368] Caches populated for *v1.Secret from object-"cert-manager"/"cert-manager-webhook-dockercfg-xlglw"
Nov 28 07:10:29 crc kubenswrapper[4922]: I1128 07:10:29.370248 4922 reflector.go:368] Caches populated for *v1.ConfigMap from object-"cert-manager"/"openshift-service-ca.crt"
Nov 28 07:10:29 crc kubenswrapper[4922]: I1128 07:10:29.376211 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-webhook-f4fb5df64-r8sg6"]
Nov 28 07:10:29 crc kubenswrapper[4922]: I1128 07:10:29.386822 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/9a71a28c-df53-4df0-b3a8-5caae467bb94-bound-sa-token\") pod \"cert-manager-webhook-f4fb5df64-r8sg6\" (UID: \"9a71a28c-df53-4df0-b3a8-5caae467bb94\") " pod="cert-manager/cert-manager-webhook-f4fb5df64-r8sg6"
Nov 28 07:10:29 crc kubenswrapper[4922]: I1128 07:10:29.386879 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-chdjk\" (UniqueName: \"kubernetes.io/projected/9a71a28c-df53-4df0-b3a8-5caae467bb94-kube-api-access-chdjk\") pod \"cert-manager-webhook-f4fb5df64-r8sg6\" (UID: \"9a71a28c-df53-4df0-b3a8-5caae467bb94\") " pod="cert-manager/cert-manager-webhook-f4fb5df64-r8sg6"
Nov 28 07:10:29 crc kubenswrapper[4922]: I1128 07:10:29.488421 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/9a71a28c-df53-4df0-b3a8-5caae467bb94-bound-sa-token\") pod \"cert-manager-webhook-f4fb5df64-r8sg6\" (UID: \"9a71a28c-df53-4df0-b3a8-5caae467bb94\") " pod="cert-manager/cert-manager-webhook-f4fb5df64-r8sg6"
Nov 28 07:10:29 crc kubenswrapper[4922]: I1128 07:10:29.488481 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-chdjk\" (UniqueName: \"kubernetes.io/projected/9a71a28c-df53-4df0-b3a8-5caae467bb94-kube-api-access-chdjk\") pod \"cert-manager-webhook-f4fb5df64-r8sg6\" (UID: \"9a71a28c-df53-4df0-b3a8-5caae467bb94\") " pod="cert-manager/cert-manager-webhook-f4fb5df64-r8sg6"
Nov 28 07:10:29 crc kubenswrapper[4922]: I1128 07:10:29.511639 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/9a71a28c-df53-4df0-b3a8-5caae467bb94-bound-sa-token\") pod \"cert-manager-webhook-f4fb5df64-r8sg6\" (UID: \"9a71a28c-df53-4df0-b3a8-5caae467bb94\") " pod="cert-manager/cert-manager-webhook-f4fb5df64-r8sg6"
Nov 28 07:10:29 crc kubenswrapper[4922]: I1128 07:10:29.511946 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-chdjk\" (UniqueName: \"kubernetes.io/projected/9a71a28c-df53-4df0-b3a8-5caae467bb94-kube-api-access-chdjk\") pod \"cert-manager-webhook-f4fb5df64-r8sg6\" (UID: \"9a71a28c-df53-4df0-b3a8-5caae467bb94\") " pod="cert-manager/cert-manager-webhook-f4fb5df64-r8sg6"
Nov 28 07:10:29 crc kubenswrapper[4922]: I1128 07:10:29.701736 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager/cert-manager-webhook-f4fb5df64-r8sg6"
Nov 28 07:10:29 crc kubenswrapper[4922]: I1128 07:10:29.948948 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-webhook-f4fb5df64-r8sg6"]
Nov 28 07:10:29 crc kubenswrapper[4922]: W1128 07:10:29.971356 4922 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod9a71a28c_df53_4df0_b3a8_5caae467bb94.slice/crio-d1498bf9852699708c2a3678cbac22fbe14a0e34cf330fb8cc442a04a4666433 WatchSource:0}: Error finding container d1498bf9852699708c2a3678cbac22fbe14a0e34cf330fb8cc442a04a4666433: Status 404 returned error can't find the container with id d1498bf9852699708c2a3678cbac22fbe14a0e34cf330fb8cc442a04a4666433
Nov 28 07:10:30 crc kubenswrapper[4922]: I1128 07:10:30.843262 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-webhook-f4fb5df64-r8sg6" event={"ID":"9a71a28c-df53-4df0-b3a8-5caae467bb94","Type":"ContainerStarted","Data":"d1498bf9852699708c2a3678cbac22fbe14a0e34cf330fb8cc442a04a4666433"}
Nov 28 07:10:31 crc kubenswrapper[4922]: I1128 07:10:31.936804 4922 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["cert-manager/cert-manager-cainjector-855d9ccff4-mqk48"]
Nov 28 07:10:31 crc kubenswrapper[4922]: I1128 07:10:31.937842 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager/cert-manager-cainjector-855d9ccff4-mqk48"
Nov 28 07:10:31 crc kubenswrapper[4922]: I1128 07:10:31.939768 4922 reflector.go:368] Caches populated for *v1.Secret from object-"cert-manager"/"cert-manager-cainjector-dockercfg-rckf7"
Nov 28 07:10:31 crc kubenswrapper[4922]: I1128 07:10:31.947309 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-cainjector-855d9ccff4-mqk48"]
Nov 28 07:10:32 crc kubenswrapper[4922]: I1128 07:10:32.019498 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/bfc3b5d5-095c-444a-a868-2c7318e4ae1f-bound-sa-token\") pod \"cert-manager-cainjector-855d9ccff4-mqk48\" (UID: \"bfc3b5d5-095c-444a-a868-2c7318e4ae1f\") " pod="cert-manager/cert-manager-cainjector-855d9ccff4-mqk48"
Nov 28 07:10:32 crc kubenswrapper[4922]: I1128 07:10:32.019749 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-k7m86\" (UniqueName: \"kubernetes.io/projected/bfc3b5d5-095c-444a-a868-2c7318e4ae1f-kube-api-access-k7m86\") pod \"cert-manager-cainjector-855d9ccff4-mqk48\" (UID: \"bfc3b5d5-095c-444a-a868-2c7318e4ae1f\") " pod="cert-manager/cert-manager-cainjector-855d9ccff4-mqk48"
Nov 28 07:10:32 crc kubenswrapper[4922]: I1128 07:10:32.121057 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-k7m86\" (UniqueName: \"kubernetes.io/projected/bfc3b5d5-095c-444a-a868-2c7318e4ae1f-kube-api-access-k7m86\") pod \"cert-manager-cainjector-855d9ccff4-mqk48\" (UID: \"bfc3b5d5-095c-444a-a868-2c7318e4ae1f\") " pod="cert-manager/cert-manager-cainjector-855d9ccff4-mqk48"
Nov 28 07:10:32 crc kubenswrapper[4922]: I1128 07:10:32.121165 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/bfc3b5d5-095c-444a-a868-2c7318e4ae1f-bound-sa-token\") pod \"cert-manager-cainjector-855d9ccff4-mqk48\" (UID: \"bfc3b5d5-095c-444a-a868-2c7318e4ae1f\") " pod="cert-manager/cert-manager-cainjector-855d9ccff4-mqk48"
Nov 28 07:10:32 crc kubenswrapper[4922]: I1128 07:10:32.138656 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/bfc3b5d5-095c-444a-a868-2c7318e4ae1f-bound-sa-token\") pod \"cert-manager-cainjector-855d9ccff4-mqk48\" (UID: \"bfc3b5d5-095c-444a-a868-2c7318e4ae1f\") " pod="cert-manager/cert-manager-cainjector-855d9ccff4-mqk48"
Nov 28 07:10:32 crc kubenswrapper[4922]: I1128 07:10:32.138832 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-k7m86\" (UniqueName: \"kubernetes.io/projected/bfc3b5d5-095c-444a-a868-2c7318e4ae1f-kube-api-access-k7m86\") pod \"cert-manager-cainjector-855d9ccff4-mqk48\" (UID: \"bfc3b5d5-095c-444a-a868-2c7318e4ae1f\") " pod="cert-manager/cert-manager-cainjector-855d9ccff4-mqk48"
Nov 28 07:10:32 crc kubenswrapper[4922]: I1128 07:10:32.256273 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager/cert-manager-cainjector-855d9ccff4-mqk48"
Nov 28 07:10:32 crc kubenswrapper[4922]: I1128 07:10:32.813069 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-cainjector-855d9ccff4-mqk48"]
Nov 28 07:10:32 crc kubenswrapper[4922]: W1128 07:10:32.815490 4922 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podbfc3b5d5_095c_444a_a868_2c7318e4ae1f.slice/crio-5de29bb6ed7ee54e2a544567486e733dc0eb9254e9b7301e5b9d78f2e15229e4 WatchSource:0}: Error finding container 5de29bb6ed7ee54e2a544567486e733dc0eb9254e9b7301e5b9d78f2e15229e4: Status 404 returned error can't find the container with id 5de29bb6ed7ee54e2a544567486e733dc0eb9254e9b7301e5b9d78f2e15229e4
Nov 28 07:10:32 crc kubenswrapper[4922]: I1128 07:10:32.855100 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-cainjector-855d9ccff4-mqk48" event={"ID":"bfc3b5d5-095c-444a-a868-2c7318e4ae1f","Type":"ContainerStarted","Data":"5de29bb6ed7ee54e2a544567486e733dc0eb9254e9b7301e5b9d78f2e15229e4"}
Nov 28 07:10:37 crc kubenswrapper[4922]: I1128 07:10:37.894459 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-cainjector-855d9ccff4-mqk48" event={"ID":"bfc3b5d5-095c-444a-a868-2c7318e4ae1f","Type":"ContainerStarted","Data":"0796bfcf87c2ced369c905ad61c5e4e10c0000a32841dd7080e4958614cc130b"}
Nov 28 07:10:37 crc kubenswrapper[4922]: I1128 07:10:37.896914 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-webhook-f4fb5df64-r8sg6" event={"ID":"9a71a28c-df53-4df0-b3a8-5caae467bb94","Type":"ContainerStarted","Data":"df66735555268160e5f021701efe5eea30c11179d8068477af64c73f7932a1e2"}
Nov 28 07:10:37 crc kubenswrapper[4922]: I1128 07:10:37.897052 4922 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="cert-manager/cert-manager-webhook-f4fb5df64-r8sg6"
Nov 28 07:10:37 crc kubenswrapper[4922]: I1128 07:10:37.913370 4922 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="cert-manager/cert-manager-cainjector-855d9ccff4-mqk48" podStartSLOduration=2.418761932 podStartE2EDuration="6.913339499s" podCreationTimestamp="2025-11-28 07:10:31 +0000 UTC" firstStartedPulling="2025-11-28 07:10:32.817343575 +0000 UTC m=+1077.737739167" lastFinishedPulling="2025-11-28 07:10:37.311921152 +0000 UTC m=+1082.232316734" observedRunningTime="2025-11-28 07:10:37.910379079 +0000 UTC m=+1082.830774671" watchObservedRunningTime="2025-11-28 07:10:37.913339499 +0000 UTC m=+1082.833735111"
Nov 28 07:10:37 crc kubenswrapper[4922]: I1128 07:10:37.940741 4922 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="cert-manager/cert-manager-webhook-f4fb5df64-r8sg6" podStartSLOduration=1.5243203379999999 podStartE2EDuration="8.940721931s" podCreationTimestamp="2025-11-28 07:10:29 +0000 UTC" firstStartedPulling="2025-11-28 07:10:29.972851113 +0000 UTC m=+1074.893246695" lastFinishedPulling="2025-11-28 07:10:37.389252706 +0000 UTC m=+1082.309648288" observedRunningTime="2025-11-28 07:10:37.93812389 +0000 UTC m=+1082.858519482" watchObservedRunningTime="2025-11-28 07:10:37.940721931 +0000 UTC m=+1082.861117513"
Nov 28 07:10:38 crc kubenswrapper[4922]: I1128 07:10:38.040127 4922 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["cert-manager/cert-manager-86cb77c54b-dpbll"]
Nov 28 07:10:38 crc kubenswrapper[4922]: I1128 07:10:38.040825 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager/cert-manager-86cb77c54b-dpbll"
Nov 28 07:10:38 crc kubenswrapper[4922]: I1128 07:10:38.043033 4922 reflector.go:368] Caches populated for *v1.Secret from object-"cert-manager"/"cert-manager-dockercfg-fb2pk"
Nov 28 07:10:38 crc kubenswrapper[4922]: I1128 07:10:38.051527 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-86cb77c54b-dpbll"]
Nov 28 07:10:38 crc kubenswrapper[4922]: I1128 07:10:38.204237 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-62gd4\" (UniqueName: \"kubernetes.io/projected/6716937d-e1d0-4f62-b3bb-8f7102bded6c-kube-api-access-62gd4\") pod \"cert-manager-86cb77c54b-dpbll\" (UID: \"6716937d-e1d0-4f62-b3bb-8f7102bded6c\") " pod="cert-manager/cert-manager-86cb77c54b-dpbll"
Nov 28 07:10:38 crc kubenswrapper[4922]: I1128 07:10:38.204371 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/6716937d-e1d0-4f62-b3bb-8f7102bded6c-bound-sa-token\") pod \"cert-manager-86cb77c54b-dpbll\" (UID: \"6716937d-e1d0-4f62-b3bb-8f7102bded6c\") " pod="cert-manager/cert-manager-86cb77c54b-dpbll"
Nov 28 07:10:38 crc kubenswrapper[4922]: I1128 07:10:38.306542 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-62gd4\" (UniqueName: \"kubernetes.io/projected/6716937d-e1d0-4f62-b3bb-8f7102bded6c-kube-api-access-62gd4\") pod \"cert-manager-86cb77c54b-dpbll\" (UID: \"6716937d-e1d0-4f62-b3bb-8f7102bded6c\") " pod="cert-manager/cert-manager-86cb77c54b-dpbll"
Nov 28 07:10:38 crc kubenswrapper[4922]: I1128 07:10:38.306599 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/6716937d-e1d0-4f62-b3bb-8f7102bded6c-bound-sa-token\") pod \"cert-manager-86cb77c54b-dpbll\" (UID: \"6716937d-e1d0-4f62-b3bb-8f7102bded6c\") " pod="cert-manager/cert-manager-86cb77c54b-dpbll"
Nov 28 07:10:38 crc kubenswrapper[4922]: I1128 07:10:38.333587 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/6716937d-e1d0-4f62-b3bb-8f7102bded6c-bound-sa-token\") pod \"cert-manager-86cb77c54b-dpbll\" (UID: \"6716937d-e1d0-4f62-b3bb-8f7102bded6c\") " pod="cert-manager/cert-manager-86cb77c54b-dpbll"
Nov 28 07:10:38 crc kubenswrapper[4922]: I1128 07:10:38.335708 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-62gd4\" (UniqueName: \"kubernetes.io/projected/6716937d-e1d0-4f62-b3bb-8f7102bded6c-kube-api-access-62gd4\") pod \"cert-manager-86cb77c54b-dpbll\" (UID: \"6716937d-e1d0-4f62-b3bb-8f7102bded6c\") " pod="cert-manager/cert-manager-86cb77c54b-dpbll"
Nov 28 07:10:38 crc kubenswrapper[4922]: I1128 07:10:38.369509 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager/cert-manager-86cb77c54b-dpbll"
Nov 28 07:10:38 crc kubenswrapper[4922]: I1128 07:10:38.573189 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-86cb77c54b-dpbll"]
Nov 28 07:10:38 crc kubenswrapper[4922]: I1128 07:10:38.905934 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-86cb77c54b-dpbll" event={"ID":"6716937d-e1d0-4f62-b3bb-8f7102bded6c","Type":"ContainerStarted","Data":"114b051b7930b9338a1346f6730156db5866d165420cfed306e4868c105c3c5d"}
Nov 28 07:10:38 crc kubenswrapper[4922]: I1128 07:10:38.906769 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-86cb77c54b-dpbll" event={"ID":"6716937d-e1d0-4f62-b3bb-8f7102bded6c","Type":"ContainerStarted","Data":"bc95a2d23fd77448d23784bc793a36ebe7fad0e02bed0eb6f4db33401f86e3e7"}
Nov 28 07:10:38 crc kubenswrapper[4922]: I1128 07:10:38.926320 4922 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="cert-manager/cert-manager-86cb77c54b-dpbll" podStartSLOduration=0.92629295 podStartE2EDuration="926.29295ms" podCreationTimestamp="2025-11-28 07:10:38 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 07:10:38.922421356 +0000 UTC m=+1083.842816988" watchObservedRunningTime="2025-11-28 07:10:38.92629295 +0000 UTC m=+1083.846688562"
Nov 28 07:10:44 crc kubenswrapper[4922]: I1128 07:10:44.709272 4922 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="cert-manager/cert-manager-webhook-f4fb5df64-r8sg6"
Nov 28 07:10:48 crc kubenswrapper[4922]: I1128 07:10:48.115018 4922 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/openstack-operator-index-g6fjt"]
Nov 28 07:10:48 crc kubenswrapper[4922]: I1128 07:10:48.116475 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-index-g6fjt"
Nov 28 07:10:48 crc kubenswrapper[4922]: I1128 07:10:48.119795 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-operator-index-dockercfg-4966v"
Nov 28 07:10:48 crc kubenswrapper[4922]: I1128 07:10:48.120376 4922 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack-operators"/"openshift-service-ca.crt"
Nov 28 07:10:48 crc kubenswrapper[4922]: I1128 07:10:48.126383 4922 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack-operators"/"kube-root-ca.crt"
Nov 28 07:10:48 crc kubenswrapper[4922]: I1128 07:10:48.172976 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-index-g6fjt"]
Nov 28 07:10:48 crc kubenswrapper[4922]: I1128 07:10:48.263841 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2lr7h\" (UniqueName: \"kubernetes.io/projected/131b6ae5-a293-4881-99af-28cc60c80b47-kube-api-access-2lr7h\") pod \"openstack-operator-index-g6fjt\" (UID: \"131b6ae5-a293-4881-99af-28cc60c80b47\") " pod="openstack-operators/openstack-operator-index-g6fjt"
Nov 28 07:10:48 crc kubenswrapper[4922]: I1128 07:10:48.365075 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2lr7h\" (UniqueName: \"kubernetes.io/projected/131b6ae5-a293-4881-99af-28cc60c80b47-kube-api-access-2lr7h\") pod \"openstack-operator-index-g6fjt\" (UID: \"131b6ae5-a293-4881-99af-28cc60c80b47\") " pod="openstack-operators/openstack-operator-index-g6fjt"
Nov 28 07:10:48 crc kubenswrapper[4922]: I1128 07:10:48.388146 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2lr7h\" (UniqueName: \"kubernetes.io/projected/131b6ae5-a293-4881-99af-28cc60c80b47-kube-api-access-2lr7h\") pod \"openstack-operator-index-g6fjt\" (UID: \"131b6ae5-a293-4881-99af-28cc60c80b47\") " pod="openstack-operators/openstack-operator-index-g6fjt"
Nov 28 07:10:48 crc kubenswrapper[4922]: I1128 07:10:48.486486 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-index-g6fjt"
Nov 28 07:10:48 crc kubenswrapper[4922]: I1128 07:10:48.924022 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-index-g6fjt"]
Nov 28 07:10:48 crc kubenswrapper[4922]: W1128 07:10:48.933877 4922 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod131b6ae5_a293_4881_99af_28cc60c80b47.slice/crio-522fc924e58ee30a00faeb59c1e0e6f0af47162766aa6cd60fd9afc6d3fd9df6 WatchSource:0}: Error finding container 522fc924e58ee30a00faeb59c1e0e6f0af47162766aa6cd60fd9afc6d3fd9df6: Status 404 returned error can't find the container with id 522fc924e58ee30a00faeb59c1e0e6f0af47162766aa6cd60fd9afc6d3fd9df6
Nov 28 07:10:48 crc kubenswrapper[4922]: I1128 07:10:48.994291 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-g6fjt" event={"ID":"131b6ae5-a293-4881-99af-28cc60c80b47","Type":"ContainerStarted","Data":"522fc924e58ee30a00faeb59c1e0e6f0af47162766aa6cd60fd9afc6d3fd9df6"}
Nov 28 07:10:51 crc kubenswrapper[4922]: I1128 07:10:51.465869 4922 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack-operators/openstack-operator-index-g6fjt"]
Nov 28 07:10:52 crc kubenswrapper[4922]: I1128 07:10:52.075862 4922 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/openstack-operator-index-b6hrw"]
Nov 28 07:10:52 crc kubenswrapper[4922]: I1128 07:10:52.077817 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-index-b6hrw"
Nov 28 07:10:52 crc kubenswrapper[4922]: I1128 07:10:52.096893 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-index-b6hrw"]
Nov 28 07:10:52 crc kubenswrapper[4922]: I1128 07:10:52.134873 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vsvv7\" (UniqueName: \"kubernetes.io/projected/37166abd-f8e0-4749-8569-c133eda4d74a-kube-api-access-vsvv7\") pod \"openstack-operator-index-b6hrw\" (UID: \"37166abd-f8e0-4749-8569-c133eda4d74a\") " pod="openstack-operators/openstack-operator-index-b6hrw"
Nov 28 07:10:52 crc kubenswrapper[4922]: I1128 07:10:52.242689 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vsvv7\" (UniqueName: \"kubernetes.io/projected/37166abd-f8e0-4749-8569-c133eda4d74a-kube-api-access-vsvv7\") pod \"openstack-operator-index-b6hrw\" (UID: \"37166abd-f8e0-4749-8569-c133eda4d74a\") " pod="openstack-operators/openstack-operator-index-b6hrw"
Nov 28 07:10:52 crc kubenswrapper[4922]: I1128 07:10:52.266171 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vsvv7\" (UniqueName: \"kubernetes.io/projected/37166abd-f8e0-4749-8569-c133eda4d74a-kube-api-access-vsvv7\") pod \"openstack-operator-index-b6hrw\" (UID: \"37166abd-f8e0-4749-8569-c133eda4d74a\") " pod="openstack-operators/openstack-operator-index-b6hrw"
Nov 28 07:10:52 crc kubenswrapper[4922]: I1128 07:10:52.408199 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-index-b6hrw"
Nov 28 07:11:01 crc kubenswrapper[4922]: I1128 07:11:01.031992 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-index-b6hrw"]
Nov 28 07:11:01 crc kubenswrapper[4922]: I1128 07:11:01.084252 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-g6fjt" event={"ID":"131b6ae5-a293-4881-99af-28cc60c80b47","Type":"ContainerStarted","Data":"09cae7f5c9ea65728e0877877a81cfe47624ceff9c70dbaaa53c415dba337fa9"}
Nov 28 07:11:01 crc kubenswrapper[4922]: I1128 07:11:01.084427 4922 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack-operators/openstack-operator-index-g6fjt" podUID="131b6ae5-a293-4881-99af-28cc60c80b47" containerName="registry-server" containerID="cri-o://09cae7f5c9ea65728e0877877a81cfe47624ceff9c70dbaaa53c415dba337fa9" gracePeriod=2
Nov 28 07:11:01 crc kubenswrapper[4922]: I1128 07:11:01.088516 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-b6hrw" event={"ID":"37166abd-f8e0-4749-8569-c133eda4d74a","Type":"ContainerStarted","Data":"646a4b3bf2c0539c90aec23f61e34064b76e49db44e49edbc32a501fb666ae75"}
Nov 28 07:11:01 crc kubenswrapper[4922]: I1128 07:11:01.104711 4922 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/openstack-operator-index-g6fjt" podStartSLOduration=1.108835417 podStartE2EDuration="13.104690543s" podCreationTimestamp="2025-11-28 07:10:48 +0000 UTC" firstStartedPulling="2025-11-28 07:10:48.936526487 +0000 UTC m=+1093.856922069" lastFinishedPulling="2025-11-28 07:11:00.932381573 +0000 UTC m=+1105.852777195" observedRunningTime="2025-11-28 07:11:01.102856174 +0000 UTC m=+1106.023251797" watchObservedRunningTime="2025-11-28 07:11:01.104690543 +0000 UTC m=+1106.025086135"
Nov 28 07:11:01 crc kubenswrapper[4922]: I1128 07:11:01.436075 4922 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-index-g6fjt_131b6ae5-a293-4881-99af-28cc60c80b47/registry-server/0.log"
Nov 28 07:11:01 crc kubenswrapper[4922]: I1128 07:11:01.436676 4922 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-index-g6fjt"
Nov 28 07:11:01 crc kubenswrapper[4922]: I1128 07:11:01.581603 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2lr7h\" (UniqueName: \"kubernetes.io/projected/131b6ae5-a293-4881-99af-28cc60c80b47-kube-api-access-2lr7h\") pod \"131b6ae5-a293-4881-99af-28cc60c80b47\" (UID: \"131b6ae5-a293-4881-99af-28cc60c80b47\") "
Nov 28 07:11:01 crc kubenswrapper[4922]: I1128 07:11:01.589841 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/131b6ae5-a293-4881-99af-28cc60c80b47-kube-api-access-2lr7h" (OuterVolumeSpecName: "kube-api-access-2lr7h") pod "131b6ae5-a293-4881-99af-28cc60c80b47" (UID: "131b6ae5-a293-4881-99af-28cc60c80b47"). InnerVolumeSpecName "kube-api-access-2lr7h". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 28 07:11:01 crc kubenswrapper[4922]: I1128 07:11:01.683125 4922 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2lr7h\" (UniqueName: \"kubernetes.io/projected/131b6ae5-a293-4881-99af-28cc60c80b47-kube-api-access-2lr7h\") on node \"crc\" DevicePath \"\""
Nov 28 07:11:02 crc kubenswrapper[4922]: I1128 07:11:02.099477 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-b6hrw" event={"ID":"37166abd-f8e0-4749-8569-c133eda4d74a","Type":"ContainerStarted","Data":"fd0e4c607bc9e3691ccd9f007862de07cd8929767becc50c224b5431a76e0548"}
Nov 28 07:11:02 crc kubenswrapper[4922]: I1128 07:11:02.102365 4922 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-index-g6fjt_131b6ae5-a293-4881-99af-28cc60c80b47/registry-server/0.log"
Nov 28 07:11:02 crc kubenswrapper[4922]: I1128 07:11:02.102458 4922 generic.go:334] "Generic (PLEG): container finished" podID="131b6ae5-a293-4881-99af-28cc60c80b47" containerID="09cae7f5c9ea65728e0877877a81cfe47624ceff9c70dbaaa53c415dba337fa9" exitCode=2
Nov 28 07:11:02 crc kubenswrapper[4922]: I1128 07:11:02.102517 4922 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-index-g6fjt"
Nov 28 07:11:02 crc kubenswrapper[4922]: I1128 07:11:02.102517 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-g6fjt" event={"ID":"131b6ae5-a293-4881-99af-28cc60c80b47","Type":"ContainerDied","Data":"09cae7f5c9ea65728e0877877a81cfe47624ceff9c70dbaaa53c415dba337fa9"}
Nov 28 07:11:02 crc kubenswrapper[4922]: I1128 07:11:02.102810 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-g6fjt" event={"ID":"131b6ae5-a293-4881-99af-28cc60c80b47","Type":"ContainerDied","Data":"522fc924e58ee30a00faeb59c1e0e6f0af47162766aa6cd60fd9afc6d3fd9df6"}
Nov 28 07:11:02 crc kubenswrapper[4922]: I1128 07:11:02.102862 4922 scope.go:117] "RemoveContainer" containerID="09cae7f5c9ea65728e0877877a81cfe47624ceff9c70dbaaa53c415dba337fa9"
Nov 28 07:11:02 crc kubenswrapper[4922]: I1128 07:11:02.128536 4922 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/openstack-operator-index-b6hrw" podStartSLOduration=9.541106396 podStartE2EDuration="10.128502779s" podCreationTimestamp="2025-11-28 07:10:52 +0000 UTC" firstStartedPulling="2025-11-28 07:11:01.035315061 +0000 UTC m=+1105.955710653" lastFinishedPulling="2025-11-28 07:11:01.622711454 +0000 UTC m=+1106.543107036" observedRunningTime="2025-11-28 07:11:02.12667602 +0000 UTC m=+1107.047071642" watchObservedRunningTime="2025-11-28 07:11:02.128502779 +0000 UTC m=+1107.048898411"
Nov 28 07:11:02 crc kubenswrapper[4922]: I1128 07:11:02.152892 4922 scope.go:117] "RemoveContainer" containerID="09cae7f5c9ea65728e0877877a81cfe47624ceff9c70dbaaa53c415dba337fa9"
Nov 28 07:11:02 crc kubenswrapper[4922]: E1128 07:11:02.154304 4922 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"09cae7f5c9ea65728e0877877a81cfe47624ceff9c70dbaaa53c415dba337fa9\": container with ID starting with 09cae7f5c9ea65728e0877877a81cfe47624ceff9c70dbaaa53c415dba337fa9 not found: ID does not exist" containerID="09cae7f5c9ea65728e0877877a81cfe47624ceff9c70dbaaa53c415dba337fa9"
Nov 28 07:11:02 crc kubenswrapper[4922]: I1128 07:11:02.154379 4922 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"09cae7f5c9ea65728e0877877a81cfe47624ceff9c70dbaaa53c415dba337fa9"} err="failed to get container status \"09cae7f5c9ea65728e0877877a81cfe47624ceff9c70dbaaa53c415dba337fa9\": rpc error: code = NotFound desc = could not find container \"09cae7f5c9ea65728e0877877a81cfe47624ceff9c70dbaaa53c415dba337fa9\": container with ID starting with 09cae7f5c9ea65728e0877877a81cfe47624ceff9c70dbaaa53c415dba337fa9 not found: ID does not exist"
Nov 28 07:11:02 crc kubenswrapper[4922]: I1128 07:11:02.162366 4922 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack-operators/openstack-operator-index-g6fjt"]
Nov 28 07:11:02 crc kubenswrapper[4922]: I1128 07:11:02.182550 4922 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack-operators/openstack-operator-index-g6fjt"]
Nov 28 07:11:02 crc kubenswrapper[4922]: I1128 07:11:02.409027 4922 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/openstack-operator-index-b6hrw"
Nov 28 07:11:02 crc kubenswrapper[4922]: I1128 07:11:02.409097 4922 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack-operators/openstack-operator-index-b6hrw"
Nov 28 07:11:02 crc kubenswrapper[4922]: I1128 07:11:02.451308 4922 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack-operators/openstack-operator-index-b6hrw"
Nov 28 07:11:03 crc kubenswrapper[4922]: I1128 07:11:03.410389 4922 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="131b6ae5-a293-4881-99af-28cc60c80b47" path="/var/lib/kubelet/pods/131b6ae5-a293-4881-99af-28cc60c80b47/volumes"
Nov 28 07:11:12 crc kubenswrapper[4922]: I1128 07:11:12.452485 4922 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/openstack-operator-index-b6hrw"
Nov 28 07:11:25 crc kubenswrapper[4922]: I1128 07:11:25.716298 4922 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/b45623fa79426ee6d52ccc2f61ed894b37aa2fb70e5ce0cf390950ffbe22f72"]
Nov 28 07:11:25 crc kubenswrapper[4922]: E1128 07:11:25.717072 4922 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="131b6ae5-a293-4881-99af-28cc60c80b47" containerName="registry-server"
Nov 28 07:11:25 crc kubenswrapper[4922]: I1128 07:11:25.717091 4922 state_mem.go:107] "Deleted CPUSet assignment" podUID="131b6ae5-a293-4881-99af-28cc60c80b47" containerName="registry-server"
Nov 28 07:11:25 crc kubenswrapper[4922]: I1128 07:11:25.717300 4922 memory_manager.go:354] "RemoveStaleState removing state" podUID="131b6ae5-a293-4881-99af-28cc60c80b47" containerName="registry-server"
Nov 28 07:11:25 crc kubenswrapper[4922]: I1128 07:11:25.718717 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/b45623fa79426ee6d52ccc2f61ed894b37aa2fb70e5ce0cf390950ffbe22f72"
Nov 28 07:11:25 crc kubenswrapper[4922]: I1128 07:11:25.720834 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"default-dockercfg-vmztx"
Nov 28 07:11:25 crc kubenswrapper[4922]: I1128 07:11:25.732822 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/b45623fa79426ee6d52ccc2f61ed894b37aa2fb70e5ce0cf390950ffbe22f72"]
Nov 28 07:11:25 crc kubenswrapper[4922]: I1128 07:11:25.823933 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/960497bd-3a43-4bb6-b3b6-b8bdf0ddd451-util\") pod \"b45623fa79426ee6d52ccc2f61ed894b37aa2fb70e5ce0cf390950ffbe22f72\" (UID: \"960497bd-3a43-4bb6-b3b6-b8bdf0ddd451\") " pod="openstack-operators/b45623fa79426ee6d52ccc2f61ed894b37aa2fb70e5ce0cf390950ffbe22f72"
Nov 28 07:11:25 crc kubenswrapper[4922]: I1128 07:11:25.824257 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sfqtw\" (UniqueName: \"kubernetes.io/projected/960497bd-3a43-4bb6-b3b6-b8bdf0ddd451-kube-api-access-sfqtw\") pod \"b45623fa79426ee6d52ccc2f61ed894b37aa2fb70e5ce0cf390950ffbe22f72\" (UID: \"960497bd-3a43-4bb6-b3b6-b8bdf0ddd451\") " pod="openstack-operators/b45623fa79426ee6d52ccc2f61ed894b37aa2fb70e5ce0cf390950ffbe22f72"
Nov 28 07:11:25 crc kubenswrapper[4922]: I1128 07:11:25.824355 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/960497bd-3a43-4bb6-b3b6-b8bdf0ddd451-bundle\") pod \"b45623fa79426ee6d52ccc2f61ed894b37aa2fb70e5ce0cf390950ffbe22f72\" (UID: \"960497bd-3a43-4bb6-b3b6-b8bdf0ddd451\") " pod="openstack-operators/b45623fa79426ee6d52ccc2f61ed894b37aa2fb70e5ce0cf390950ffbe22f72"
Nov 28 07:11:25 crc kubenswrapper[4922]: I1128 07:11:25.925970 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/960497bd-3a43-4bb6-b3b6-b8bdf0ddd451-util\") pod \"b45623fa79426ee6d52ccc2f61ed894b37aa2fb70e5ce0cf390950ffbe22f72\" (UID: \"960497bd-3a43-4bb6-b3b6-b8bdf0ddd451\") " pod="openstack-operators/b45623fa79426ee6d52ccc2f61ed894b37aa2fb70e5ce0cf390950ffbe22f72"
Nov 28 07:11:25 crc kubenswrapper[4922]: I1128 07:11:25.926095 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sfqtw\" (UniqueName: \"kubernetes.io/projected/960497bd-3a43-4bb6-b3b6-b8bdf0ddd451-kube-api-access-sfqtw\") pod \"b45623fa79426ee6d52ccc2f61ed894b37aa2fb70e5ce0cf390950ffbe22f72\" (UID: \"960497bd-3a43-4bb6-b3b6-b8bdf0ddd451\") " pod="openstack-operators/b45623fa79426ee6d52ccc2f61ed894b37aa2fb70e5ce0cf390950ffbe22f72"
Nov 28 07:11:25 crc kubenswrapper[4922]: I1128 07:11:25.926138 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/960497bd-3a43-4bb6-b3b6-b8bdf0ddd451-bundle\") pod \"b45623fa79426ee6d52ccc2f61ed894b37aa2fb70e5ce0cf390950ffbe22f72\" (UID: \"960497bd-3a43-4bb6-b3b6-b8bdf0ddd451\") " pod="openstack-operators/b45623fa79426ee6d52ccc2f61ed894b37aa2fb70e5ce0cf390950ffbe22f72"
Nov 28 07:11:25 crc kubenswrapper[4922]: I1128 07:11:25.926852 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/960497bd-3a43-4bb6-b3b6-b8bdf0ddd451-util\") pod \"b45623fa79426ee6d52ccc2f61ed894b37aa2fb70e5ce0cf390950ffbe22f72\" (UID: \"960497bd-3a43-4bb6-b3b6-b8bdf0ddd451\") " pod="openstack-operators/b45623fa79426ee6d52ccc2f61ed894b37aa2fb70e5ce0cf390950ffbe22f72"
Nov 28 07:11:25 crc kubenswrapper[4922]: I1128 07:11:25.926948 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/960497bd-3a43-4bb6-b3b6-b8bdf0ddd451-bundle\") pod \"b45623fa79426ee6d52ccc2f61ed894b37aa2fb70e5ce0cf390950ffbe22f72\" (UID: \"960497bd-3a43-4bb6-b3b6-b8bdf0ddd451\") " pod="openstack-operators/b45623fa79426ee6d52ccc2f61ed894b37aa2fb70e5ce0cf390950ffbe22f72"
Nov 28 07:11:25 crc kubenswrapper[4922]: I1128 07:11:25.958047 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sfqtw\" (UniqueName: \"kubernetes.io/projected/960497bd-3a43-4bb6-b3b6-b8bdf0ddd451-kube-api-access-sfqtw\") pod \"b45623fa79426ee6d52ccc2f61ed894b37aa2fb70e5ce0cf390950ffbe22f72\" (UID: \"960497bd-3a43-4bb6-b3b6-b8bdf0ddd451\") " pod="openstack-operators/b45623fa79426ee6d52ccc2f61ed894b37aa2fb70e5ce0cf390950ffbe22f72"
Nov 28 07:11:26 crc kubenswrapper[4922]: I1128 07:11:26.038618 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/b45623fa79426ee6d52ccc2f61ed894b37aa2fb70e5ce0cf390950ffbe22f72"
Nov 28 07:11:26 crc kubenswrapper[4922]: I1128 07:11:26.474969 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/b45623fa79426ee6d52ccc2f61ed894b37aa2fb70e5ce0cf390950ffbe22f72"]
Nov 28 07:11:27 crc kubenswrapper[4922]: I1128 07:11:27.315081 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/b45623fa79426ee6d52ccc2f61ed894b37aa2fb70e5ce0cf390950ffbe22f72" event={"ID":"960497bd-3a43-4bb6-b3b6-b8bdf0ddd451","Type":"ContainerStarted","Data":"c3259a8fffd3df49a2dde466a8e829abda0bb2b77857822c666f3196d4c88488"}
Nov 28 07:11:35 crc kubenswrapper[4922]: I1128 07:11:35.398106 4922 generic.go:334] "Generic (PLEG): container finished" podID="960497bd-3a43-4bb6-b3b6-b8bdf0ddd451" containerID="65ea14a042882ecd5de609775d83c6739801c1edabeb874ca895510cc446e658" exitCode=0
Nov 28 07:11:35 crc kubenswrapper[4922]: I1128 07:11:35.412388 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/b45623fa79426ee6d52ccc2f61ed894b37aa2fb70e5ce0cf390950ffbe22f72" event={"ID":"960497bd-3a43-4bb6-b3b6-b8bdf0ddd451","Type":"ContainerDied","Data":"65ea14a042882ecd5de609775d83c6739801c1edabeb874ca895510cc446e658"}
Nov 28 07:11:36 crc kubenswrapper[4922]: I1128 07:11:36.407543 4922 generic.go:334] "Generic (PLEG): container finished" podID="960497bd-3a43-4bb6-b3b6-b8bdf0ddd451" containerID="6273c43917dad7bc4e1b54a64332f44901a12fe37747fde9351a659a2d5ad0d2" exitCode=0
Nov 28 07:11:36 crc kubenswrapper[4922]: I1128 07:11:36.407710 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/b45623fa79426ee6d52ccc2f61ed894b37aa2fb70e5ce0cf390950ffbe22f72" event={"ID":"960497bd-3a43-4bb6-b3b6-b8bdf0ddd451","Type":"ContainerDied","Data":"6273c43917dad7bc4e1b54a64332f44901a12fe37747fde9351a659a2d5ad0d2"}
Nov 28 07:11:37 crc kubenswrapper[4922]: I1128 07:11:37.789693 4922 generic.go:334] "Generic (PLEG): container finished" podID="960497bd-3a43-4bb6-b3b6-b8bdf0ddd451" containerID="43e6cd7edf9ce368bb22ffc87aa7eed87b90288ea2336d862ca7edb3636be9b3" exitCode=0
Nov 28 07:11:37 crc kubenswrapper[4922]: I1128 07:11:37.789779 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/b45623fa79426ee6d52ccc2f61ed894b37aa2fb70e5ce0cf390950ffbe22f72" event={"ID":"960497bd-3a43-4bb6-b3b6-b8bdf0ddd451","Type":"ContainerDied","Data":"43e6cd7edf9ce368bb22ffc87aa7eed87b90288ea2336d862ca7edb3636be9b3"}
Nov 28 07:11:39 crc kubenswrapper[4922]: I1128 07:11:39.114100 4922 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack-operators/b45623fa79426ee6d52ccc2f61ed894b37aa2fb70e5ce0cf390950ffbe22f72"
Nov 28 07:11:39 crc kubenswrapper[4922]: I1128 07:11:39.292563 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-sfqtw\" (UniqueName: \"kubernetes.io/projected/960497bd-3a43-4bb6-b3b6-b8bdf0ddd451-kube-api-access-sfqtw\") pod \"960497bd-3a43-4bb6-b3b6-b8bdf0ddd451\" (UID: \"960497bd-3a43-4bb6-b3b6-b8bdf0ddd451\") "
Nov 28 07:11:39 crc kubenswrapper[4922]: I1128 07:11:39.292667 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/960497bd-3a43-4bb6-b3b6-b8bdf0ddd451-util\") pod \"960497bd-3a43-4bb6-b3b6-b8bdf0ddd451\" (UID: \"960497bd-3a43-4bb6-b3b6-b8bdf0ddd451\") "
Nov 28 07:11:39 crc kubenswrapper[4922]: I1128 07:11:39.292738 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/960497bd-3a43-4bb6-b3b6-b8bdf0ddd451-bundle\") pod \"960497bd-3a43-4bb6-b3b6-b8bdf0ddd451\" (UID: \"960497bd-3a43-4bb6-b3b6-b8bdf0ddd451\") "
Nov 28 07:11:39 crc kubenswrapper[4922]: I1128 07:11:39.293780 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/960497bd-3a43-4bb6-b3b6-b8bdf0ddd451-bundle" (OuterVolumeSpecName: "bundle") pod "960497bd-3a43-4bb6-b3b6-b8bdf0ddd451" (UID: "960497bd-3a43-4bb6-b3b6-b8bdf0ddd451"). InnerVolumeSpecName "bundle". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 28 07:11:39 crc kubenswrapper[4922]: I1128 07:11:39.299157 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/960497bd-3a43-4bb6-b3b6-b8bdf0ddd451-kube-api-access-sfqtw" (OuterVolumeSpecName: "kube-api-access-sfqtw") pod "960497bd-3a43-4bb6-b3b6-b8bdf0ddd451" (UID: "960497bd-3a43-4bb6-b3b6-b8bdf0ddd451"). InnerVolumeSpecName "kube-api-access-sfqtw". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 28 07:11:39 crc kubenswrapper[4922]: I1128 07:11:39.306713 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/960497bd-3a43-4bb6-b3b6-b8bdf0ddd451-util" (OuterVolumeSpecName: "util") pod "960497bd-3a43-4bb6-b3b6-b8bdf0ddd451" (UID: "960497bd-3a43-4bb6-b3b6-b8bdf0ddd451"). InnerVolumeSpecName "util".
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 07:11:39 crc kubenswrapper[4922]: I1128 07:11:39.394164 4922 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-sfqtw\" (UniqueName: \"kubernetes.io/projected/960497bd-3a43-4bb6-b3b6-b8bdf0ddd451-kube-api-access-sfqtw\") on node \"crc\" DevicePath \"\"" Nov 28 07:11:39 crc kubenswrapper[4922]: I1128 07:11:39.394240 4922 reconciler_common.go:293] "Volume detached for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/960497bd-3a43-4bb6-b3b6-b8bdf0ddd451-util\") on node \"crc\" DevicePath \"\"" Nov 28 07:11:39 crc kubenswrapper[4922]: I1128 07:11:39.394262 4922 reconciler_common.go:293] "Volume detached for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/960497bd-3a43-4bb6-b3b6-b8bdf0ddd451-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 07:11:39 crc kubenswrapper[4922]: I1128 07:11:39.812339 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/b45623fa79426ee6d52ccc2f61ed894b37aa2fb70e5ce0cf390950ffbe22f72" event={"ID":"960497bd-3a43-4bb6-b3b6-b8bdf0ddd451","Type":"ContainerDied","Data":"c3259a8fffd3df49a2dde466a8e829abda0bb2b77857822c666f3196d4c88488"} Nov 28 07:11:39 crc kubenswrapper[4922]: I1128 07:11:39.812402 4922 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="c3259a8fffd3df49a2dde466a8e829abda0bb2b77857822c666f3196d4c88488" Nov 28 07:11:39 crc kubenswrapper[4922]: I1128 07:11:39.812483 4922 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack-operators/b45623fa79426ee6d52ccc2f61ed894b37aa2fb70e5ce0cf390950ffbe22f72" Nov 28 07:11:43 crc kubenswrapper[4922]: I1128 07:11:43.641207 4922 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/openstack-operator-controller-operator-67d8f6cc56-bdwdc"] Nov 28 07:11:43 crc kubenswrapper[4922]: E1128 07:11:43.641966 4922 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="960497bd-3a43-4bb6-b3b6-b8bdf0ddd451" containerName="extract" Nov 28 07:11:43 crc kubenswrapper[4922]: I1128 07:11:43.641986 4922 state_mem.go:107] "Deleted CPUSet assignment" podUID="960497bd-3a43-4bb6-b3b6-b8bdf0ddd451" containerName="extract" Nov 28 07:11:43 crc kubenswrapper[4922]: E1128 07:11:43.642004 4922 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="960497bd-3a43-4bb6-b3b6-b8bdf0ddd451" containerName="pull" Nov 28 07:11:43 crc kubenswrapper[4922]: I1128 07:11:43.642016 4922 state_mem.go:107] "Deleted CPUSet assignment" podUID="960497bd-3a43-4bb6-b3b6-b8bdf0ddd451" containerName="pull" Nov 28 07:11:43 crc kubenswrapper[4922]: E1128 07:11:43.642046 4922 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="960497bd-3a43-4bb6-b3b6-b8bdf0ddd451" containerName="util" Nov 28 07:11:43 crc kubenswrapper[4922]: I1128 07:11:43.642058 4922 state_mem.go:107] "Deleted CPUSet assignment" podUID="960497bd-3a43-4bb6-b3b6-b8bdf0ddd451" containerName="util" Nov 28 07:11:43 crc kubenswrapper[4922]: I1128 07:11:43.642301 4922 memory_manager.go:354] "RemoveStaleState removing state" podUID="960497bd-3a43-4bb6-b3b6-b8bdf0ddd451" containerName="extract" Nov 28 07:11:43 crc kubenswrapper[4922]: I1128 07:11:43.642957 4922 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-operator-controller-operator-67d8f6cc56-bdwdc" Nov 28 07:11:43 crc kubenswrapper[4922]: I1128 07:11:43.647258 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-operator-controller-operator-dockercfg-pk754" Nov 28 07:11:43 crc kubenswrapper[4922]: I1128 07:11:43.658472 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-controller-operator-67d8f6cc56-bdwdc"] Nov 28 07:11:43 crc kubenswrapper[4922]: I1128 07:11:43.670241 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-j2q2h\" (UniqueName: \"kubernetes.io/projected/0474c723-3f2a-42b6-a1c7-ff0472731025-kube-api-access-j2q2h\") pod \"openstack-operator-controller-operator-67d8f6cc56-bdwdc\" (UID: \"0474c723-3f2a-42b6-a1c7-ff0472731025\") " pod="openstack-operators/openstack-operator-controller-operator-67d8f6cc56-bdwdc" Nov 28 07:11:43 crc kubenswrapper[4922]: I1128 07:11:43.771161 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-j2q2h\" (UniqueName: \"kubernetes.io/projected/0474c723-3f2a-42b6-a1c7-ff0472731025-kube-api-access-j2q2h\") pod \"openstack-operator-controller-operator-67d8f6cc56-bdwdc\" (UID: \"0474c723-3f2a-42b6-a1c7-ff0472731025\") " pod="openstack-operators/openstack-operator-controller-operator-67d8f6cc56-bdwdc" Nov 28 07:11:43 crc kubenswrapper[4922]: I1128 07:11:43.798488 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-j2q2h\" (UniqueName: \"kubernetes.io/projected/0474c723-3f2a-42b6-a1c7-ff0472731025-kube-api-access-j2q2h\") pod \"openstack-operator-controller-operator-67d8f6cc56-bdwdc\" (UID: \"0474c723-3f2a-42b6-a1c7-ff0472731025\") " pod="openstack-operators/openstack-operator-controller-operator-67d8f6cc56-bdwdc" Nov 28 07:11:43 crc kubenswrapper[4922]: I1128 07:11:43.977771 4922 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-operator-controller-operator-67d8f6cc56-bdwdc" Nov 28 07:11:44 crc kubenswrapper[4922]: I1128 07:11:44.489154 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-controller-operator-67d8f6cc56-bdwdc"] Nov 28 07:11:44 crc kubenswrapper[4922]: I1128 07:11:44.840889 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-operator-67d8f6cc56-bdwdc" event={"ID":"0474c723-3f2a-42b6-a1c7-ff0472731025","Type":"ContainerStarted","Data":"ee0f9a7b8a822c9f73cae5e74c3a1ee5a56e2c02049b96b5e222891b54207347"} Nov 28 07:11:49 crc kubenswrapper[4922]: I1128 07:11:49.877922 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-operator-67d8f6cc56-bdwdc" event={"ID":"0474c723-3f2a-42b6-a1c7-ff0472731025","Type":"ContainerStarted","Data":"1ffd50fa821314afcf888034c1f370c6eca748fd888e5382c8bbc3f0ef7a5955"} Nov 28 07:11:50 crc kubenswrapper[4922]: I1128 07:11:50.883574 4922 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/openstack-operator-controller-operator-67d8f6cc56-bdwdc" Nov 28 07:11:50 crc kubenswrapper[4922]: I1128 07:11:50.919981 4922 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/openstack-operator-controller-operator-67d8f6cc56-bdwdc" podStartSLOduration=4.013576287 podStartE2EDuration="7.919962855s" podCreationTimestamp="2025-11-28 07:11:43 +0000 UTC" firstStartedPulling="2025-11-28 07:11:44.505405469 +0000 UTC m=+1149.425801051" lastFinishedPulling="2025-11-28 07:11:48.411792037 +0000 UTC m=+1153.332187619" observedRunningTime="2025-11-28 07:11:50.911950411 +0000 UTC m=+1155.832346033" watchObservedRunningTime="2025-11-28 07:11:50.919962855 +0000 UTC m=+1155.840358437" Nov 28 07:11:53 crc kubenswrapper[4922]: I1128 07:11:53.981415 4922 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/openstack-operator-controller-operator-67d8f6cc56-bdwdc" Nov 28 07:11:57 crc kubenswrapper[4922]: I1128 07:11:57.312619 4922 patch_prober.go:28] interesting pod/machine-config-daemon-h8wk6 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 07:11:57 crc kubenswrapper[4922]: I1128 07:11:57.312703 4922 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-h8wk6" podUID="0498340a-5e95-42bf-a0a6-8ac89a6b8858" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 07:12:12 crc kubenswrapper[4922]: I1128 07:12:12.578483 4922 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/barbican-operator-controller-manager-7b64f4fb85-7wc2c"] Nov 28 07:12:12 crc kubenswrapper[4922]: I1128 07:12:12.580544 4922 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/barbican-operator-controller-manager-7b64f4fb85-7wc2c" Nov 28 07:12:12 crc kubenswrapper[4922]: I1128 07:12:12.582282 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"barbican-operator-controller-manager-dockercfg-k4xxb" Nov 28 07:12:12 crc kubenswrapper[4922]: I1128 07:12:12.583881 4922 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/cinder-operator-controller-manager-6b7f75547b-njqq9"] Nov 28 07:12:12 crc kubenswrapper[4922]: I1128 07:12:12.584862 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/cinder-operator-controller-manager-6b7f75547b-njqq9" Nov 28 07:12:12 crc kubenswrapper[4922]: I1128 07:12:12.590607 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"cinder-operator-controller-manager-dockercfg-r7fqw" Nov 28 07:12:12 crc kubenswrapper[4922]: I1128 07:12:12.594432 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/barbican-operator-controller-manager-7b64f4fb85-7wc2c"] Nov 28 07:12:12 crc kubenswrapper[4922]: I1128 07:12:12.601570 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/cinder-operator-controller-manager-6b7f75547b-njqq9"] Nov 28 07:12:12 crc kubenswrapper[4922]: I1128 07:12:12.620847 4922 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/glance-operator-controller-manager-589cbd6b5b-89z98"] Nov 28 07:12:12 crc kubenswrapper[4922]: I1128 07:12:12.638881 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/glance-operator-controller-manager-589cbd6b5b-89z98" Nov 28 07:12:12 crc kubenswrapper[4922]: I1128 07:12:12.643978 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"glance-operator-controller-manager-dockercfg-2p48d" Nov 28 07:12:12 crc kubenswrapper[4922]: I1128 07:12:12.658085 4922 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/designate-operator-controller-manager-955677c94-6kwvm"] Nov 28 07:12:12 crc kubenswrapper[4922]: I1128 07:12:12.668928 4922 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/designate-operator-controller-manager-955677c94-6kwvm" Nov 28 07:12:12 crc kubenswrapper[4922]: I1128 07:12:12.700911 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"designate-operator-controller-manager-dockercfg-hhxt5" Nov 28 07:12:12 crc kubenswrapper[4922]: I1128 07:12:12.709441 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-886sx\" (UniqueName: \"kubernetes.io/projected/3ec4d3b2-efb1-4aa0-a5dc-a130ffc887be-kube-api-access-886sx\") pod \"glance-operator-controller-manager-589cbd6b5b-89z98\" (UID: \"3ec4d3b2-efb1-4aa0-a5dc-a130ffc887be\") " pod="openstack-operators/glance-operator-controller-manager-589cbd6b5b-89z98" Nov 28 07:12:12 crc kubenswrapper[4922]: I1128 07:12:12.709503 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2rdbg\" (UniqueName: \"kubernetes.io/projected/5b5e379b-d7c5-45ae-81f8-cebce05059a0-kube-api-access-2rdbg\") pod \"cinder-operator-controller-manager-6b7f75547b-njqq9\" (UID: \"5b5e379b-d7c5-45ae-81f8-cebce05059a0\") " pod="openstack-operators/cinder-operator-controller-manager-6b7f75547b-njqq9" Nov 28 07:12:12 crc kubenswrapper[4922]: I1128 07:12:12.709533 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cb68m\" (UniqueName: \"kubernetes.io/projected/743984bb-d44f-4721-b9a0-d12f71feb3e9-kube-api-access-cb68m\") pod \"barbican-operator-controller-manager-7b64f4fb85-7wc2c\" (UID: \"743984bb-d44f-4721-b9a0-d12f71feb3e9\") " pod="openstack-operators/barbican-operator-controller-manager-7b64f4fb85-7wc2c" Nov 28 07:12:12 crc kubenswrapper[4922]: I1128 07:12:12.716353 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/glance-operator-controller-manager-589cbd6b5b-89z98"] Nov 28 07:12:12 crc kubenswrapper[4922]: I1128 07:12:12.720659 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/designate-operator-controller-manager-955677c94-6kwvm"] Nov 28 07:12:12 crc kubenswrapper[4922]: I1128 07:12:12.736283 4922 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/heat-operator-controller-manager-5b77f656f-vggwt"] Nov 28 07:12:12 crc kubenswrapper[4922]: I1128 07:12:12.737407 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/heat-operator-controller-manager-5b77f656f-vggwt" Nov 28 07:12:12 crc kubenswrapper[4922]: I1128 07:12:12.738537 4922 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/horizon-operator-controller-manager-5d494799bf-htnmk"] Nov 28 07:12:12 crc kubenswrapper[4922]: I1128 07:12:12.739462 4922 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/horizon-operator-controller-manager-5d494799bf-htnmk" Nov 28 07:12:12 crc kubenswrapper[4922]: I1128 07:12:12.743639 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"heat-operator-controller-manager-dockercfg-6t826" Nov 28 07:12:12 crc kubenswrapper[4922]: I1128 07:12:12.746079 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"horizon-operator-controller-manager-dockercfg-8wglt" Nov 28 07:12:12 crc kubenswrapper[4922]: I1128 07:12:12.754890 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/heat-operator-controller-manager-5b77f656f-vggwt"] Nov 28 07:12:12 crc kubenswrapper[4922]: I1128 07:12:12.763038 4922 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/infra-operator-controller-manager-57548d458d-x2bzf"] Nov 28 07:12:12 crc kubenswrapper[4922]: I1128 07:12:12.764343 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/infra-operator-controller-manager-57548d458d-x2bzf" Nov 28 07:12:12 crc kubenswrapper[4922]: I1128 07:12:12.767933 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/horizon-operator-controller-manager-5d494799bf-htnmk"] Nov 28 07:12:12 crc kubenswrapper[4922]: I1128 07:12:12.773678 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"infra-operator-controller-manager-dockercfg-q2z28" Nov 28 07:12:12 crc kubenswrapper[4922]: I1128 07:12:12.773864 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"infra-operator-webhook-server-cert" Nov 28 07:12:12 crc kubenswrapper[4922]: I1128 07:12:12.775785 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/infra-operator-controller-manager-57548d458d-x2bzf"] Nov 28 07:12:12 crc kubenswrapper[4922]: I1128 07:12:12.784657 4922 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/ironic-operator-controller-manager-67cb4dc6d4-cdnsq"] Nov 28 07:12:12 crc kubenswrapper[4922]: I1128 07:12:12.785627 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/ironic-operator-controller-manager-67cb4dc6d4-cdnsq" Nov 28 07:12:12 crc kubenswrapper[4922]: I1128 07:12:12.787382 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"ironic-operator-controller-manager-dockercfg-flnhp" Nov 28 07:12:12 crc kubenswrapper[4922]: I1128 07:12:12.794460 4922 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/keystone-operator-controller-manager-7b4567c7cf-5m4mz"] Nov 28 07:12:12 crc kubenswrapper[4922]: I1128 07:12:12.796601 4922 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/keystone-operator-controller-manager-7b4567c7cf-5m4mz" Nov 28 07:12:12 crc kubenswrapper[4922]: I1128 07:12:12.805579 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"keystone-operator-controller-manager-dockercfg-rljxw" Nov 28 07:12:12 crc kubenswrapper[4922]: I1128 07:12:12.812266 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-55hw5\" (UniqueName: \"kubernetes.io/projected/b0972771-f2d5-4f8d-91cb-ef2fde0b536b-kube-api-access-55hw5\") pod \"horizon-operator-controller-manager-5d494799bf-htnmk\" (UID: \"b0972771-f2d5-4f8d-91cb-ef2fde0b536b\") " pod="openstack-operators/horizon-operator-controller-manager-5d494799bf-htnmk" Nov 28 07:12:12 crc kubenswrapper[4922]: I1128 07:12:12.812336 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2rdbg\" (UniqueName: \"kubernetes.io/projected/5b5e379b-d7c5-45ae-81f8-cebce05059a0-kube-api-access-2rdbg\") pod \"cinder-operator-controller-manager-6b7f75547b-njqq9\" (UID: \"5b5e379b-d7c5-45ae-81f8-cebce05059a0\") " pod="openstack-operators/cinder-operator-controller-manager-6b7f75547b-njqq9" Nov 28 07:12:12 crc kubenswrapper[4922]: I1128 07:12:12.812382 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cb68m\" (UniqueName: \"kubernetes.io/projected/743984bb-d44f-4721-b9a0-d12f71feb3e9-kube-api-access-cb68m\") pod \"barbican-operator-controller-manager-7b64f4fb85-7wc2c\" (UID: \"743984bb-d44f-4721-b9a0-d12f71feb3e9\") " pod="openstack-operators/barbican-operator-controller-manager-7b64f4fb85-7wc2c" Nov 28 07:12:12 crc kubenswrapper[4922]: I1128 07:12:12.812410 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dpdxl\" (UniqueName: \"kubernetes.io/projected/8577290f-ffdd-49ed-8666-6d5ca2323102-kube-api-access-dpdxl\") pod \"designate-operator-controller-manager-955677c94-6kwvm\" (UID: \"8577290f-ffdd-49ed-8666-6d5ca2323102\") " pod="openstack-operators/designate-operator-controller-manager-955677c94-6kwvm" Nov 28 07:12:12 crc kubenswrapper[4922]: I1128 07:12:12.812471 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-886sx\" (UniqueName: \"kubernetes.io/projected/3ec4d3b2-efb1-4aa0-a5dc-a130ffc887be-kube-api-access-886sx\") pod \"glance-operator-controller-manager-589cbd6b5b-89z98\" (UID: \"3ec4d3b2-efb1-4aa0-a5dc-a130ffc887be\") " pod="openstack-operators/glance-operator-controller-manager-589cbd6b5b-89z98" Nov 28 07:12:12 crc kubenswrapper[4922]: I1128 07:12:12.812504 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4sl5h\" (UniqueName: \"kubernetes.io/projected/a428f994-d14f-4a4a-aec2-ce114d56f7a9-kube-api-access-4sl5h\") pod \"heat-operator-controller-manager-5b77f656f-vggwt\" (UID: \"a428f994-d14f-4a4a-aec2-ce114d56f7a9\") " pod="openstack-operators/heat-operator-controller-manager-5b77f656f-vggwt" Nov 28 07:12:12 crc kubenswrapper[4922]: I1128 07:12:12.833283 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/ironic-operator-controller-manager-67cb4dc6d4-cdnsq"] Nov 28 07:12:12 crc kubenswrapper[4922]: I1128 07:12:12.844643 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/keystone-operator-controller-manager-7b4567c7cf-5m4mz"] Nov 28 07:12:12 crc 
kubenswrapper[4922]: I1128 07:12:12.847768 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cb68m\" (UniqueName: \"kubernetes.io/projected/743984bb-d44f-4721-b9a0-d12f71feb3e9-kube-api-access-cb68m\") pod \"barbican-operator-controller-manager-7b64f4fb85-7wc2c\" (UID: \"743984bb-d44f-4721-b9a0-d12f71feb3e9\") " pod="openstack-operators/barbican-operator-controller-manager-7b64f4fb85-7wc2c" Nov 28 07:12:12 crc kubenswrapper[4922]: I1128 07:12:12.847811 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2rdbg\" (UniqueName: \"kubernetes.io/projected/5b5e379b-d7c5-45ae-81f8-cebce05059a0-kube-api-access-2rdbg\") pod \"cinder-operator-controller-manager-6b7f75547b-njqq9\" (UID: \"5b5e379b-d7c5-45ae-81f8-cebce05059a0\") " pod="openstack-operators/cinder-operator-controller-manager-6b7f75547b-njqq9" Nov 28 07:12:12 crc kubenswrapper[4922]: I1128 07:12:12.860338 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-886sx\" (UniqueName: \"kubernetes.io/projected/3ec4d3b2-efb1-4aa0-a5dc-a130ffc887be-kube-api-access-886sx\") pod \"glance-operator-controller-manager-589cbd6b5b-89z98\" (UID: \"3ec4d3b2-efb1-4aa0-a5dc-a130ffc887be\") " pod="openstack-operators/glance-operator-controller-manager-589cbd6b5b-89z98" Nov 28 07:12:12 crc kubenswrapper[4922]: I1128 07:12:12.879292 4922 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/manila-operator-controller-manager-5d499bf58b-4csdv"] Nov 28 07:12:12 crc kubenswrapper[4922]: I1128 07:12:12.880815 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/manila-operator-controller-manager-5d499bf58b-4csdv" Nov 28 07:12:12 crc kubenswrapper[4922]: I1128 07:12:12.886463 4922 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/mariadb-operator-controller-manager-66f4dd4bc7-bthkr"] Nov 28 07:12:12 crc kubenswrapper[4922]: I1128 07:12:12.886788 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"manila-operator-controller-manager-dockercfg-ds4v8" Nov 28 07:12:12 crc kubenswrapper[4922]: I1128 07:12:12.887907 4922 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/mariadb-operator-controller-manager-66f4dd4bc7-bthkr" Nov 28 07:12:12 crc kubenswrapper[4922]: I1128 07:12:12.894154 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"mariadb-operator-controller-manager-dockercfg-cj49f" Nov 28 07:12:12 crc kubenswrapper[4922]: I1128 07:12:12.909788 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/manila-operator-controller-manager-5d499bf58b-4csdv"] Nov 28 07:12:12 crc kubenswrapper[4922]: I1128 07:12:12.913739 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6tjvm\" (UniqueName: \"kubernetes.io/projected/9c2b26fd-dac9-45ae-b100-b46c85c11506-kube-api-access-6tjvm\") pod \"ironic-operator-controller-manager-67cb4dc6d4-cdnsq\" (UID: \"9c2b26fd-dac9-45ae-b100-b46c85c11506\") " pod="openstack-operators/ironic-operator-controller-manager-67cb4dc6d4-cdnsq" Nov 28 07:12:12 crc kubenswrapper[4922]: I1128 07:12:12.913921 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kwc69\" (UniqueName: \"kubernetes.io/projected/a2b17b7d-a398-4ce6-9e4a-6d80e1e97369-kube-api-access-kwc69\") pod \"infra-operator-controller-manager-57548d458d-x2bzf\" (UID: \"a2b17b7d-a398-4ce6-9e4a-6d80e1e97369\") " pod="openstack-operators/infra-operator-controller-manager-57548d458d-x2bzf" Nov 28 07:12:12 crc kubenswrapper[4922]: I1128 07:12:12.914068 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4sl5h\" (UniqueName: \"kubernetes.io/projected/a428f994-d14f-4a4a-aec2-ce114d56f7a9-kube-api-access-4sl5h\") pod \"heat-operator-controller-manager-5b77f656f-vggwt\" (UID: \"a428f994-d14f-4a4a-aec2-ce114d56f7a9\") " pod="openstack-operators/heat-operator-controller-manager-5b77f656f-vggwt" Nov 28 07:12:12 crc kubenswrapper[4922]: I1128 07:12:12.914266 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-55hw5\" (UniqueName: \"kubernetes.io/projected/b0972771-f2d5-4f8d-91cb-ef2fde0b536b-kube-api-access-55hw5\") pod \"horizon-operator-controller-manager-5d494799bf-htnmk\" (UID: \"b0972771-f2d5-4f8d-91cb-ef2fde0b536b\") " pod="openstack-operators/horizon-operator-controller-manager-5d494799bf-htnmk" Nov 28 07:12:12 crc kubenswrapper[4922]: I1128 07:12:12.914384 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vks44\" (UniqueName: \"kubernetes.io/projected/1ecf6399-ca02-4591-89cd-1a0731a5a75c-kube-api-access-vks44\") pod \"keystone-operator-controller-manager-7b4567c7cf-5m4mz\" (UID: \"1ecf6399-ca02-4591-89cd-1a0731a5a75c\") " pod="openstack-operators/keystone-operator-controller-manager-7b4567c7cf-5m4mz" Nov 28 07:12:12 crc kubenswrapper[4922]: I1128 07:12:12.914496 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dpdxl\" (UniqueName: \"kubernetes.io/projected/8577290f-ffdd-49ed-8666-6d5ca2323102-kube-api-access-dpdxl\") pod \"designate-operator-controller-manager-955677c94-6kwvm\" (UID: \"8577290f-ffdd-49ed-8666-6d5ca2323102\") " pod="openstack-operators/designate-operator-controller-manager-955677c94-6kwvm" Nov 28 07:12:12 crc kubenswrapper[4922]: I1128 07:12:12.914605 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: 
\"kubernetes.io/secret/a2b17b7d-a398-4ce6-9e4a-6d80e1e97369-cert\") pod \"infra-operator-controller-manager-57548d458d-x2bzf\" (UID: \"a2b17b7d-a398-4ce6-9e4a-6d80e1e97369\") " pod="openstack-operators/infra-operator-controller-manager-57548d458d-x2bzf" Nov 28 07:12:12 crc kubenswrapper[4922]: I1128 07:12:12.930392 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/mariadb-operator-controller-manager-66f4dd4bc7-bthkr"] Nov 28 07:12:12 crc kubenswrapper[4922]: I1128 07:12:12.946693 4922 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/neutron-operator-controller-manager-6fdcddb789-5bl8t"] Nov 28 07:12:12 crc kubenswrapper[4922]: I1128 07:12:12.947996 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/neutron-operator-controller-manager-6fdcddb789-5bl8t" Nov 28 07:12:12 crc kubenswrapper[4922]: I1128 07:12:12.950192 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/barbican-operator-controller-manager-7b64f4fb85-7wc2c" Nov 28 07:12:12 crc kubenswrapper[4922]: I1128 07:12:12.954129 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-55hw5\" (UniqueName: \"kubernetes.io/projected/b0972771-f2d5-4f8d-91cb-ef2fde0b536b-kube-api-access-55hw5\") pod \"horizon-operator-controller-manager-5d494799bf-htnmk\" (UID: \"b0972771-f2d5-4f8d-91cb-ef2fde0b536b\") " pod="openstack-operators/horizon-operator-controller-manager-5d494799bf-htnmk" Nov 28 07:12:12 crc kubenswrapper[4922]: I1128 07:12:12.959783 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4sl5h\" (UniqueName: \"kubernetes.io/projected/a428f994-d14f-4a4a-aec2-ce114d56f7a9-kube-api-access-4sl5h\") pod \"heat-operator-controller-manager-5b77f656f-vggwt\" (UID: \"a428f994-d14f-4a4a-aec2-ce114d56f7a9\") " pod="openstack-operators/heat-operator-controller-manager-5b77f656f-vggwt" Nov 28 07:12:12 crc kubenswrapper[4922]: I1128 07:12:12.960119 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"neutron-operator-controller-manager-dockercfg-pgpfz" Nov 28 07:12:12 crc kubenswrapper[4922]: I1128 07:12:12.962028 4922 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/nova-operator-controller-manager-79556f57fc-jbsc8"] Nov 28 07:12:12 crc kubenswrapper[4922]: I1128 07:12:12.962732 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dpdxl\" (UniqueName: \"kubernetes.io/projected/8577290f-ffdd-49ed-8666-6d5ca2323102-kube-api-access-dpdxl\") pod \"designate-operator-controller-manager-955677c94-6kwvm\" (UID: \"8577290f-ffdd-49ed-8666-6d5ca2323102\") " pod="openstack-operators/designate-operator-controller-manager-955677c94-6kwvm" Nov 28 07:12:12 crc kubenswrapper[4922]: I1128 07:12:12.962976 4922 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/nova-operator-controller-manager-79556f57fc-jbsc8" Nov 28 07:12:12 crc kubenswrapper[4922]: I1128 07:12:12.965508 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"nova-operator-controller-manager-dockercfg-2jbdj" Nov 28 07:12:12 crc kubenswrapper[4922]: I1128 07:12:12.972696 4922 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/octavia-operator-controller-manager-64cdc6ff96-74d6v"] Nov 28 07:12:12 crc kubenswrapper[4922]: I1128 07:12:12.973529 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/cinder-operator-controller-manager-6b7f75547b-njqq9" Nov 28 07:12:12 crc kubenswrapper[4922]: I1128 07:12:12.973640 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/octavia-operator-controller-manager-64cdc6ff96-74d6v" Nov 28 07:12:12 crc kubenswrapper[4922]: I1128 07:12:12.984814 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/neutron-operator-controller-manager-6fdcddb789-5bl8t"] Nov 28 07:12:12 crc kubenswrapper[4922]: I1128 07:12:12.992434 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"octavia-operator-controller-manager-dockercfg-p7ptf" Nov 28 07:12:13 crc kubenswrapper[4922]: I1128 07:12:13.011320 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/glance-operator-controller-manager-589cbd6b5b-89z98" Nov 28 07:12:13 crc kubenswrapper[4922]: I1128 07:12:13.015270 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-q8652\" (UniqueName: \"kubernetes.io/projected/b32f9a0b-4c07-433c-b02a-aed24265ae16-kube-api-access-q8652\") pod \"neutron-operator-controller-manager-6fdcddb789-5bl8t\" (UID: \"b32f9a0b-4c07-433c-b02a-aed24265ae16\") " pod="openstack-operators/neutron-operator-controller-manager-6fdcddb789-5bl8t" Nov 28 07:12:13 crc kubenswrapper[4922]: I1128 07:12:13.015308 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/a2b17b7d-a398-4ce6-9e4a-6d80e1e97369-cert\") pod \"infra-operator-controller-manager-57548d458d-x2bzf\" (UID: \"a2b17b7d-a398-4ce6-9e4a-6d80e1e97369\") " pod="openstack-operators/infra-operator-controller-manager-57548d458d-x2bzf" Nov 28 07:12:13 crc kubenswrapper[4922]: I1128 07:12:13.015343 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xcfs7\" (UniqueName: \"kubernetes.io/projected/31bc74a9-0e17-4400-bef5-d36ff53831dd-kube-api-access-xcfs7\") pod \"mariadb-operator-controller-manager-66f4dd4bc7-bthkr\" (UID: \"31bc74a9-0e17-4400-bef5-d36ff53831dd\") " pod="openstack-operators/mariadb-operator-controller-manager-66f4dd4bc7-bthkr" Nov 28 07:12:13 crc kubenswrapper[4922]: I1128 07:12:13.015368 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vzcmt\" (UniqueName: \"kubernetes.io/projected/0a20a7e6-e9a5-433a-bf5e-d8f62053f611-kube-api-access-vzcmt\") pod \"manila-operator-controller-manager-5d499bf58b-4csdv\" (UID: \"0a20a7e6-e9a5-433a-bf5e-d8f62053f611\") " pod="openstack-operators/manila-operator-controller-manager-5d499bf58b-4csdv" Nov 28 07:12:13 crc kubenswrapper[4922]: I1128 07:12:13.015389 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for 
volume \"kube-api-access-6tjvm\" (UniqueName: \"kubernetes.io/projected/9c2b26fd-dac9-45ae-b100-b46c85c11506-kube-api-access-6tjvm\") pod \"ironic-operator-controller-manager-67cb4dc6d4-cdnsq\" (UID: \"9c2b26fd-dac9-45ae-b100-b46c85c11506\") " pod="openstack-operators/ironic-operator-controller-manager-67cb4dc6d4-cdnsq" Nov 28 07:12:13 crc kubenswrapper[4922]: I1128 07:12:13.015408 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-q52j2\" (UniqueName: \"kubernetes.io/projected/c905882d-e743-49a3-b331-f3b7cc0d8649-kube-api-access-q52j2\") pod \"nova-operator-controller-manager-79556f57fc-jbsc8\" (UID: \"c905882d-e743-49a3-b331-f3b7cc0d8649\") " pod="openstack-operators/nova-operator-controller-manager-79556f57fc-jbsc8" Nov 28 07:12:13 crc kubenswrapper[4922]: I1128 07:12:13.015429 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kwc69\" (UniqueName: \"kubernetes.io/projected/a2b17b7d-a398-4ce6-9e4a-6d80e1e97369-kube-api-access-kwc69\") pod \"infra-operator-controller-manager-57548d458d-x2bzf\" (UID: \"a2b17b7d-a398-4ce6-9e4a-6d80e1e97369\") " pod="openstack-operators/infra-operator-controller-manager-57548d458d-x2bzf" Nov 28 07:12:13 crc kubenswrapper[4922]: I1128 07:12:13.015477 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vks44\" (UniqueName: \"kubernetes.io/projected/1ecf6399-ca02-4591-89cd-1a0731a5a75c-kube-api-access-vks44\") pod \"keystone-operator-controller-manager-7b4567c7cf-5m4mz\" (UID: \"1ecf6399-ca02-4591-89cd-1a0731a5a75c\") " pod="openstack-operators/keystone-operator-controller-manager-7b4567c7cf-5m4mz" Nov 28 07:12:13 crc kubenswrapper[4922]: E1128 07:12:13.015785 4922 secret.go:188] Couldn't get secret openstack-operators/infra-operator-webhook-server-cert: secret "infra-operator-webhook-server-cert" not found Nov 28 07:12:13 crc kubenswrapper[4922]: E1128 07:12:13.015822 4922 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/a2b17b7d-a398-4ce6-9e4a-6d80e1e97369-cert podName:a2b17b7d-a398-4ce6-9e4a-6d80e1e97369 nodeName:}" failed. No retries permitted until 2025-11-28 07:12:13.515809602 +0000 UTC m=+1178.436205174 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/a2b17b7d-a398-4ce6-9e4a-6d80e1e97369-cert") pod "infra-operator-controller-manager-57548d458d-x2bzf" (UID: "a2b17b7d-a398-4ce6-9e4a-6d80e1e97369") : secret "infra-operator-webhook-server-cert" not found Nov 28 07:12:13 crc kubenswrapper[4922]: I1128 07:12:13.019956 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/octavia-operator-controller-manager-64cdc6ff96-74d6v"] Nov 28 07:12:13 crc kubenswrapper[4922]: I1128 07:12:13.031948 4922 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/designate-operator-controller-manager-955677c94-6kwvm" Nov 28 07:12:13 crc kubenswrapper[4922]: I1128 07:12:13.039948 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6tjvm\" (UniqueName: \"kubernetes.io/projected/9c2b26fd-dac9-45ae-b100-b46c85c11506-kube-api-access-6tjvm\") pod \"ironic-operator-controller-manager-67cb4dc6d4-cdnsq\" (UID: \"9c2b26fd-dac9-45ae-b100-b46c85c11506\") " pod="openstack-operators/ironic-operator-controller-manager-67cb4dc6d4-cdnsq" Nov 28 07:12:13 crc kubenswrapper[4922]: I1128 07:12:13.040403 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kwc69\" (UniqueName: \"kubernetes.io/projected/a2b17b7d-a398-4ce6-9e4a-6d80e1e97369-kube-api-access-kwc69\") pod \"infra-operator-controller-manager-57548d458d-x2bzf\" (UID: \"a2b17b7d-a398-4ce6-9e4a-6d80e1e97369\") " pod="openstack-operators/infra-operator-controller-manager-57548d458d-x2bzf" Nov 28 07:12:13 crc kubenswrapper[4922]: I1128 07:12:13.042429 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vks44\" (UniqueName: \"kubernetes.io/projected/1ecf6399-ca02-4591-89cd-1a0731a5a75c-kube-api-access-vks44\") pod \"keystone-operator-controller-manager-7b4567c7cf-5m4mz\" (UID: \"1ecf6399-ca02-4591-89cd-1a0731a5a75c\") " pod="openstack-operators/keystone-operator-controller-manager-7b4567c7cf-5m4mz" Nov 28 07:12:13 crc kubenswrapper[4922]: I1128 07:12:13.051288 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/nova-operator-controller-manager-79556f57fc-jbsc8"] Nov 28 07:12:13 crc kubenswrapper[4922]: I1128 07:12:13.066613 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/heat-operator-controller-manager-5b77f656f-vggwt" Nov 28 07:12:13 crc kubenswrapper[4922]: I1128 07:12:13.082634 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/horizon-operator-controller-manager-5d494799bf-htnmk" Nov 28 07:12:13 crc kubenswrapper[4922]: I1128 07:12:13.093956 4922 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/openstack-baremetal-operator-controller-manager-5d9f9695db9c48b"] Nov 28 07:12:13 crc kubenswrapper[4922]: I1128 07:12:13.094932 4922 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-baremetal-operator-controller-manager-5d9f9695db9c48b" Nov 28 07:12:13 crc kubenswrapper[4922]: I1128 07:12:13.124336 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-q8652\" (UniqueName: \"kubernetes.io/projected/b32f9a0b-4c07-433c-b02a-aed24265ae16-kube-api-access-q8652\") pod \"neutron-operator-controller-manager-6fdcddb789-5bl8t\" (UID: \"b32f9a0b-4c07-433c-b02a-aed24265ae16\") " pod="openstack-operators/neutron-operator-controller-manager-6fdcddb789-5bl8t" Nov 28 07:12:13 crc kubenswrapper[4922]: I1128 07:12:13.124415 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xcfs7\" (UniqueName: \"kubernetes.io/projected/31bc74a9-0e17-4400-bef5-d36ff53831dd-kube-api-access-xcfs7\") pod \"mariadb-operator-controller-manager-66f4dd4bc7-bthkr\" (UID: \"31bc74a9-0e17-4400-bef5-d36ff53831dd\") " pod="openstack-operators/mariadb-operator-controller-manager-66f4dd4bc7-bthkr" Nov 28 07:12:13 crc kubenswrapper[4922]: I1128 07:12:13.124440 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vzcmt\" (UniqueName: \"kubernetes.io/projected/0a20a7e6-e9a5-433a-bf5e-d8f62053f611-kube-api-access-vzcmt\") pod \"manila-operator-controller-manager-5d499bf58b-4csdv\" (UID: \"0a20a7e6-e9a5-433a-bf5e-d8f62053f611\") " pod="openstack-operators/manila-operator-controller-manager-5d499bf58b-4csdv" Nov 28 07:12:13 crc kubenswrapper[4922]: I1128 07:12:13.124465 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-q52j2\" (UniqueName: \"kubernetes.io/projected/c905882d-e743-49a3-b331-f3b7cc0d8649-kube-api-access-q52j2\") pod \"nova-operator-controller-manager-79556f57fc-jbsc8\" (UID: \"c905882d-e743-49a3-b331-f3b7cc0d8649\") " pod="openstack-operators/nova-operator-controller-manager-79556f57fc-jbsc8" Nov 28 07:12:13 crc kubenswrapper[4922]: I1128 07:12:13.124495 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rklk7\" (UniqueName: \"kubernetes.io/projected/fafc81ca-dc80-4ef4-a8e3-9e63f5f45a2d-kube-api-access-rklk7\") pod \"octavia-operator-controller-manager-64cdc6ff96-74d6v\" (UID: \"fafc81ca-dc80-4ef4-a8e3-9e63f5f45a2d\") " pod="openstack-operators/octavia-operator-controller-manager-64cdc6ff96-74d6v" Nov 28 07:12:13 crc kubenswrapper[4922]: I1128 07:12:13.126836 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-baremetal-operator-webhook-server-cert" Nov 28 07:12:13 crc kubenswrapper[4922]: I1128 07:12:13.128655 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-baremetal-operator-controller-manager-dockercfg-cs4n7" Nov 28 07:12:13 crc kubenswrapper[4922]: I1128 07:12:13.132683 4922 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/ironic-operator-controller-manager-67cb4dc6d4-cdnsq" Nov 28 07:12:13 crc kubenswrapper[4922]: I1128 07:12:13.168583 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-q8652\" (UniqueName: \"kubernetes.io/projected/b32f9a0b-4c07-433c-b02a-aed24265ae16-kube-api-access-q8652\") pod \"neutron-operator-controller-manager-6fdcddb789-5bl8t\" (UID: \"b32f9a0b-4c07-433c-b02a-aed24265ae16\") " pod="openstack-operators/neutron-operator-controller-manager-6fdcddb789-5bl8t" Nov 28 07:12:13 crc kubenswrapper[4922]: I1128 07:12:13.173808 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-q52j2\" (UniqueName: \"kubernetes.io/projected/c905882d-e743-49a3-b331-f3b7cc0d8649-kube-api-access-q52j2\") pod \"nova-operator-controller-manager-79556f57fc-jbsc8\" (UID: \"c905882d-e743-49a3-b331-f3b7cc0d8649\") " pod="openstack-operators/nova-operator-controller-manager-79556f57fc-jbsc8" Nov 28 07:12:13 crc kubenswrapper[4922]: I1128 07:12:13.175895 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xcfs7\" (UniqueName: \"kubernetes.io/projected/31bc74a9-0e17-4400-bef5-d36ff53831dd-kube-api-access-xcfs7\") pod \"mariadb-operator-controller-manager-66f4dd4bc7-bthkr\" (UID: \"31bc74a9-0e17-4400-bef5-d36ff53831dd\") " pod="openstack-operators/mariadb-operator-controller-manager-66f4dd4bc7-bthkr" Nov 28 07:12:13 crc kubenswrapper[4922]: I1128 07:12:13.187120 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vzcmt\" (UniqueName: \"kubernetes.io/projected/0a20a7e6-e9a5-433a-bf5e-d8f62053f611-kube-api-access-vzcmt\") pod \"manila-operator-controller-manager-5d499bf58b-4csdv\" (UID: \"0a20a7e6-e9a5-433a-bf5e-d8f62053f611\") " pod="openstack-operators/manila-operator-controller-manager-5d499bf58b-4csdv" Nov 28 07:12:13 crc kubenswrapper[4922]: I1128 07:12:13.196702 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/keystone-operator-controller-manager-7b4567c7cf-5m4mz" Nov 28 07:12:13 crc kubenswrapper[4922]: I1128 07:12:13.217500 4922 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/placement-operator-controller-manager-57988cc5b5-jgwt8"] Nov 28 07:12:13 crc kubenswrapper[4922]: I1128 07:12:13.218516 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/manila-operator-controller-manager-5d499bf58b-4csdv" Nov 28 07:12:13 crc kubenswrapper[4922]: I1128 07:12:13.219728 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/placement-operator-controller-manager-57988cc5b5-jgwt8" Nov 28 07:12:13 crc kubenswrapper[4922]: I1128 07:12:13.223832 4922 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/mariadb-operator-controller-manager-66f4dd4bc7-bthkr" Nov 28 07:12:13 crc kubenswrapper[4922]: I1128 07:12:13.225540 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hngfl\" (UniqueName: \"kubernetes.io/projected/ff496359-1983-4a83-a344-3fb11e4587d9-kube-api-access-hngfl\") pod \"openstack-baremetal-operator-controller-manager-5d9f9695db9c48b\" (UID: \"ff496359-1983-4a83-a344-3fb11e4587d9\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-5d9f9695db9c48b" Nov 28 07:12:13 crc kubenswrapper[4922]: I1128 07:12:13.225632 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rklk7\" (UniqueName: \"kubernetes.io/projected/fafc81ca-dc80-4ef4-a8e3-9e63f5f45a2d-kube-api-access-rklk7\") pod \"octavia-operator-controller-manager-64cdc6ff96-74d6v\" (UID: \"fafc81ca-dc80-4ef4-a8e3-9e63f5f45a2d\") " pod="openstack-operators/octavia-operator-controller-manager-64cdc6ff96-74d6v" Nov 28 07:12:13 crc kubenswrapper[4922]: I1128 07:12:13.225669 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/ff496359-1983-4a83-a344-3fb11e4587d9-cert\") pod \"openstack-baremetal-operator-controller-manager-5d9f9695db9c48b\" (UID: \"ff496359-1983-4a83-a344-3fb11e4587d9\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-5d9f9695db9c48b" Nov 28 07:12:13 crc kubenswrapper[4922]: I1128 07:12:13.235513 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"placement-operator-controller-manager-dockercfg-s9swh" Nov 28 07:12:13 crc kubenswrapper[4922]: I1128 07:12:13.247853 4922 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/ovn-operator-controller-manager-56897c768d-rl77z"] Nov 28 07:12:13 crc kubenswrapper[4922]: I1128 07:12:13.248923 4922 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/ovn-operator-controller-manager-56897c768d-rl77z" Nov 28 07:12:13 crc kubenswrapper[4922]: I1128 07:12:13.258651 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"ovn-operator-controller-manager-dockercfg-kgcqk" Nov 28 07:12:13 crc kubenswrapper[4922]: I1128 07:12:13.266510 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/placement-operator-controller-manager-57988cc5b5-jgwt8"] Nov 28 07:12:13 crc kubenswrapper[4922]: I1128 07:12:13.293171 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-baremetal-operator-controller-manager-5d9f9695db9c48b"] Nov 28 07:12:13 crc kubenswrapper[4922]: I1128 07:12:13.300096 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/ovn-operator-controller-manager-56897c768d-rl77z"] Nov 28 07:12:13 crc kubenswrapper[4922]: I1128 07:12:13.302778 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rklk7\" (UniqueName: \"kubernetes.io/projected/fafc81ca-dc80-4ef4-a8e3-9e63f5f45a2d-kube-api-access-rklk7\") pod \"octavia-operator-controller-manager-64cdc6ff96-74d6v\" (UID: \"fafc81ca-dc80-4ef4-a8e3-9e63f5f45a2d\") " pod="openstack-operators/octavia-operator-controller-manager-64cdc6ff96-74d6v" Nov 28 07:12:13 crc kubenswrapper[4922]: I1128 07:12:13.315425 4922 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/swift-operator-controller-manager-d77b94747-4f6j5"] Nov 28 07:12:13 crc kubenswrapper[4922]: I1128 07:12:13.316764 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/swift-operator-controller-manager-d77b94747-4f6j5" Nov 28 07:12:13 crc kubenswrapper[4922]: I1128 07:12:13.320000 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"swift-operator-controller-manager-dockercfg-9j8kx" Nov 28 07:12:13 crc kubenswrapper[4922]: I1128 07:12:13.323592 4922 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/neutron-operator-controller-manager-6fdcddb789-5bl8t" Nov 28 07:12:13 crc kubenswrapper[4922]: I1128 07:12:13.327532 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hngfl\" (UniqueName: \"kubernetes.io/projected/ff496359-1983-4a83-a344-3fb11e4587d9-kube-api-access-hngfl\") pod \"openstack-baremetal-operator-controller-manager-5d9f9695db9c48b\" (UID: \"ff496359-1983-4a83-a344-3fb11e4587d9\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-5d9f9695db9c48b" Nov 28 07:12:13 crc kubenswrapper[4922]: I1128 07:12:13.327832 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-b6gwq\" (UniqueName: \"kubernetes.io/projected/210cb1b4-9abb-454e-9f7e-9dbd52ec0c9b-kube-api-access-b6gwq\") pod \"placement-operator-controller-manager-57988cc5b5-jgwt8\" (UID: \"210cb1b4-9abb-454e-9f7e-9dbd52ec0c9b\") " pod="openstack-operators/placement-operator-controller-manager-57988cc5b5-jgwt8" Nov 28 07:12:13 crc kubenswrapper[4922]: I1128 07:12:13.327863 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/ff496359-1983-4a83-a344-3fb11e4587d9-cert\") pod \"openstack-baremetal-operator-controller-manager-5d9f9695db9c48b\" (UID: \"ff496359-1983-4a83-a344-3fb11e4587d9\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-5d9f9695db9c48b" Nov 28 07:12:13 crc kubenswrapper[4922]: I1128 07:12:13.327887 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9fnn4\" (UniqueName: \"kubernetes.io/projected/5ab2d7c3-71a5-4337-acee-8ed5e7f09dbf-kube-api-access-9fnn4\") pod \"ovn-operator-controller-manager-56897c768d-rl77z\" (UID: \"5ab2d7c3-71a5-4337-acee-8ed5e7f09dbf\") " pod="openstack-operators/ovn-operator-controller-manager-56897c768d-rl77z" Nov 28 07:12:13 crc kubenswrapper[4922]: E1128 07:12:13.328538 4922 secret.go:188] Couldn't get secret openstack-operators/openstack-baremetal-operator-webhook-server-cert: secret "openstack-baremetal-operator-webhook-server-cert" not found Nov 28 07:12:13 crc kubenswrapper[4922]: E1128 07:12:13.328581 4922 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/ff496359-1983-4a83-a344-3fb11e4587d9-cert podName:ff496359-1983-4a83-a344-3fb11e4587d9 nodeName:}" failed. No retries permitted until 2025-11-28 07:12:13.828568593 +0000 UTC m=+1178.748964175 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/ff496359-1983-4a83-a344-3fb11e4587d9-cert") pod "openstack-baremetal-operator-controller-manager-5d9f9695db9c48b" (UID: "ff496359-1983-4a83-a344-3fb11e4587d9") : secret "openstack-baremetal-operator-webhook-server-cert" not found Nov 28 07:12:13 crc kubenswrapper[4922]: I1128 07:12:13.330747 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/nova-operator-controller-manager-79556f57fc-jbsc8" Nov 28 07:12:13 crc kubenswrapper[4922]: I1128 07:12:13.340421 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/swift-operator-controller-manager-d77b94747-4f6j5"] Nov 28 07:12:13 crc kubenswrapper[4922]: I1128 07:12:13.342847 4922 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/octavia-operator-controller-manager-64cdc6ff96-74d6v" Nov 28 07:12:13 crc kubenswrapper[4922]: I1128 07:12:13.352492 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hngfl\" (UniqueName: \"kubernetes.io/projected/ff496359-1983-4a83-a344-3fb11e4587d9-kube-api-access-hngfl\") pod \"openstack-baremetal-operator-controller-manager-5d9f9695db9c48b\" (UID: \"ff496359-1983-4a83-a344-3fb11e4587d9\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-5d9f9695db9c48b" Nov 28 07:12:13 crc kubenswrapper[4922]: I1128 07:12:13.356116 4922 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/telemetry-operator-controller-manager-76cc84c6bb-5hpq8"] Nov 28 07:12:13 crc kubenswrapper[4922]: I1128 07:12:13.357271 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/telemetry-operator-controller-manager-76cc84c6bb-5hpq8" Nov 28 07:12:13 crc kubenswrapper[4922]: I1128 07:12:13.367647 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"telemetry-operator-controller-manager-dockercfg-4p66p" Nov 28 07:12:13 crc kubenswrapper[4922]: I1128 07:12:13.378029 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/telemetry-operator-controller-manager-76cc84c6bb-5hpq8"] Nov 28 07:12:13 crc kubenswrapper[4922]: I1128 07:12:13.397464 4922 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/test-operator-controller-manager-5cd6c7f4c8-7mrxz"] Nov 28 07:12:13 crc kubenswrapper[4922]: I1128 07:12:13.398523 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/test-operator-controller-manager-5cd6c7f4c8-7mrxz" Nov 28 07:12:13 crc kubenswrapper[4922]: I1128 07:12:13.403794 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"test-operator-controller-manager-dockercfg-jlgpf" Nov 28 07:12:13 crc kubenswrapper[4922]: I1128 07:12:13.414857 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/test-operator-controller-manager-5cd6c7f4c8-7mrxz"] Nov 28 07:12:13 crc kubenswrapper[4922]: I1128 07:12:13.414894 4922 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/watcher-operator-controller-manager-656dcb59d4-h76jf"] Nov 28 07:12:13 crc kubenswrapper[4922]: I1128 07:12:13.415835 4922 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/watcher-operator-controller-manager-656dcb59d4-h76jf" Nov 28 07:12:13 crc kubenswrapper[4922]: I1128 07:12:13.422264 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/watcher-operator-controller-manager-656dcb59d4-h76jf"] Nov 28 07:12:13 crc kubenswrapper[4922]: I1128 07:12:13.423235 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"watcher-operator-controller-manager-dockercfg-qstpz" Nov 28 07:12:13 crc kubenswrapper[4922]: I1128 07:12:13.428952 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9fnn4\" (UniqueName: \"kubernetes.io/projected/5ab2d7c3-71a5-4337-acee-8ed5e7f09dbf-kube-api-access-9fnn4\") pod \"ovn-operator-controller-manager-56897c768d-rl77z\" (UID: \"5ab2d7c3-71a5-4337-acee-8ed5e7f09dbf\") " pod="openstack-operators/ovn-operator-controller-manager-56897c768d-rl77z" Nov 28 07:12:13 crc kubenswrapper[4922]: I1128 07:12:13.428994 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rw248\" (UniqueName: \"kubernetes.io/projected/89dd35b2-343e-4e06-9fac-6474b93b294e-kube-api-access-rw248\") pod \"telemetry-operator-controller-manager-76cc84c6bb-5hpq8\" (UID: \"89dd35b2-343e-4e06-9fac-6474b93b294e\") " pod="openstack-operators/telemetry-operator-controller-manager-76cc84c6bb-5hpq8" Nov 28 07:12:13 crc kubenswrapper[4922]: I1128 07:12:13.429052 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-svv6d\" (UniqueName: \"kubernetes.io/projected/bbf78ced-8521-464d-a517-8ca721301421-kube-api-access-svv6d\") pod \"swift-operator-controller-manager-d77b94747-4f6j5\" (UID: \"bbf78ced-8521-464d-a517-8ca721301421\") " pod="openstack-operators/swift-operator-controller-manager-d77b94747-4f6j5" Nov 28 07:12:13 crc kubenswrapper[4922]: I1128 07:12:13.429088 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-b6gwq\" (UniqueName: \"kubernetes.io/projected/210cb1b4-9abb-454e-9f7e-9dbd52ec0c9b-kube-api-access-b6gwq\") pod \"placement-operator-controller-manager-57988cc5b5-jgwt8\" (UID: \"210cb1b4-9abb-454e-9f7e-9dbd52ec0c9b\") " pod="openstack-operators/placement-operator-controller-manager-57988cc5b5-jgwt8" Nov 28 07:12:13 crc kubenswrapper[4922]: I1128 07:12:13.447059 4922 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/openstack-operator-controller-manager-66f75ddbcc-b48l9"] Nov 28 07:12:13 crc kubenswrapper[4922]: I1128 07:12:13.447923 4922 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-operator-controller-manager-66f75ddbcc-b48l9" Nov 28 07:12:13 crc kubenswrapper[4922]: I1128 07:12:13.454644 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"metrics-server-cert" Nov 28 07:12:13 crc kubenswrapper[4922]: I1128 07:12:13.454659 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-operator-controller-manager-dockercfg-dktq5" Nov 28 07:12:13 crc kubenswrapper[4922]: I1128 07:12:13.454823 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"webhook-server-cert" Nov 28 07:12:13 crc kubenswrapper[4922]: I1128 07:12:13.455466 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9fnn4\" (UniqueName: \"kubernetes.io/projected/5ab2d7c3-71a5-4337-acee-8ed5e7f09dbf-kube-api-access-9fnn4\") pod \"ovn-operator-controller-manager-56897c768d-rl77z\" (UID: \"5ab2d7c3-71a5-4337-acee-8ed5e7f09dbf\") " pod="openstack-operators/ovn-operator-controller-manager-56897c768d-rl77z" Nov 28 07:12:13 crc kubenswrapper[4922]: I1128 07:12:13.455516 4922 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-zq5v4"] Nov 28 07:12:13 crc kubenswrapper[4922]: I1128 07:12:13.458250 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-b6gwq\" (UniqueName: \"kubernetes.io/projected/210cb1b4-9abb-454e-9f7e-9dbd52ec0c9b-kube-api-access-b6gwq\") pod \"placement-operator-controller-manager-57988cc5b5-jgwt8\" (UID: \"210cb1b4-9abb-454e-9f7e-9dbd52ec0c9b\") " pod="openstack-operators/placement-operator-controller-manager-57988cc5b5-jgwt8" Nov 28 07:12:13 crc kubenswrapper[4922]: I1128 07:12:13.458478 4922 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-zq5v4" Nov 28 07:12:13 crc kubenswrapper[4922]: I1128 07:12:13.470565 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-controller-manager-66f75ddbcc-b48l9"] Nov 28 07:12:13 crc kubenswrapper[4922]: I1128 07:12:13.470687 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"rabbitmq-cluster-operator-controller-manager-dockercfg-2dx54" Nov 28 07:12:13 crc kubenswrapper[4922]: I1128 07:12:13.478612 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-zq5v4"] Nov 28 07:12:13 crc kubenswrapper[4922]: I1128 07:12:13.532987 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-z4sfj\" (UniqueName: \"kubernetes.io/projected/857fa43c-dddb-46c5-88e2-00e359b43c66-kube-api-access-z4sfj\") pod \"test-operator-controller-manager-5cd6c7f4c8-7mrxz\" (UID: \"857fa43c-dddb-46c5-88e2-00e359b43c66\") " pod="openstack-operators/test-operator-controller-manager-5cd6c7f4c8-7mrxz" Nov 28 07:12:13 crc kubenswrapper[4922]: I1128 07:12:13.533055 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/5f800348-9092-4701-a85d-3bb8f40b51bd-metrics-certs\") pod \"openstack-operator-controller-manager-66f75ddbcc-b48l9\" (UID: \"5f800348-9092-4701-a85d-3bb8f40b51bd\") " pod="openstack-operators/openstack-operator-controller-manager-66f75ddbcc-b48l9" Nov 28 07:12:13 crc kubenswrapper[4922]: I1128 07:12:13.533088 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/5f800348-9092-4701-a85d-3bb8f40b51bd-webhook-certs\") pod \"openstack-operator-controller-manager-66f75ddbcc-b48l9\" (UID: \"5f800348-9092-4701-a85d-3bb8f40b51bd\") " pod="openstack-operators/openstack-operator-controller-manager-66f75ddbcc-b48l9" Nov 28 07:12:13 crc kubenswrapper[4922]: I1128 07:12:13.533118 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rw248\" (UniqueName: \"kubernetes.io/projected/89dd35b2-343e-4e06-9fac-6474b93b294e-kube-api-access-rw248\") pod \"telemetry-operator-controller-manager-76cc84c6bb-5hpq8\" (UID: \"89dd35b2-343e-4e06-9fac-6474b93b294e\") " pod="openstack-operators/telemetry-operator-controller-manager-76cc84c6bb-5hpq8" Nov 28 07:12:13 crc kubenswrapper[4922]: I1128 07:12:13.533135 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qzg84\" (UniqueName: \"kubernetes.io/projected/b46e91a2-650c-4ae1-afa0-6edc6768c99e-kube-api-access-qzg84\") pod \"watcher-operator-controller-manager-656dcb59d4-h76jf\" (UID: \"b46e91a2-650c-4ae1-afa0-6edc6768c99e\") " pod="openstack-operators/watcher-operator-controller-manager-656dcb59d4-h76jf" Nov 28 07:12:13 crc kubenswrapper[4922]: I1128 07:12:13.533167 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fvzdm\" (UniqueName: \"kubernetes.io/projected/7427e034-38e4-460d-aefa-15842840a7c0-kube-api-access-fvzdm\") pod \"rabbitmq-cluster-operator-manager-668c99d594-zq5v4\" (UID: \"7427e034-38e4-460d-aefa-15842840a7c0\") " pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-zq5v4" Nov 28 07:12:13 
crc kubenswrapper[4922]: I1128 07:12:13.533234 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-msth9\" (UniqueName: \"kubernetes.io/projected/5f800348-9092-4701-a85d-3bb8f40b51bd-kube-api-access-msth9\") pod \"openstack-operator-controller-manager-66f75ddbcc-b48l9\" (UID: \"5f800348-9092-4701-a85d-3bb8f40b51bd\") " pod="openstack-operators/openstack-operator-controller-manager-66f75ddbcc-b48l9" Nov 28 07:12:13 crc kubenswrapper[4922]: I1128 07:12:13.533281 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/a2b17b7d-a398-4ce6-9e4a-6d80e1e97369-cert\") pod \"infra-operator-controller-manager-57548d458d-x2bzf\" (UID: \"a2b17b7d-a398-4ce6-9e4a-6d80e1e97369\") " pod="openstack-operators/infra-operator-controller-manager-57548d458d-x2bzf" Nov 28 07:12:13 crc kubenswrapper[4922]: I1128 07:12:13.533300 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-svv6d\" (UniqueName: \"kubernetes.io/projected/bbf78ced-8521-464d-a517-8ca721301421-kube-api-access-svv6d\") pod \"swift-operator-controller-manager-d77b94747-4f6j5\" (UID: \"bbf78ced-8521-464d-a517-8ca721301421\") " pod="openstack-operators/swift-operator-controller-manager-d77b94747-4f6j5" Nov 28 07:12:13 crc kubenswrapper[4922]: E1128 07:12:13.534419 4922 secret.go:188] Couldn't get secret openstack-operators/infra-operator-webhook-server-cert: secret "infra-operator-webhook-server-cert" not found Nov 28 07:12:13 crc kubenswrapper[4922]: E1128 07:12:13.534479 4922 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/a2b17b7d-a398-4ce6-9e4a-6d80e1e97369-cert podName:a2b17b7d-a398-4ce6-9e4a-6d80e1e97369 nodeName:}" failed. No retries permitted until 2025-11-28 07:12:14.534460519 +0000 UTC m=+1179.454856171 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/a2b17b7d-a398-4ce6-9e4a-6d80e1e97369-cert") pod "infra-operator-controller-manager-57548d458d-x2bzf" (UID: "a2b17b7d-a398-4ce6-9e4a-6d80e1e97369") : secret "infra-operator-webhook-server-cert" not found Nov 28 07:12:13 crc kubenswrapper[4922]: I1128 07:12:13.560602 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-svv6d\" (UniqueName: \"kubernetes.io/projected/bbf78ced-8521-464d-a517-8ca721301421-kube-api-access-svv6d\") pod \"swift-operator-controller-manager-d77b94747-4f6j5\" (UID: \"bbf78ced-8521-464d-a517-8ca721301421\") " pod="openstack-operators/swift-operator-controller-manager-d77b94747-4f6j5" Nov 28 07:12:13 crc kubenswrapper[4922]: I1128 07:12:13.568266 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rw248\" (UniqueName: \"kubernetes.io/projected/89dd35b2-343e-4e06-9fac-6474b93b294e-kube-api-access-rw248\") pod \"telemetry-operator-controller-manager-76cc84c6bb-5hpq8\" (UID: \"89dd35b2-343e-4e06-9fac-6474b93b294e\") " pod="openstack-operators/telemetry-operator-controller-manager-76cc84c6bb-5hpq8" Nov 28 07:12:13 crc kubenswrapper[4922]: I1128 07:12:13.599528 4922 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/placement-operator-controller-manager-57988cc5b5-jgwt8" Nov 28 07:12:13 crc kubenswrapper[4922]: I1128 07:12:13.634206 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/5f800348-9092-4701-a85d-3bb8f40b51bd-webhook-certs\") pod \"openstack-operator-controller-manager-66f75ddbcc-b48l9\" (UID: \"5f800348-9092-4701-a85d-3bb8f40b51bd\") " pod="openstack-operators/openstack-operator-controller-manager-66f75ddbcc-b48l9" Nov 28 07:12:13 crc kubenswrapper[4922]: I1128 07:12:13.634271 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qzg84\" (UniqueName: \"kubernetes.io/projected/b46e91a2-650c-4ae1-afa0-6edc6768c99e-kube-api-access-qzg84\") pod \"watcher-operator-controller-manager-656dcb59d4-h76jf\" (UID: \"b46e91a2-650c-4ae1-afa0-6edc6768c99e\") " pod="openstack-operators/watcher-operator-controller-manager-656dcb59d4-h76jf" Nov 28 07:12:13 crc kubenswrapper[4922]: I1128 07:12:13.634295 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fvzdm\" (UniqueName: \"kubernetes.io/projected/7427e034-38e4-460d-aefa-15842840a7c0-kube-api-access-fvzdm\") pod \"rabbitmq-cluster-operator-manager-668c99d594-zq5v4\" (UID: \"7427e034-38e4-460d-aefa-15842840a7c0\") " pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-zq5v4" Nov 28 07:12:13 crc kubenswrapper[4922]: I1128 07:12:13.634347 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-msth9\" (UniqueName: \"kubernetes.io/projected/5f800348-9092-4701-a85d-3bb8f40b51bd-kube-api-access-msth9\") pod \"openstack-operator-controller-manager-66f75ddbcc-b48l9\" (UID: \"5f800348-9092-4701-a85d-3bb8f40b51bd\") " pod="openstack-operators/openstack-operator-controller-manager-66f75ddbcc-b48l9" Nov 28 07:12:13 crc kubenswrapper[4922]: I1128 07:12:13.634427 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-z4sfj\" (UniqueName: \"kubernetes.io/projected/857fa43c-dddb-46c5-88e2-00e359b43c66-kube-api-access-z4sfj\") pod \"test-operator-controller-manager-5cd6c7f4c8-7mrxz\" (UID: \"857fa43c-dddb-46c5-88e2-00e359b43c66\") " pod="openstack-operators/test-operator-controller-manager-5cd6c7f4c8-7mrxz" Nov 28 07:12:13 crc kubenswrapper[4922]: I1128 07:12:13.634465 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/5f800348-9092-4701-a85d-3bb8f40b51bd-metrics-certs\") pod \"openstack-operator-controller-manager-66f75ddbcc-b48l9\" (UID: \"5f800348-9092-4701-a85d-3bb8f40b51bd\") " pod="openstack-operators/openstack-operator-controller-manager-66f75ddbcc-b48l9" Nov 28 07:12:13 crc kubenswrapper[4922]: E1128 07:12:13.634724 4922 secret.go:188] Couldn't get secret openstack-operators/webhook-server-cert: secret "webhook-server-cert" not found Nov 28 07:12:13 crc kubenswrapper[4922]: E1128 07:12:13.634817 4922 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5f800348-9092-4701-a85d-3bb8f40b51bd-webhook-certs podName:5f800348-9092-4701-a85d-3bb8f40b51bd nodeName:}" failed. No retries permitted until 2025-11-28 07:12:14.134799139 +0000 UTC m=+1179.055194721 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "webhook-certs" (UniqueName: "kubernetes.io/secret/5f800348-9092-4701-a85d-3bb8f40b51bd-webhook-certs") pod "openstack-operator-controller-manager-66f75ddbcc-b48l9" (UID: "5f800348-9092-4701-a85d-3bb8f40b51bd") : secret "webhook-server-cert" not found Nov 28 07:12:13 crc kubenswrapper[4922]: E1128 07:12:13.644098 4922 secret.go:188] Couldn't get secret openstack-operators/metrics-server-cert: secret "metrics-server-cert" not found Nov 28 07:12:13 crc kubenswrapper[4922]: E1128 07:12:13.644202 4922 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5f800348-9092-4701-a85d-3bb8f40b51bd-metrics-certs podName:5f800348-9092-4701-a85d-3bb8f40b51bd nodeName:}" failed. No retries permitted until 2025-11-28 07:12:14.144182449 +0000 UTC m=+1179.064578031 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/5f800348-9092-4701-a85d-3bb8f40b51bd-metrics-certs") pod "openstack-operator-controller-manager-66f75ddbcc-b48l9" (UID: "5f800348-9092-4701-a85d-3bb8f40b51bd") : secret "metrics-server-cert" not found Nov 28 07:12:13 crc kubenswrapper[4922]: I1128 07:12:13.657095 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qzg84\" (UniqueName: \"kubernetes.io/projected/b46e91a2-650c-4ae1-afa0-6edc6768c99e-kube-api-access-qzg84\") pod \"watcher-operator-controller-manager-656dcb59d4-h76jf\" (UID: \"b46e91a2-650c-4ae1-afa0-6edc6768c99e\") " pod="openstack-operators/watcher-operator-controller-manager-656dcb59d4-h76jf" Nov 28 07:12:13 crc kubenswrapper[4922]: I1128 07:12:13.661054 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-msth9\" (UniqueName: \"kubernetes.io/projected/5f800348-9092-4701-a85d-3bb8f40b51bd-kube-api-access-msth9\") pod \"openstack-operator-controller-manager-66f75ddbcc-b48l9\" (UID: \"5f800348-9092-4701-a85d-3bb8f40b51bd\") " pod="openstack-operators/openstack-operator-controller-manager-66f75ddbcc-b48l9" Nov 28 07:12:13 crc kubenswrapper[4922]: I1128 07:12:13.661512 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-z4sfj\" (UniqueName: \"kubernetes.io/projected/857fa43c-dddb-46c5-88e2-00e359b43c66-kube-api-access-z4sfj\") pod \"test-operator-controller-manager-5cd6c7f4c8-7mrxz\" (UID: \"857fa43c-dddb-46c5-88e2-00e359b43c66\") " pod="openstack-operators/test-operator-controller-manager-5cd6c7f4c8-7mrxz" Nov 28 07:12:13 crc kubenswrapper[4922]: I1128 07:12:13.662081 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fvzdm\" (UniqueName: \"kubernetes.io/projected/7427e034-38e4-460d-aefa-15842840a7c0-kube-api-access-fvzdm\") pod \"rabbitmq-cluster-operator-manager-668c99d594-zq5v4\" (UID: \"7427e034-38e4-460d-aefa-15842840a7c0\") " pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-zq5v4" Nov 28 07:12:13 crc kubenswrapper[4922]: I1128 07:12:13.732328 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/ovn-operator-controller-manager-56897c768d-rl77z" Nov 28 07:12:13 crc kubenswrapper[4922]: I1128 07:12:13.755007 4922 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/swift-operator-controller-manager-d77b94747-4f6j5" Nov 28 07:12:13 crc kubenswrapper[4922]: I1128 07:12:13.783854 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/barbican-operator-controller-manager-7b64f4fb85-7wc2c"] Nov 28 07:12:13 crc kubenswrapper[4922]: I1128 07:12:13.795104 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/telemetry-operator-controller-manager-76cc84c6bb-5hpq8" Nov 28 07:12:13 crc kubenswrapper[4922]: I1128 07:12:13.809692 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/test-operator-controller-manager-5cd6c7f4c8-7mrxz" Nov 28 07:12:13 crc kubenswrapper[4922]: I1128 07:12:13.828819 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/watcher-operator-controller-manager-656dcb59d4-h76jf" Nov 28 07:12:13 crc kubenswrapper[4922]: I1128 07:12:13.836772 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/ff496359-1983-4a83-a344-3fb11e4587d9-cert\") pod \"openstack-baremetal-operator-controller-manager-5d9f9695db9c48b\" (UID: \"ff496359-1983-4a83-a344-3fb11e4587d9\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-5d9f9695db9c48b" Nov 28 07:12:13 crc kubenswrapper[4922]: E1128 07:12:13.836953 4922 secret.go:188] Couldn't get secret openstack-operators/openstack-baremetal-operator-webhook-server-cert: secret "openstack-baremetal-operator-webhook-server-cert" not found Nov 28 07:12:13 crc kubenswrapper[4922]: E1128 07:12:13.837025 4922 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/ff496359-1983-4a83-a344-3fb11e4587d9-cert podName:ff496359-1983-4a83-a344-3fb11e4587d9 nodeName:}" failed. No retries permitted until 2025-11-28 07:12:14.837005437 +0000 UTC m=+1179.757401019 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/ff496359-1983-4a83-a344-3fb11e4587d9-cert") pod "openstack-baremetal-operator-controller-manager-5d9f9695db9c48b" (UID: "ff496359-1983-4a83-a344-3fb11e4587d9") : secret "openstack-baremetal-operator-webhook-server-cert" not found Nov 28 07:12:13 crc kubenswrapper[4922]: I1128 07:12:13.876819 4922 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-zq5v4" Nov 28 07:12:13 crc kubenswrapper[4922]: I1128 07:12:13.904266 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/cinder-operator-controller-manager-6b7f75547b-njqq9"] Nov 28 07:12:13 crc kubenswrapper[4922]: I1128 07:12:13.916827 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/glance-operator-controller-manager-589cbd6b5b-89z98"] Nov 28 07:12:14 crc kubenswrapper[4922]: I1128 07:12:14.049273 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/glance-operator-controller-manager-589cbd6b5b-89z98" event={"ID":"3ec4d3b2-efb1-4aa0-a5dc-a130ffc887be","Type":"ContainerStarted","Data":"60a42eaee11b52b2129f764b90348ec958fba1b896def84320731e270f175bdb"} Nov 28 07:12:14 crc kubenswrapper[4922]: I1128 07:12:14.061136 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/horizon-operator-controller-manager-5d494799bf-htnmk"] Nov 28 07:12:14 crc kubenswrapper[4922]: I1128 07:12:14.061304 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/cinder-operator-controller-manager-6b7f75547b-njqq9" event={"ID":"5b5e379b-d7c5-45ae-81f8-cebce05059a0","Type":"ContainerStarted","Data":"2dcf37f04035da894e1b3224181817690f8afce25709238c1ed2a8df75f3bcc5"} Nov 28 07:12:14 crc kubenswrapper[4922]: I1128 07:12:14.061423 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/barbican-operator-controller-manager-7b64f4fb85-7wc2c" event={"ID":"743984bb-d44f-4721-b9a0-d12f71feb3e9","Type":"ContainerStarted","Data":"e45535f150fa019ad5a1b6b30f3076a5696a71fff5ce7b661140c512fadabed5"} Nov 28 07:12:14 crc kubenswrapper[4922]: I1128 07:12:14.145950 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/5f800348-9092-4701-a85d-3bb8f40b51bd-metrics-certs\") pod \"openstack-operator-controller-manager-66f75ddbcc-b48l9\" (UID: \"5f800348-9092-4701-a85d-3bb8f40b51bd\") " pod="openstack-operators/openstack-operator-controller-manager-66f75ddbcc-b48l9" Nov 28 07:12:14 crc kubenswrapper[4922]: I1128 07:12:14.146020 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/5f800348-9092-4701-a85d-3bb8f40b51bd-webhook-certs\") pod \"openstack-operator-controller-manager-66f75ddbcc-b48l9\" (UID: \"5f800348-9092-4701-a85d-3bb8f40b51bd\") " pod="openstack-operators/openstack-operator-controller-manager-66f75ddbcc-b48l9" Nov 28 07:12:14 crc kubenswrapper[4922]: E1128 07:12:14.146284 4922 secret.go:188] Couldn't get secret openstack-operators/webhook-server-cert: secret "webhook-server-cert" not found Nov 28 07:12:14 crc kubenswrapper[4922]: E1128 07:12:14.146341 4922 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5f800348-9092-4701-a85d-3bb8f40b51bd-webhook-certs podName:5f800348-9092-4701-a85d-3bb8f40b51bd nodeName:}" failed. No retries permitted until 2025-11-28 07:12:15.146324596 +0000 UTC m=+1180.066720178 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "webhook-certs" (UniqueName: "kubernetes.io/secret/5f800348-9092-4701-a85d-3bb8f40b51bd-webhook-certs") pod "openstack-operator-controller-manager-66f75ddbcc-b48l9" (UID: "5f800348-9092-4701-a85d-3bb8f40b51bd") : secret "webhook-server-cert" not found Nov 28 07:12:14 crc kubenswrapper[4922]: E1128 07:12:14.146702 4922 secret.go:188] Couldn't get secret openstack-operators/metrics-server-cert: secret "metrics-server-cert" not found Nov 28 07:12:14 crc kubenswrapper[4922]: E1128 07:12:14.146738 4922 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5f800348-9092-4701-a85d-3bb8f40b51bd-metrics-certs podName:5f800348-9092-4701-a85d-3bb8f40b51bd nodeName:}" failed. No retries permitted until 2025-11-28 07:12:15.146728177 +0000 UTC m=+1180.067123759 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/5f800348-9092-4701-a85d-3bb8f40b51bd-metrics-certs") pod "openstack-operator-controller-manager-66f75ddbcc-b48l9" (UID: "5f800348-9092-4701-a85d-3bb8f40b51bd") : secret "metrics-server-cert" not found Nov 28 07:12:14 crc kubenswrapper[4922]: I1128 07:12:14.232319 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/heat-operator-controller-manager-5b77f656f-vggwt"] Nov 28 07:12:14 crc kubenswrapper[4922]: I1128 07:12:14.240196 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/ironic-operator-controller-manager-67cb4dc6d4-cdnsq"] Nov 28 07:12:14 crc kubenswrapper[4922]: I1128 07:12:14.282138 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/manila-operator-controller-manager-5d499bf58b-4csdv"] Nov 28 07:12:14 crc kubenswrapper[4922]: I1128 07:12:14.296061 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/designate-operator-controller-manager-955677c94-6kwvm"] Nov 28 07:12:14 crc kubenswrapper[4922]: W1128 07:12:14.299302 4922 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod8577290f_ffdd_49ed_8666_6d5ca2323102.slice/crio-3a53d889c0d6b8621f0421c62065ac48a2916c488db9ae179f05497f9eecd74f WatchSource:0}: Error finding container 3a53d889c0d6b8621f0421c62065ac48a2916c488db9ae179f05497f9eecd74f: Status 404 returned error can't find the container with id 3a53d889c0d6b8621f0421c62065ac48a2916c488db9ae179f05497f9eecd74f Nov 28 07:12:14 crc kubenswrapper[4922]: W1128 07:12:14.303116 4922 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podfafc81ca_dc80_4ef4_a8e3_9e63f5f45a2d.slice/crio-522db650ef3dd70f2771abe040413b68647e60cc5d0d2a6ce726e46f097209d7 WatchSource:0}: Error finding container 522db650ef3dd70f2771abe040413b68647e60cc5d0d2a6ce726e46f097209d7: Status 404 returned error can't find the container with id 522db650ef3dd70f2771abe040413b68647e60cc5d0d2a6ce726e46f097209d7 Nov 28 07:12:14 crc kubenswrapper[4922]: I1128 07:12:14.317008 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/octavia-operator-controller-manager-64cdc6ff96-74d6v"] Nov 28 07:12:14 crc kubenswrapper[4922]: I1128 07:12:14.325029 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/keystone-operator-controller-manager-7b4567c7cf-5m4mz"] Nov 28 07:12:14 crc kubenswrapper[4922]: I1128 07:12:14.405212 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" 
pods=["openstack-operators/neutron-operator-controller-manager-6fdcddb789-5bl8t"] Nov 28 07:12:14 crc kubenswrapper[4922]: W1128 07:12:14.413240 4922 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podc905882d_e743_49a3_b331_f3b7cc0d8649.slice/crio-d6b3a6abcd5924a36ec84726f1d2acff68137972ac2d086e34abae55633eee60 WatchSource:0}: Error finding container d6b3a6abcd5924a36ec84726f1d2acff68137972ac2d086e34abae55633eee60: Status 404 returned error can't find the container with id d6b3a6abcd5924a36ec84726f1d2acff68137972ac2d086e34abae55633eee60 Nov 28 07:12:14 crc kubenswrapper[4922]: I1128 07:12:14.413283 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/nova-operator-controller-manager-79556f57fc-jbsc8"] Nov 28 07:12:14 crc kubenswrapper[4922]: I1128 07:12:14.419648 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/placement-operator-controller-manager-57988cc5b5-jgwt8"] Nov 28 07:12:14 crc kubenswrapper[4922]: W1128 07:12:14.423528 4922 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod5ab2d7c3_71a5_4337_acee_8ed5e7f09dbf.slice/crio-714addd086a87828bbc2141623d285ccdb77a6ddf6816f8ed4fd10b0d02005d0 WatchSource:0}: Error finding container 714addd086a87828bbc2141623d285ccdb77a6ddf6816f8ed4fd10b0d02005d0: Status 404 returned error can't find the container with id 714addd086a87828bbc2141623d285ccdb77a6ddf6816f8ed4fd10b0d02005d0 Nov 28 07:12:14 crc kubenswrapper[4922]: W1128 07:12:14.423987 4922 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod210cb1b4_9abb_454e_9f7e_9dbd52ec0c9b.slice/crio-e77ed55e465c26364d0a72fcd0714e3b7dd2ed0a6e584bbf9623ad7073c66162 WatchSource:0}: Error finding container e77ed55e465c26364d0a72fcd0714e3b7dd2ed0a6e584bbf9623ad7073c66162: Status 404 returned error can't find the container with id e77ed55e465c26364d0a72fcd0714e3b7dd2ed0a6e584bbf9623ad7073c66162 Nov 28 07:12:14 crc kubenswrapper[4922]: I1128 07:12:14.427269 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/mariadb-operator-controller-manager-66f4dd4bc7-bthkr"] Nov 28 07:12:14 crc kubenswrapper[4922]: E1128 07:12:14.427736 4922 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/placement-operator@sha256:225958f250a1075b69439d776a13acc45c78695c21abda23600fb53ca1640423,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-b6gwq,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 
},Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod placement-operator-controller-manager-57988cc5b5-jgwt8_openstack-operators(210cb1b4-9abb-454e-9f7e-9dbd52ec0c9b): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Nov 28 07:12:14 crc kubenswrapper[4922]: E1128 07:12:14.428126 4922 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/ovn-operator@sha256:bbb543d2d67c73e5df5d6357c3251363eb34a99575c5bf10416edd45dbdae2f6,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-9fnn4,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 
},Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod ovn-operator-controller-manager-56897c768d-rl77z_openstack-operators(5ab2d7c3-71a5-4337-acee-8ed5e7f09dbf): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Nov 28 07:12:14 crc kubenswrapper[4922]: E1128 07:12:14.430522 4922 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:kube-rbac-proxy,Image:quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0,Command:[],Args:[--secure-listen-address=0.0.0.0:8443 --upstream=http://127.0.0.1:8080/ --logtostderr=true --v=0],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:https,HostPort:0,ContainerPort:8443,Protocol:TCP,HostIP:,},},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{134217728 0} {} BinarySI},},Requests:ResourceList{cpu: {{5 -3} {} 5m DecimalSI},memory: {{67108864 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-b6gwq,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod placement-operator-controller-manager-57988cc5b5-jgwt8_openstack-operators(210cb1b4-9abb-454e-9f7e-9dbd52ec0c9b): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Nov 28 07:12:14 crc kubenswrapper[4922]: E1128 07:12:14.431024 4922 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:kube-rbac-proxy,Image:quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0,Command:[],Args:[--secure-listen-address=0.0.0.0:8443 --upstream=http://127.0.0.1:8080/ --logtostderr=true --v=0],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:https,HostPort:0,ContainerPort:8443,Protocol:TCP,HostIP:,},},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{134217728 0} {} BinarySI},},Requests:ResourceList{cpu: {{5 -3} {} 5m DecimalSI},memory: {{67108864 0} {} 
BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-9fnn4,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod ovn-operator-controller-manager-56897c768d-rl77z_openstack-operators(5ab2d7c3-71a5-4337-acee-8ed5e7f09dbf): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Nov 28 07:12:14 crc kubenswrapper[4922]: E1128 07:12:14.432006 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"pull QPS exceeded\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ErrImagePull: \"pull QPS exceeded\"]" pod="openstack-operators/placement-operator-controller-manager-57988cc5b5-jgwt8" podUID="210cb1b4-9abb-454e-9f7e-9dbd52ec0c9b" Nov 28 07:12:14 crc kubenswrapper[4922]: E1128 07:12:14.432674 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"pull QPS exceeded\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ErrImagePull: \"pull QPS exceeded\"]" pod="openstack-operators/ovn-operator-controller-manager-56897c768d-rl77z" podUID="5ab2d7c3-71a5-4337-acee-8ed5e7f09dbf" Nov 28 07:12:14 crc kubenswrapper[4922]: I1128 07:12:14.435435 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/ovn-operator-controller-manager-56897c768d-rl77z"] Nov 28 07:12:14 crc kubenswrapper[4922]: I1128 07:12:14.549475 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/telemetry-operator-controller-manager-76cc84c6bb-5hpq8"] Nov 28 07:12:14 crc kubenswrapper[4922]: W1128 07:12:14.554157 4922 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podb46e91a2_650c_4ae1_afa0_6edc6768c99e.slice/crio-3965ebb61134d98065739586cb8c3dbea8c36a8f03e3c545f1e3a2edb45dc090 WatchSource:0}: Error finding container 3965ebb61134d98065739586cb8c3dbea8c36a8f03e3c545f1e3a2edb45dc090: Status 404 returned error can't find the container with id 3965ebb61134d98065739586cb8c3dbea8c36a8f03e3c545f1e3a2edb45dc090 Nov 28 07:12:14 crc kubenswrapper[4922]: W1128 07:12:14.555593 4922 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod89dd35b2_343e_4e06_9fac_6474b93b294e.slice/crio-06d2cc077e878ad8a74e99392a1c24a282893299f340a906758f292b6326c549 WatchSource:0}: Error finding container 06d2cc077e878ad8a74e99392a1c24a282893299f340a906758f292b6326c549: Status 404 returned error can't find the container with id 06d2cc077e878ad8a74e99392a1c24a282893299f340a906758f292b6326c549 Nov 28 07:12:14 crc kubenswrapper[4922]: I1128 07:12:14.557314 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" 
pods=["openstack-operators/test-operator-controller-manager-5cd6c7f4c8-7mrxz"] Nov 28 07:12:14 crc kubenswrapper[4922]: W1128 07:12:14.557722 4922 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod857fa43c_dddb_46c5_88e2_00e359b43c66.slice/crio-1c094b3269b624f3122a7cd4628862da0c0bd40a1062aa6e643ec466e21902a7 WatchSource:0}: Error finding container 1c094b3269b624f3122a7cd4628862da0c0bd40a1062aa6e643ec466e21902a7: Status 404 returned error can't find the container with id 1c094b3269b624f3122a7cd4628862da0c0bd40a1062aa6e643ec466e21902a7 Nov 28 07:12:14 crc kubenswrapper[4922]: E1128 07:12:14.560626 4922 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/telemetry-operator@sha256:7d66757c0af67104f0389e851a7cc0daa44443ad202d157417bd86bbb57cc385,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-rw248,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod telemetry-operator-controller-manager-76cc84c6bb-5hpq8_openstack-operators(89dd35b2-343e-4e06-9fac-6474b93b294e): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Nov 28 07:12:14 crc kubenswrapper[4922]: E1128 07:12:14.560869 4922 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/watcher-operator@sha256:6bed55b172b9ee8ccc3952cbfc543d8bd44e2690f6db94348a754152fd78f4cf,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 
--metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-qzg84,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod watcher-operator-controller-manager-656dcb59d4-h76jf_openstack-operators(b46e91a2-650c-4ae1-afa0-6edc6768c99e): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Nov 28 07:12:14 crc kubenswrapper[4922]: E1128 07:12:14.561762 4922 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/test-operator@sha256:210517b918e30df1c95fc7d961c8e57e9a9d1cc2b9fe7eb4dad2034dd53a90aa,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-z4sfj,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 
},Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod test-operator-controller-manager-5cd6c7f4c8-7mrxz_openstack-operators(857fa43c-dddb-46c5-88e2-00e359b43c66): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Nov 28 07:12:14 crc kubenswrapper[4922]: E1128 07:12:14.562680 4922 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:kube-rbac-proxy,Image:quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0,Command:[],Args:[--secure-listen-address=0.0.0.0:8443 --upstream=http://127.0.0.1:8080/ --logtostderr=true --v=0],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:https,HostPort:0,ContainerPort:8443,Protocol:TCP,HostIP:,},},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{134217728 0} {} BinarySI},},Requests:ResourceList{cpu: {{5 -3} {} 5m DecimalSI},memory: {{67108864 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-qzg84,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod watcher-operator-controller-manager-656dcb59d4-h76jf_openstack-operators(b46e91a2-650c-4ae1-afa0-6edc6768c99e): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Nov 28 07:12:14 crc kubenswrapper[4922]: E1128 07:12:14.562859 4922 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:kube-rbac-proxy,Image:quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0,Command:[],Args:[--secure-listen-address=0.0.0.0:8443 --upstream=http://127.0.0.1:8080/ --logtostderr=true --v=0],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:https,HostPort:0,ContainerPort:8443,Protocol:TCP,HostIP:,},},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m 
DecimalSI},memory: {{134217728 0} {} BinarySI},},Requests:ResourceList{cpu: {{5 -3} {} 5m DecimalSI},memory: {{67108864 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-rw248,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod telemetry-operator-controller-manager-76cc84c6bb-5hpq8_openstack-operators(89dd35b2-343e-4e06-9fac-6474b93b294e): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Nov 28 07:12:14 crc kubenswrapper[4922]: E1128 07:12:14.564137 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"pull QPS exceeded\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ErrImagePull: \"pull QPS exceeded\"]" pod="openstack-operators/watcher-operator-controller-manager-656dcb59d4-h76jf" podUID="b46e91a2-650c-4ae1-afa0-6edc6768c99e" Nov 28 07:12:14 crc kubenswrapper[4922]: E1128 07:12:14.564188 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"pull QPS exceeded\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ErrImagePull: \"pull QPS exceeded\"]" pod="openstack-operators/telemetry-operator-controller-manager-76cc84c6bb-5hpq8" podUID="89dd35b2-343e-4e06-9fac-6474b93b294e" Nov 28 07:12:14 crc kubenswrapper[4922]: E1128 07:12:14.566073 4922 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:kube-rbac-proxy,Image:quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0,Command:[],Args:[--secure-listen-address=0.0.0.0:8443 --upstream=http://127.0.0.1:8080/ --logtostderr=true --v=0],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:https,HostPort:0,ContainerPort:8443,Protocol:TCP,HostIP:,},},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{134217728 0} {} BinarySI},},Requests:ResourceList{cpu: {{5 -3} {} 5m DecimalSI},memory: {{67108864 0} {} 
BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-z4sfj,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod test-operator-controller-manager-5cd6c7f4c8-7mrxz_openstack-operators(857fa43c-dddb-46c5-88e2-00e359b43c66): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Nov 28 07:12:14 crc kubenswrapper[4922]: E1128 07:12:14.567309 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"pull QPS exceeded\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ErrImagePull: \"pull QPS exceeded\"]" pod="openstack-operators/test-operator-controller-manager-5cd6c7f4c8-7mrxz" podUID="857fa43c-dddb-46c5-88e2-00e359b43c66" Nov 28 07:12:14 crc kubenswrapper[4922]: I1128 07:12:14.567510 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/a2b17b7d-a398-4ce6-9e4a-6d80e1e97369-cert\") pod \"infra-operator-controller-manager-57548d458d-x2bzf\" (UID: \"a2b17b7d-a398-4ce6-9e4a-6d80e1e97369\") " pod="openstack-operators/infra-operator-controller-manager-57548d458d-x2bzf" Nov 28 07:12:14 crc kubenswrapper[4922]: E1128 07:12:14.567723 4922 secret.go:188] Couldn't get secret openstack-operators/infra-operator-webhook-server-cert: secret "infra-operator-webhook-server-cert" not found Nov 28 07:12:14 crc kubenswrapper[4922]: E1128 07:12:14.567808 4922 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/a2b17b7d-a398-4ce6-9e4a-6d80e1e97369-cert podName:a2b17b7d-a398-4ce6-9e4a-6d80e1e97369 nodeName:}" failed. No retries permitted until 2025-11-28 07:12:16.567787739 +0000 UTC m=+1181.488183321 (durationBeforeRetry 2s). 
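The wave of ErrImagePull: "pull QPS exceeded" failures above is not a registry-side error: the kubelet rate-limits image pulls client-side with a token bucket governed by registryPullQPS and registryBurst in its configuration (5 QPS with a burst of 10 are the common defaults; the actual values on this node are not visible in the log). Starting this many operator pods at once drains the bucket, the overflow pulls are rejected locally, and the pods fall back to the normal sync retry. A minimal sketch of that gating using the client-go flowcontrol token bucket; the limits here are assumptions:

    package main

    import (
        "fmt"

        "k8s.io/client-go/util/flowcontrol"
    )

    func main() {
        // Assumed limits: 5 pulls/second, burst of 10.
        limiter := flowcontrol.NewTokenBucketRateLimiter(5, 10)
        for i := 1; i <= 15; i++ {
            if limiter.TryAccept() {
                fmt.Printf("pull %d: admitted\n", i)
            } else {
                // The kubelet surfaces this case as ErrImagePull: pull QPS exceeded.
                fmt.Printf("pull %d: pull QPS exceeded\n", i)
            }
        }
    }

Raising registryPullQPS (or setting it to 0 to disable the limit) is the usual mitigation when many pods land on a node simultaneously, at the cost of heavier registry load.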
Nov 28 07:12:14 crc kubenswrapper[4922]: I1128 07:12:14.567510 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/a2b17b7d-a398-4ce6-9e4a-6d80e1e97369-cert\") pod \"infra-operator-controller-manager-57548d458d-x2bzf\" (UID: \"a2b17b7d-a398-4ce6-9e4a-6d80e1e97369\") " pod="openstack-operators/infra-operator-controller-manager-57548d458d-x2bzf"
Nov 28 07:12:14 crc kubenswrapper[4922]: E1128 07:12:14.567723 4922 secret.go:188] Couldn't get secret openstack-operators/infra-operator-webhook-server-cert: secret "infra-operator-webhook-server-cert" not found
Nov 28 07:12:14 crc kubenswrapper[4922]: E1128 07:12:14.567808 4922 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/a2b17b7d-a398-4ce6-9e4a-6d80e1e97369-cert podName:a2b17b7d-a398-4ce6-9e4a-6d80e1e97369 nodeName:}" failed. No retries permitted until 2025-11-28 07:12:16.567787739 +0000 UTC m=+1181.488183321 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/a2b17b7d-a398-4ce6-9e4a-6d80e1e97369-cert") pod "infra-operator-controller-manager-57548d458d-x2bzf" (UID: "a2b17b7d-a398-4ce6-9e4a-6d80e1e97369") : secret "infra-operator-webhook-server-cert" not found
Nov 28 07:12:14 crc kubenswrapper[4922]: I1128 07:12:14.567847 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/watcher-operator-controller-manager-656dcb59d4-h76jf"]
Nov 28 07:12:14 crc kubenswrapper[4922]: I1128 07:12:14.576811 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/swift-operator-controller-manager-d77b94747-4f6j5"]
Nov 28 07:12:14 crc kubenswrapper[4922]: E1128 07:12:14.584905 4922 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/swift-operator@sha256:72236301580ff9080f7e311b832d7ba66666a9afeda51f969745229624ff26e4,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-svv6d,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000660000,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod swift-operator-controller-manager-d77b94747-4f6j5_openstack-operators(bbf78ced-8521-464d-a517-8ca721301421): ErrImagePull: pull QPS exceeded" logger="UnhandledError"
Nov 28 07:12:14 crc kubenswrapper[4922]: E1128 07:12:14.587164 4922 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:kube-rbac-proxy,Image:quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0,Command:[],Args:[--secure-listen-address=0.0.0.0:8443 --upstream=http://127.0.0.1:8080/
--logtostderr=true --v=0],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:https,HostPort:0,ContainerPort:8443,Protocol:TCP,HostIP:,},},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{134217728 0} {} BinarySI},},Requests:ResourceList{cpu: {{5 -3} {} 5m DecimalSI},memory: {{67108864 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-svv6d,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000660000,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod swift-operator-controller-manager-d77b94747-4f6j5_openstack-operators(bbf78ced-8521-464d-a517-8ca721301421): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Nov 28 07:12:14 crc kubenswrapper[4922]: E1128 07:12:14.588901 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"pull QPS exceeded\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ErrImagePull: \"pull QPS exceeded\"]" pod="openstack-operators/swift-operator-controller-manager-d77b94747-4f6j5" podUID="bbf78ced-8521-464d-a517-8ca721301421" Nov 28 07:12:14 crc kubenswrapper[4922]: I1128 07:12:14.642069 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-zq5v4"] Nov 28 07:12:14 crc kubenswrapper[4922]: W1128 07:12:14.649024 4922 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod7427e034_38e4_460d_aefa_15842840a7c0.slice/crio-1c7fc8bf44e1ae3a1c2bdc540ee5f9992c0e1318a1137a21cd8f54a6deb205ba WatchSource:0}: Error finding container 1c7fc8bf44e1ae3a1c2bdc540ee5f9992c0e1318a1137a21cd8f54a6deb205ba: Status 404 returned error can't find the container with id 1c7fc8bf44e1ae3a1c2bdc540ee5f9992c0e1318a1137a21cd8f54a6deb205ba Nov 28 07:12:14 crc kubenswrapper[4922]: I1128 07:12:14.870951 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/ff496359-1983-4a83-a344-3fb11e4587d9-cert\") pod \"openstack-baremetal-operator-controller-manager-5d9f9695db9c48b\" (UID: \"ff496359-1983-4a83-a344-3fb11e4587d9\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-5d9f9695db9c48b" Nov 28 07:12:14 crc kubenswrapper[4922]: E1128 07:12:14.871170 4922 secret.go:188] Couldn't get secret openstack-operators/openstack-baremetal-operator-webhook-server-cert: secret "openstack-baremetal-operator-webhook-server-cert" not found Nov 28 07:12:14 crc kubenswrapper[4922]: E1128 07:12:14.871338 4922 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/ff496359-1983-4a83-a344-3fb11e4587d9-cert podName:ff496359-1983-4a83-a344-3fb11e4587d9 nodeName:}" failed. 
No retries permitted until 2025-11-28 07:12:16.871300773 +0000 UTC m=+1181.791696385 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/ff496359-1983-4a83-a344-3fb11e4587d9-cert") pod "openstack-baremetal-operator-controller-manager-5d9f9695db9c48b" (UID: "ff496359-1983-4a83-a344-3fb11e4587d9") : secret "openstack-baremetal-operator-webhook-server-cert" not found Nov 28 07:12:15 crc kubenswrapper[4922]: I1128 07:12:15.071326 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/mariadb-operator-controller-manager-66f4dd4bc7-bthkr" event={"ID":"31bc74a9-0e17-4400-bef5-d36ff53831dd","Type":"ContainerStarted","Data":"87bf442fdbc5ad8b327044eba0b2ab23db5c12996ab2e9b939fe1d4a83db8cb8"} Nov 28 07:12:15 crc kubenswrapper[4922]: I1128 07:12:15.073771 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ironic-operator-controller-manager-67cb4dc6d4-cdnsq" event={"ID":"9c2b26fd-dac9-45ae-b100-b46c85c11506","Type":"ContainerStarted","Data":"99c2ec810865a54f8a238f848e97b6f5efe41597604a1d9f9b583bf67d7a779f"} Nov 28 07:12:15 crc kubenswrapper[4922]: I1128 07:12:15.075137 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-zq5v4" event={"ID":"7427e034-38e4-460d-aefa-15842840a7c0","Type":"ContainerStarted","Data":"1c7fc8bf44e1ae3a1c2bdc540ee5f9992c0e1318a1137a21cd8f54a6deb205ba"} Nov 28 07:12:15 crc kubenswrapper[4922]: I1128 07:12:15.076996 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/heat-operator-controller-manager-5b77f656f-vggwt" event={"ID":"a428f994-d14f-4a4a-aec2-ce114d56f7a9","Type":"ContainerStarted","Data":"27706d4438d7ccbf5058a45b99fadb52bb608bef2e633cd36de216b13397beb3"} Nov 28 07:12:15 crc kubenswrapper[4922]: I1128 07:12:15.078525 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/telemetry-operator-controller-manager-76cc84c6bb-5hpq8" event={"ID":"89dd35b2-343e-4e06-9fac-6474b93b294e","Type":"ContainerStarted","Data":"06d2cc077e878ad8a74e99392a1c24a282893299f340a906758f292b6326c549"} Nov 28 07:12:15 crc kubenswrapper[4922]: I1128 07:12:15.080427 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ovn-operator-controller-manager-56897c768d-rl77z" event={"ID":"5ab2d7c3-71a5-4337-acee-8ed5e7f09dbf","Type":"ContainerStarted","Data":"714addd086a87828bbc2141623d285ccdb77a6ddf6816f8ed4fd10b0d02005d0"} Nov 28 07:12:15 crc kubenswrapper[4922]: E1128 07:12:15.080947 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/telemetry-operator@sha256:7d66757c0af67104f0389e851a7cc0daa44443ad202d157417bd86bbb57cc385\\\"\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0\\\"\"]" pod="openstack-operators/telemetry-operator-controller-manager-76cc84c6bb-5hpq8" podUID="89dd35b2-343e-4e06-9fac-6474b93b294e" Nov 28 07:12:15 crc kubenswrapper[4922]: I1128 07:12:15.082844 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/swift-operator-controller-manager-d77b94747-4f6j5" event={"ID":"bbf78ced-8521-464d-a517-8ca721301421","Type":"ContainerStarted","Data":"092bc7c24924a50c82236ff8362ac99640bace826b75b165b8735a0e940ca3f5"} Nov 28 07:12:15 crc kubenswrapper[4922]: E1128 
07:12:15.082973 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/ovn-operator@sha256:bbb543d2d67c73e5df5d6357c3251363eb34a99575c5bf10416edd45dbdae2f6\\\"\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0\\\"\"]" pod="openstack-operators/ovn-operator-controller-manager-56897c768d-rl77z" podUID="5ab2d7c3-71a5-4337-acee-8ed5e7f09dbf" Nov 28 07:12:15 crc kubenswrapper[4922]: I1128 07:12:15.085265 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/watcher-operator-controller-manager-656dcb59d4-h76jf" event={"ID":"b46e91a2-650c-4ae1-afa0-6edc6768c99e","Type":"ContainerStarted","Data":"3965ebb61134d98065739586cb8c3dbea8c36a8f03e3c545f1e3a2edb45dc090"} Nov 28 07:12:15 crc kubenswrapper[4922]: I1128 07:12:15.087040 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/horizon-operator-controller-manager-5d494799bf-htnmk" event={"ID":"b0972771-f2d5-4f8d-91cb-ef2fde0b536b","Type":"ContainerStarted","Data":"be7da9200ee4d15a5b07de3265980930e874687ebe244c67902036d8e561e679"} Nov 28 07:12:15 crc kubenswrapper[4922]: E1128 07:12:15.088196 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/swift-operator@sha256:72236301580ff9080f7e311b832d7ba66666a9afeda51f969745229624ff26e4\\\"\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0\\\"\"]" pod="openstack-operators/swift-operator-controller-manager-d77b94747-4f6j5" podUID="bbf78ced-8521-464d-a517-8ca721301421" Nov 28 07:12:15 crc kubenswrapper[4922]: E1128 07:12:15.089488 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/watcher-operator@sha256:6bed55b172b9ee8ccc3952cbfc543d8bd44e2690f6db94348a754152fd78f4cf\\\"\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0\\\"\"]" pod="openstack-operators/watcher-operator-controller-manager-656dcb59d4-h76jf" podUID="b46e91a2-650c-4ae1-afa0-6edc6768c99e" Nov 28 07:12:15 crc kubenswrapper[4922]: I1128 07:12:15.089877 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/octavia-operator-controller-manager-64cdc6ff96-74d6v" event={"ID":"fafc81ca-dc80-4ef4-a8e3-9e63f5f45a2d","Type":"ContainerStarted","Data":"522db650ef3dd70f2771abe040413b68647e60cc5d0d2a6ce726e46f097209d7"} Nov 28 07:12:15 crc kubenswrapper[4922]: I1128 07:12:15.091631 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/keystone-operator-controller-manager-7b4567c7cf-5m4mz" event={"ID":"1ecf6399-ca02-4591-89cd-1a0731a5a75c","Type":"ContainerStarted","Data":"283bf90ad19ab3485162d8c5c51be8b646dd727695f64b63047215650102ba78"} Nov 28 07:12:15 crc kubenswrapper[4922]: I1128 07:12:15.094153 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/manila-operator-controller-manager-5d499bf58b-4csdv" 
event={"ID":"0a20a7e6-e9a5-433a-bf5e-d8f62053f611","Type":"ContainerStarted","Data":"a6bd5f718cbf842745c0033687b0aae54f4f2a973744911b9898dd3918e4f237"} Nov 28 07:12:15 crc kubenswrapper[4922]: I1128 07:12:15.096295 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/neutron-operator-controller-manager-6fdcddb789-5bl8t" event={"ID":"b32f9a0b-4c07-433c-b02a-aed24265ae16","Type":"ContainerStarted","Data":"52fff85d3a289a1cc9385ba414513eedb0346e7daed926496e45696f527520d8"} Nov 28 07:12:15 crc kubenswrapper[4922]: I1128 07:12:15.106789 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/nova-operator-controller-manager-79556f57fc-jbsc8" event={"ID":"c905882d-e743-49a3-b331-f3b7cc0d8649","Type":"ContainerStarted","Data":"d6b3a6abcd5924a36ec84726f1d2acff68137972ac2d086e34abae55633eee60"} Nov 28 07:12:15 crc kubenswrapper[4922]: I1128 07:12:15.111515 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/placement-operator-controller-manager-57988cc5b5-jgwt8" event={"ID":"210cb1b4-9abb-454e-9f7e-9dbd52ec0c9b","Type":"ContainerStarted","Data":"e77ed55e465c26364d0a72fcd0714e3b7dd2ed0a6e584bbf9623ad7073c66162"} Nov 28 07:12:15 crc kubenswrapper[4922]: E1128 07:12:15.114616 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/placement-operator@sha256:225958f250a1075b69439d776a13acc45c78695c21abda23600fb53ca1640423\\\"\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0\\\"\"]" pod="openstack-operators/placement-operator-controller-manager-57988cc5b5-jgwt8" podUID="210cb1b4-9abb-454e-9f7e-9dbd52ec0c9b" Nov 28 07:12:15 crc kubenswrapper[4922]: I1128 07:12:15.115659 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/test-operator-controller-manager-5cd6c7f4c8-7mrxz" event={"ID":"857fa43c-dddb-46c5-88e2-00e359b43c66","Type":"ContainerStarted","Data":"1c094b3269b624f3122a7cd4628862da0c0bd40a1062aa6e643ec466e21902a7"} Nov 28 07:12:15 crc kubenswrapper[4922]: E1128 07:12:15.119700 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/test-operator@sha256:210517b918e30df1c95fc7d961c8e57e9a9d1cc2b9fe7eb4dad2034dd53a90aa\\\"\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0\\\"\"]" pod="openstack-operators/test-operator-controller-manager-5cd6c7f4c8-7mrxz" podUID="857fa43c-dddb-46c5-88e2-00e359b43c66" Nov 28 07:12:15 crc kubenswrapper[4922]: I1128 07:12:15.119889 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/designate-operator-controller-manager-955677c94-6kwvm" event={"ID":"8577290f-ffdd-49ed-8666-6d5ca2323102","Type":"ContainerStarted","Data":"3a53d889c0d6b8621f0421c62065ac48a2916c488db9ae179f05497f9eecd74f"} Nov 28 07:12:15 crc kubenswrapper[4922]: I1128 07:12:15.176540 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/5f800348-9092-4701-a85d-3bb8f40b51bd-metrics-certs\") pod \"openstack-operator-controller-manager-66f75ddbcc-b48l9\" (UID: \"5f800348-9092-4701-a85d-3bb8f40b51bd\") " 
pod="openstack-operators/openstack-operator-controller-manager-66f75ddbcc-b48l9" Nov 28 07:12:15 crc kubenswrapper[4922]: I1128 07:12:15.176617 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/5f800348-9092-4701-a85d-3bb8f40b51bd-webhook-certs\") pod \"openstack-operator-controller-manager-66f75ddbcc-b48l9\" (UID: \"5f800348-9092-4701-a85d-3bb8f40b51bd\") " pod="openstack-operators/openstack-operator-controller-manager-66f75ddbcc-b48l9" Nov 28 07:12:15 crc kubenswrapper[4922]: E1128 07:12:15.177864 4922 secret.go:188] Couldn't get secret openstack-operators/webhook-server-cert: secret "webhook-server-cert" not found Nov 28 07:12:15 crc kubenswrapper[4922]: E1128 07:12:15.178121 4922 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5f800348-9092-4701-a85d-3bb8f40b51bd-webhook-certs podName:5f800348-9092-4701-a85d-3bb8f40b51bd nodeName:}" failed. No retries permitted until 2025-11-28 07:12:17.178090944 +0000 UTC m=+1182.098486566 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "webhook-certs" (UniqueName: "kubernetes.io/secret/5f800348-9092-4701-a85d-3bb8f40b51bd-webhook-certs") pod "openstack-operator-controller-manager-66f75ddbcc-b48l9" (UID: "5f800348-9092-4701-a85d-3bb8f40b51bd") : secret "webhook-server-cert" not found Nov 28 07:12:15 crc kubenswrapper[4922]: E1128 07:12:15.178591 4922 secret.go:188] Couldn't get secret openstack-operators/metrics-server-cert: secret "metrics-server-cert" not found Nov 28 07:12:15 crc kubenswrapper[4922]: E1128 07:12:15.178674 4922 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5f800348-9092-4701-a85d-3bb8f40b51bd-metrics-certs podName:5f800348-9092-4701-a85d-3bb8f40b51bd nodeName:}" failed. No retries permitted until 2025-11-28 07:12:17.178655129 +0000 UTC m=+1182.099050791 (durationBeforeRetry 2s). 
Nov 28 07:12:16 crc kubenswrapper[4922]: E1128 07:12:16.132207 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/ovn-operator@sha256:bbb543d2d67c73e5df5d6357c3251363eb34a99575c5bf10416edd45dbdae2f6\\\"\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0\\\"\"]" pod="openstack-operators/ovn-operator-controller-manager-56897c768d-rl77z" podUID="5ab2d7c3-71a5-4337-acee-8ed5e7f09dbf"
Nov 28 07:12:16 crc kubenswrapper[4922]: E1128 07:12:16.132289 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/telemetry-operator@sha256:7d66757c0af67104f0389e851a7cc0daa44443ad202d157417bd86bbb57cc385\\\"\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0\\\"\"]" pod="openstack-operators/telemetry-operator-controller-manager-76cc84c6bb-5hpq8" podUID="89dd35b2-343e-4e06-9fac-6474b93b294e"
Nov 28 07:12:16 crc kubenswrapper[4922]: E1128 07:12:16.132360 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/placement-operator@sha256:225958f250a1075b69439d776a13acc45c78695c21abda23600fb53ca1640423\\\"\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0\\\"\"]" pod="openstack-operators/placement-operator-controller-manager-57988cc5b5-jgwt8" podUID="210cb1b4-9abb-454e-9f7e-9dbd52ec0c9b"
Nov 28 07:12:16 crc kubenswrapper[4922]: E1128 07:12:16.132501 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/test-operator@sha256:210517b918e30df1c95fc7d961c8e57e9a9d1cc2b9fe7eb4dad2034dd53a90aa\\\"\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0\\\"\"]" pod="openstack-operators/test-operator-controller-manager-5cd6c7f4c8-7mrxz" podUID="857fa43c-dddb-46c5-88e2-00e359b43c66"
Nov 28 07:12:16 crc kubenswrapper[4922]: E1128 07:12:16.132561 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/watcher-operator@sha256:6bed55b172b9ee8ccc3952cbfc543d8bd44e2690f6db94348a754152fd78f4cf\\\"\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0\\\"\"]" pod="openstack-operators/watcher-operator-controller-manager-656dcb59d4-h76jf" podUID="b46e91a2-650c-4ae1-afa0-6edc6768c99e"
Nov 28 07:12:16 crc kubenswrapper[4922]:
E1128 07:12:16.135531 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/swift-operator@sha256:72236301580ff9080f7e311b832d7ba66666a9afeda51f969745229624ff26e4\\\"\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0\\\"\"]" pod="openstack-operators/swift-operator-controller-manager-d77b94747-4f6j5" podUID="bbf78ced-8521-464d-a517-8ca721301421" Nov 28 07:12:16 crc kubenswrapper[4922]: I1128 07:12:16.601622 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/a2b17b7d-a398-4ce6-9e4a-6d80e1e97369-cert\") pod \"infra-operator-controller-manager-57548d458d-x2bzf\" (UID: \"a2b17b7d-a398-4ce6-9e4a-6d80e1e97369\") " pod="openstack-operators/infra-operator-controller-manager-57548d458d-x2bzf" Nov 28 07:12:16 crc kubenswrapper[4922]: E1128 07:12:16.602549 4922 secret.go:188] Couldn't get secret openstack-operators/infra-operator-webhook-server-cert: secret "infra-operator-webhook-server-cert" not found Nov 28 07:12:16 crc kubenswrapper[4922]: E1128 07:12:16.603320 4922 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/a2b17b7d-a398-4ce6-9e4a-6d80e1e97369-cert podName:a2b17b7d-a398-4ce6-9e4a-6d80e1e97369 nodeName:}" failed. No retries permitted until 2025-11-28 07:12:20.602649359 +0000 UTC m=+1185.523044941 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/a2b17b7d-a398-4ce6-9e4a-6d80e1e97369-cert") pod "infra-operator-controller-manager-57548d458d-x2bzf" (UID: "a2b17b7d-a398-4ce6-9e4a-6d80e1e97369") : secret "infra-operator-webhook-server-cert" not found Nov 28 07:12:16 crc kubenswrapper[4922]: I1128 07:12:16.906757 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/ff496359-1983-4a83-a344-3fb11e4587d9-cert\") pod \"openstack-baremetal-operator-controller-manager-5d9f9695db9c48b\" (UID: \"ff496359-1983-4a83-a344-3fb11e4587d9\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-5d9f9695db9c48b" Nov 28 07:12:16 crc kubenswrapper[4922]: E1128 07:12:16.906961 4922 secret.go:188] Couldn't get secret openstack-operators/openstack-baremetal-operator-webhook-server-cert: secret "openstack-baremetal-operator-webhook-server-cert" not found Nov 28 07:12:16 crc kubenswrapper[4922]: E1128 07:12:16.907035 4922 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/ff496359-1983-4a83-a344-3fb11e4587d9-cert podName:ff496359-1983-4a83-a344-3fb11e4587d9 nodeName:}" failed. No retries permitted until 2025-11-28 07:12:20.907016395 +0000 UTC m=+1185.827411977 (durationBeforeRetry 4s). 
Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/ff496359-1983-4a83-a344-3fb11e4587d9-cert") pod "openstack-baremetal-operator-controller-manager-5d9f9695db9c48b" (UID: "ff496359-1983-4a83-a344-3fb11e4587d9") : secret "openstack-baremetal-operator-webhook-server-cert" not found Nov 28 07:12:17 crc kubenswrapper[4922]: I1128 07:12:17.211976 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/5f800348-9092-4701-a85d-3bb8f40b51bd-metrics-certs\") pod \"openstack-operator-controller-manager-66f75ddbcc-b48l9\" (UID: \"5f800348-9092-4701-a85d-3bb8f40b51bd\") " pod="openstack-operators/openstack-operator-controller-manager-66f75ddbcc-b48l9" Nov 28 07:12:17 crc kubenswrapper[4922]: I1128 07:12:17.212026 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/5f800348-9092-4701-a85d-3bb8f40b51bd-webhook-certs\") pod \"openstack-operator-controller-manager-66f75ddbcc-b48l9\" (UID: \"5f800348-9092-4701-a85d-3bb8f40b51bd\") " pod="openstack-operators/openstack-operator-controller-manager-66f75ddbcc-b48l9" Nov 28 07:12:17 crc kubenswrapper[4922]: E1128 07:12:17.212135 4922 secret.go:188] Couldn't get secret openstack-operators/webhook-server-cert: secret "webhook-server-cert" not found Nov 28 07:12:17 crc kubenswrapper[4922]: E1128 07:12:17.212132 4922 secret.go:188] Couldn't get secret openstack-operators/metrics-server-cert: secret "metrics-server-cert" not found Nov 28 07:12:17 crc kubenswrapper[4922]: E1128 07:12:17.212181 4922 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5f800348-9092-4701-a85d-3bb8f40b51bd-webhook-certs podName:5f800348-9092-4701-a85d-3bb8f40b51bd nodeName:}" failed. No retries permitted until 2025-11-28 07:12:21.212166783 +0000 UTC m=+1186.132562365 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "webhook-certs" (UniqueName: "kubernetes.io/secret/5f800348-9092-4701-a85d-3bb8f40b51bd-webhook-certs") pod "openstack-operator-controller-manager-66f75ddbcc-b48l9" (UID: "5f800348-9092-4701-a85d-3bb8f40b51bd") : secret "webhook-server-cert" not found Nov 28 07:12:17 crc kubenswrapper[4922]: E1128 07:12:17.212212 4922 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5f800348-9092-4701-a85d-3bb8f40b51bd-metrics-certs podName:5f800348-9092-4701-a85d-3bb8f40b51bd nodeName:}" failed. No retries permitted until 2025-11-28 07:12:21.212192153 +0000 UTC m=+1186.132587735 (durationBeforeRetry 4s). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/5f800348-9092-4701-a85d-3bb8f40b51bd-metrics-certs") pod "openstack-operator-controller-manager-66f75ddbcc-b48l9" (UID: "5f800348-9092-4701-a85d-3bb8f40b51bd") : secret "metrics-server-cert" not found
Nov 28 07:12:20 crc kubenswrapper[4922]: I1128 07:12:20.661273 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/a2b17b7d-a398-4ce6-9e4a-6d80e1e97369-cert\") pod \"infra-operator-controller-manager-57548d458d-x2bzf\" (UID: \"a2b17b7d-a398-4ce6-9e4a-6d80e1e97369\") " pod="openstack-operators/infra-operator-controller-manager-57548d458d-x2bzf"
Nov 28 07:12:20 crc kubenswrapper[4922]: E1128 07:12:20.661882 4922 secret.go:188] Couldn't get secret openstack-operators/infra-operator-webhook-server-cert: secret "infra-operator-webhook-server-cert" not found
Nov 28 07:12:20 crc kubenswrapper[4922]: E1128 07:12:20.662437 4922 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/a2b17b7d-a398-4ce6-9e4a-6d80e1e97369-cert podName:a2b17b7d-a398-4ce6-9e4a-6d80e1e97369 nodeName:}" failed. No retries permitted until 2025-11-28 07:12:28.662423763 +0000 UTC m=+1193.582819345 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/a2b17b7d-a398-4ce6-9e4a-6d80e1e97369-cert") pod "infra-operator-controller-manager-57548d458d-x2bzf" (UID: "a2b17b7d-a398-4ce6-9e4a-6d80e1e97369") : secret "infra-operator-webhook-server-cert" not found
Nov 28 07:12:20 crc kubenswrapper[4922]: I1128 07:12:20.965304 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/ff496359-1983-4a83-a344-3fb11e4587d9-cert\") pod \"openstack-baremetal-operator-controller-manager-5d9f9695db9c48b\" (UID: \"ff496359-1983-4a83-a344-3fb11e4587d9\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-5d9f9695db9c48b"
Nov 28 07:12:20 crc kubenswrapper[4922]: E1128 07:12:20.965486 4922 secret.go:188] Couldn't get secret openstack-operators/openstack-baremetal-operator-webhook-server-cert: secret "openstack-baremetal-operator-webhook-server-cert" not found
Nov 28 07:12:20 crc kubenswrapper[4922]: E1128 07:12:20.965547 4922 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/ff496359-1983-4a83-a344-3fb11e4587d9-cert podName:ff496359-1983-4a83-a344-3fb11e4587d9 nodeName:}" failed. No retries permitted until 2025-11-28 07:12:28.965530866 +0000 UTC m=+1193.885926448 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/ff496359-1983-4a83-a344-3fb11e4587d9-cert") pod "openstack-baremetal-operator-controller-manager-5d9f9695db9c48b" (UID: "ff496359-1983-4a83-a344-3fb11e4587d9") : secret "openstack-baremetal-operator-webhook-server-cert" not found
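
Worth noticing across these retries is the durationBeforeRetry progression for the same volume: 2s at 07:12:14, 4s at 07:12:16, 8s here, and 16s further down. The kubelet's pending-operations tracker doubles the wait after every consecutive failure, so a still-missing secret is re-checked progressively less often; failed image pulls get the same treatment under the ImagePullBackOff label. A minimal sketch of that doubling pattern, assuming an illustrative 2s starting delay and cap rather than kubelet's exact internal constants:

package main

import (
	"fmt"
	"time"
)

// nextDelay doubles the retry delay after each failed attempt, up to a cap,
// mirroring the 2s -> 4s -> 8s -> 16s progression visible in this log.
func nextDelay(d, limit time.Duration) time.Duration {
	d *= 2
	if d > limit {
		return limit
	}
	return d
}

func main() {
	delay := 2 * time.Second // first observed durationBeforeRetry
	for attempt := 1; attempt <= 4; attempt++ {
		fmt.Printf("attempt %d: retry in %v\n", attempt, delay)
		delay = nextDelay(delay, 2*time.Minute)
	}
}
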
Nov 28 07:12:21 crc kubenswrapper[4922]: I1128 07:12:21.268811 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/5f800348-9092-4701-a85d-3bb8f40b51bd-metrics-certs\") pod \"openstack-operator-controller-manager-66f75ddbcc-b48l9\" (UID: \"5f800348-9092-4701-a85d-3bb8f40b51bd\") " pod="openstack-operators/openstack-operator-controller-manager-66f75ddbcc-b48l9"
Nov 28 07:12:21 crc kubenswrapper[4922]: I1128 07:12:21.269162 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/5f800348-9092-4701-a85d-3bb8f40b51bd-webhook-certs\") pod \"openstack-operator-controller-manager-66f75ddbcc-b48l9\" (UID: \"5f800348-9092-4701-a85d-3bb8f40b51bd\") " pod="openstack-operators/openstack-operator-controller-manager-66f75ddbcc-b48l9"
Nov 28 07:12:21 crc kubenswrapper[4922]: E1128 07:12:21.269014 4922 secret.go:188] Couldn't get secret openstack-operators/metrics-server-cert: secret "metrics-server-cert" not found
Nov 28 07:12:21 crc kubenswrapper[4922]: E1128 07:12:21.269305 4922 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5f800348-9092-4701-a85d-3bb8f40b51bd-metrics-certs podName:5f800348-9092-4701-a85d-3bb8f40b51bd nodeName:}" failed. No retries permitted until 2025-11-28 07:12:29.269284415 +0000 UTC m=+1194.189680087 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/5f800348-9092-4701-a85d-3bb8f40b51bd-metrics-certs") pod "openstack-operator-controller-manager-66f75ddbcc-b48l9" (UID: "5f800348-9092-4701-a85d-3bb8f40b51bd") : secret "metrics-server-cert" not found
Nov 28 07:12:21 crc kubenswrapper[4922]: E1128 07:12:21.269373 4922 secret.go:188] Couldn't get secret openstack-operators/webhook-server-cert: secret "webhook-server-cert" not found
Nov 28 07:12:21 crc kubenswrapper[4922]: E1128 07:12:21.269424 4922 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5f800348-9092-4701-a85d-3bb8f40b51bd-webhook-certs podName:5f800348-9092-4701-a85d-3bb8f40b51bd nodeName:}" failed. No retries permitted until 2025-11-28 07:12:29.269407498 +0000 UTC m=+1194.189803080 (durationBeforeRetry 8s).
Error: MountVolume.SetUp failed for volume "webhook-certs" (UniqueName: "kubernetes.io/secret/5f800348-9092-4701-a85d-3bb8f40b51bd-webhook-certs") pod "openstack-operator-controller-manager-66f75ddbcc-b48l9" (UID: "5f800348-9092-4701-a85d-3bb8f40b51bd") : secret "webhook-server-cert" not found Nov 28 07:12:27 crc kubenswrapper[4922]: I1128 07:12:27.311851 4922 patch_prober.go:28] interesting pod/machine-config-daemon-h8wk6 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 07:12:27 crc kubenswrapper[4922]: I1128 07:12:27.312617 4922 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-h8wk6" podUID="0498340a-5e95-42bf-a0a6-8ac89a6b8858" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 07:12:27 crc kubenswrapper[4922]: I1128 07:12:27.399881 4922 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 28 07:12:27 crc kubenswrapper[4922]: E1128 07:12:27.572205 4922 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:kube-rbac-proxy,Image:quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0,Command:[],Args:[--secure-listen-address=0.0.0.0:8443 --upstream=http://127.0.0.1:8080/ --logtostderr=true --v=0],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:https,HostPort:0,ContainerPort:8443,Protocol:TCP,HostIP:,},},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{134217728 0} {} BinarySI},},Requests:ResourceList{cpu: {{5 -3} {} 5m DecimalSI},memory: {{67108864 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-q52j2,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod nova-operator-controller-manager-79556f57fc-jbsc8_openstack-operators(c905882d-e743-49a3-b331-f3b7cc0d8649): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Nov 28 07:12:27 crc kubenswrapper[4922]: E1128 07:12:27.573707 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-rbac-proxy\" with ErrImagePull: \"pull QPS exceeded\"" pod="openstack-operators/nova-operator-controller-manager-79556f57fc-jbsc8" podUID="c905882d-e743-49a3-b331-f3b7cc0d8649" Nov 28 07:12:27 crc kubenswrapper[4922]: E1128 07:12:27.588543 4922 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:kube-rbac-proxy,Image:quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0,Command:[],Args:[--secure-listen-address=0.0.0.0:8443 
--upstream=http://127.0.0.1:8080/ --logtostderr=true --v=0],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:https,HostPort:0,ContainerPort:8443,Protocol:TCP,HostIP:,},},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{134217728 0} {} BinarySI},},Requests:ResourceList{cpu: {{5 -3} {} 5m DecimalSI},memory: {{67108864 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-55hw5,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod horizon-operator-controller-manager-5d494799bf-htnmk_openstack-operators(b0972771-f2d5-4f8d-91cb-ef2fde0b536b): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Nov 28 07:12:27 crc kubenswrapper[4922]: E1128 07:12:27.588656 4922 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:kube-rbac-proxy,Image:quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0,Command:[],Args:[--secure-listen-address=0.0.0.0:8443 --upstream=http://127.0.0.1:8080/ --logtostderr=true --v=0],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:https,HostPort:0,ContainerPort:8443,Protocol:TCP,HostIP:,},},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{134217728 0} {} BinarySI},},Requests:ResourceList{cpu: {{5 -3} {} 5m DecimalSI},memory: {{67108864 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-6tjvm,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod ironic-operator-controller-manager-67cb4dc6d4-cdnsq_openstack-operators(9c2b26fd-dac9-45ae-b100-b46c85c11506): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Nov 28 07:12:27 crc kubenswrapper[4922]: E1128 07:12:27.590945 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-rbac-proxy\" with ErrImagePull: \"pull QPS exceeded\"" pod="openstack-operators/ironic-operator-controller-manager-67cb4dc6d4-cdnsq" podUID="9c2b26fd-dac9-45ae-b100-b46c85c11506" Nov 28 07:12:27 crc kubenswrapper[4922]: E1128 
07:12:27.591003 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-rbac-proxy\" with ErrImagePull: \"pull QPS exceeded\"" pod="openstack-operators/horizon-operator-controller-manager-5d494799bf-htnmk" podUID="b0972771-f2d5-4f8d-91cb-ef2fde0b536b"
Nov 28 07:12:28 crc kubenswrapper[4922]: I1128 07:12:28.218672 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-zq5v4" event={"ID":"7427e034-38e4-460d-aefa-15842840a7c0","Type":"ContainerStarted","Data":"208d9de0676c8f3fd4eb47e5fcc41e505f29bbfd552249227857370339057f25"}
Nov 28 07:12:28 crc kubenswrapper[4922]: I1128 07:12:28.223038 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/glance-operator-controller-manager-589cbd6b5b-89z98" event={"ID":"3ec4d3b2-efb1-4aa0-a5dc-a130ffc887be","Type":"ContainerStarted","Data":"5112f6a24131b25b8af1065d791ec0f28b56fee59f59fe0ba77112ad8f1d6de5"}
Nov 28 07:12:28 crc kubenswrapper[4922]: I1128 07:12:28.243889 4922 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-zq5v4" podStartSLOduration=2.905951097 podStartE2EDuration="15.243875333s" podCreationTimestamp="2025-11-28 07:12:13 +0000 UTC" firstStartedPulling="2025-11-28 07:12:14.651185806 +0000 UTC m=+1179.571581388" lastFinishedPulling="2025-11-28 07:12:26.989110042 +0000 UTC m=+1191.909505624" observedRunningTime="2025-11-28 07:12:28.24113482 +0000 UTC m=+1193.161530402" watchObservedRunningTime="2025-11-28 07:12:28.243875333 +0000 UTC m=+1193.164270915"
Nov 28 07:12:28 crc kubenswrapper[4922]: I1128 07:12:28.251206 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/nova-operator-controller-manager-79556f57fc-jbsc8" event={"ID":"c905882d-e743-49a3-b331-f3b7cc0d8649","Type":"ContainerStarted","Data":"58dc4bcaafb5daf75d300b3fe522add2cdc23ca363fe9e34261632cc1054d045"}
Nov 28 07:12:28 crc kubenswrapper[4922]: I1128 07:12:28.251955 4922 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/nova-operator-controller-manager-79556f57fc-jbsc8"
Nov 28 07:12:28 crc kubenswrapper[4922]: E1128 07:12:28.254766 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-rbac-proxy\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0\\\"\"" pod="openstack-operators/nova-operator-controller-manager-79556f57fc-jbsc8" podUID="c905882d-e743-49a3-b331-f3b7cc0d8649"
Nov 28 07:12:28 crc kubenswrapper[4922]: I1128 07:12:28.257545 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/neutron-operator-controller-manager-6fdcddb789-5bl8t" event={"ID":"b32f9a0b-4c07-433c-b02a-aed24265ae16","Type":"ContainerStarted","Data":"dc51a74e184009156782d0c18adf404e51c278b8c0244b770683f4df59327f5d"}
Nov 28 07:12:28 crc kubenswrapper[4922]: I1128 07:12:28.276658 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/designate-operator-controller-manager-955677c94-6kwvm" event={"ID":"8577290f-ffdd-49ed-8666-6d5ca2323102","Type":"ContainerStarted","Data":"e72345bef7fc9b3c6df9ed8b78ec04aa243f70cdcc8499429cb5307605c33d4f"}
Nov 28 07:12:28 crc kubenswrapper[4922]: I1128 07:12:28.292315 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/keystone-operator-controller-manager-7b4567c7cf-5m4mz" event={"ID":"1ecf6399-ca02-4591-89cd-1a0731a5a75c","Type":"ContainerStarted","Data":"33657da3c628bbfc484be536ddb07f8c243df4fc965b3f7c8103d6f66311109b"}
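
The pod_startup_latency_tracker entry above records two durations for the rabbitmq-cluster-operator pod: podStartE2EDuration is wall clock from pod creation to observed running (15.243875333s), while podStartSLOduration is the same interval minus the time spent pulling images, which the kubelet excludes from its startup SLI. The quoted timestamps reproduce the number exactly: 26.989110042 - 14.651185806 = 12.337924236s of pulling, and 15.243875333 - 12.337924236 = 2.905951097s. A quick self-contained check in Go using only values quoted in that log line:

package main

import (
	"fmt"
	"time"
)

func main() {
	// Layout matching Go's default time.Time.String() output, which is the
	// format the kubelet log line above quotes for its pull timestamps.
	const layout = "2006-01-02 15:04:05.999999999 -0700 MST"
	first, _ := time.Parse(layout, "2025-11-28 07:12:14.651185806 +0000 UTC") // firstStartedPulling
	last, _ := time.Parse(layout, "2025-11-28 07:12:26.989110042 +0000 UTC")  // lastFinishedPulling

	e2e := 15243875333 * time.Nanosecond // podStartE2EDuration="15.243875333s"
	pulling := last.Sub(first)           // 12.337924236s spent pulling images

	fmt.Println(e2e - pulling) // prints 2.905951097s == podStartSLOduration
}

The same arithmetic applies to the heat and octavia startup-duration entries later in the log, where roughly 24s of the ~26s end-to-end time is image pulling.
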
event={"ID":"1ecf6399-ca02-4591-89cd-1a0731a5a75c","Type":"ContainerStarted","Data":"33657da3c628bbfc484be536ddb07f8c243df4fc965b3f7c8103d6f66311109b"} Nov 28 07:12:28 crc kubenswrapper[4922]: I1128 07:12:28.320041 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/horizon-operator-controller-manager-5d494799bf-htnmk" event={"ID":"b0972771-f2d5-4f8d-91cb-ef2fde0b536b","Type":"ContainerStarted","Data":"eda4681697e4c178c5cec77d542642ea596854b331b36fd57b8678b8dddc2565"} Nov 28 07:12:28 crc kubenswrapper[4922]: I1128 07:12:28.320094 4922 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/horizon-operator-controller-manager-5d494799bf-htnmk" Nov 28 07:12:28 crc kubenswrapper[4922]: I1128 07:12:28.343901 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/heat-operator-controller-manager-5b77f656f-vggwt" event={"ID":"a428f994-d14f-4a4a-aec2-ce114d56f7a9","Type":"ContainerStarted","Data":"56a2433e02da557540061c19b40cc3410048921ff3634f6b024df8b328d919bb"} Nov 28 07:12:28 crc kubenswrapper[4922]: E1128 07:12:28.358658 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-rbac-proxy\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0\\\"\"" pod="openstack-operators/horizon-operator-controller-manager-5d494799bf-htnmk" podUID="b0972771-f2d5-4f8d-91cb-ef2fde0b536b" Nov 28 07:12:28 crc kubenswrapper[4922]: I1128 07:12:28.358768 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/manila-operator-controller-manager-5d499bf58b-4csdv" event={"ID":"0a20a7e6-e9a5-433a-bf5e-d8f62053f611","Type":"ContainerStarted","Data":"c529576800fe66c86e223d879c4628b37edc9fd7489ac5cd59e174726e760390"} Nov 28 07:12:28 crc kubenswrapper[4922]: I1128 07:12:28.360374 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/barbican-operator-controller-manager-7b64f4fb85-7wc2c" event={"ID":"743984bb-d44f-4721-b9a0-d12f71feb3e9","Type":"ContainerStarted","Data":"f06d1d91a5459d247a2622b1016f8c8109987a3eabd7a4815c660e1ad94a8252"} Nov 28 07:12:28 crc kubenswrapper[4922]: I1128 07:12:28.367913 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/octavia-operator-controller-manager-64cdc6ff96-74d6v" event={"ID":"fafc81ca-dc80-4ef4-a8e3-9e63f5f45a2d","Type":"ContainerStarted","Data":"1cf4139edd125d18171a6c3445982f2b99efeec38f7fde30e2afdff54780d538"} Nov 28 07:12:28 crc kubenswrapper[4922]: I1128 07:12:28.376055 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/cinder-operator-controller-manager-6b7f75547b-njqq9" event={"ID":"5b5e379b-d7c5-45ae-81f8-cebce05059a0","Type":"ContainerStarted","Data":"d3cc9c716db5a40fd58e5843eaf7cbdf7d25e0c28db9de9da4061ca6606e798a"} Nov 28 07:12:28 crc kubenswrapper[4922]: I1128 07:12:28.381701 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/mariadb-operator-controller-manager-66f4dd4bc7-bthkr" event={"ID":"31bc74a9-0e17-4400-bef5-d36ff53831dd","Type":"ContainerStarted","Data":"8061c00adb27236521fd677e986f69e202df92ab66f949780e668676e4ed804f"} Nov 28 07:12:28 crc kubenswrapper[4922]: I1128 07:12:28.392009 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ironic-operator-controller-manager-67cb4dc6d4-cdnsq" 
event={"ID":"9c2b26fd-dac9-45ae-b100-b46c85c11506","Type":"ContainerStarted","Data":"ecd757c7a9e373612aa3a4662f44256c222d76b05458b2bac43fe2a6c3b7e060"} Nov 28 07:12:28 crc kubenswrapper[4922]: I1128 07:12:28.392704 4922 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/ironic-operator-controller-manager-67cb4dc6d4-cdnsq" Nov 28 07:12:28 crc kubenswrapper[4922]: E1128 07:12:28.398436 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-rbac-proxy\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0\\\"\"" pod="openstack-operators/ironic-operator-controller-manager-67cb4dc6d4-cdnsq" podUID="9c2b26fd-dac9-45ae-b100-b46c85c11506" Nov 28 07:12:28 crc kubenswrapper[4922]: I1128 07:12:28.724393 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/a2b17b7d-a398-4ce6-9e4a-6d80e1e97369-cert\") pod \"infra-operator-controller-manager-57548d458d-x2bzf\" (UID: \"a2b17b7d-a398-4ce6-9e4a-6d80e1e97369\") " pod="openstack-operators/infra-operator-controller-manager-57548d458d-x2bzf" Nov 28 07:12:28 crc kubenswrapper[4922]: I1128 07:12:28.729901 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/a2b17b7d-a398-4ce6-9e4a-6d80e1e97369-cert\") pod \"infra-operator-controller-manager-57548d458d-x2bzf\" (UID: \"a2b17b7d-a398-4ce6-9e4a-6d80e1e97369\") " pod="openstack-operators/infra-operator-controller-manager-57548d458d-x2bzf" Nov 28 07:12:28 crc kubenswrapper[4922]: I1128 07:12:28.995025 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/infra-operator-controller-manager-57548d458d-x2bzf" Nov 28 07:12:29 crc kubenswrapper[4922]: I1128 07:12:29.028182 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/ff496359-1983-4a83-a344-3fb11e4587d9-cert\") pod \"openstack-baremetal-operator-controller-manager-5d9f9695db9c48b\" (UID: \"ff496359-1983-4a83-a344-3fb11e4587d9\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-5d9f9695db9c48b" Nov 28 07:12:29 crc kubenswrapper[4922]: I1128 07:12:29.045041 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/ff496359-1983-4a83-a344-3fb11e4587d9-cert\") pod \"openstack-baremetal-operator-controller-manager-5d9f9695db9c48b\" (UID: \"ff496359-1983-4a83-a344-3fb11e4587d9\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-5d9f9695db9c48b" Nov 28 07:12:29 crc kubenswrapper[4922]: I1128 07:12:29.074679 4922 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-baremetal-operator-controller-manager-5d9f9695db9c48b" Nov 28 07:12:29 crc kubenswrapper[4922]: I1128 07:12:29.346915 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/5f800348-9092-4701-a85d-3bb8f40b51bd-metrics-certs\") pod \"openstack-operator-controller-manager-66f75ddbcc-b48l9\" (UID: \"5f800348-9092-4701-a85d-3bb8f40b51bd\") " pod="openstack-operators/openstack-operator-controller-manager-66f75ddbcc-b48l9" Nov 28 07:12:29 crc kubenswrapper[4922]: I1128 07:12:29.346967 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/5f800348-9092-4701-a85d-3bb8f40b51bd-webhook-certs\") pod \"openstack-operator-controller-manager-66f75ddbcc-b48l9\" (UID: \"5f800348-9092-4701-a85d-3bb8f40b51bd\") " pod="openstack-operators/openstack-operator-controller-manager-66f75ddbcc-b48l9" Nov 28 07:12:29 crc kubenswrapper[4922]: E1128 07:12:29.347092 4922 secret.go:188] Couldn't get secret openstack-operators/webhook-server-cert: secret "webhook-server-cert" not found Nov 28 07:12:29 crc kubenswrapper[4922]: E1128 07:12:29.347149 4922 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5f800348-9092-4701-a85d-3bb8f40b51bd-webhook-certs podName:5f800348-9092-4701-a85d-3bb8f40b51bd nodeName:}" failed. No retries permitted until 2025-11-28 07:12:45.34713071 +0000 UTC m=+1210.267526292 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "webhook-certs" (UniqueName: "kubernetes.io/secret/5f800348-9092-4701-a85d-3bb8f40b51bd-webhook-certs") pod "openstack-operator-controller-manager-66f75ddbcc-b48l9" (UID: "5f800348-9092-4701-a85d-3bb8f40b51bd") : secret "webhook-server-cert" not found Nov 28 07:12:29 crc kubenswrapper[4922]: I1128 07:12:29.357473 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/5f800348-9092-4701-a85d-3bb8f40b51bd-metrics-certs\") pod \"openstack-operator-controller-manager-66f75ddbcc-b48l9\" (UID: \"5f800348-9092-4701-a85d-3bb8f40b51bd\") " pod="openstack-operators/openstack-operator-controller-manager-66f75ddbcc-b48l9" Nov 28 07:12:29 crc kubenswrapper[4922]: E1128 07:12:29.400032 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-rbac-proxy\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0\\\"\"" pod="openstack-operators/ironic-operator-controller-manager-67cb4dc6d4-cdnsq" podUID="9c2b26fd-dac9-45ae-b100-b46c85c11506" Nov 28 07:12:29 crc kubenswrapper[4922]: E1128 07:12:29.400406 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-rbac-proxy\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0\\\"\"" pod="openstack-operators/horizon-operator-controller-manager-5d494799bf-htnmk" podUID="b0972771-f2d5-4f8d-91cb-ef2fde0b536b" Nov 28 07:12:29 crc kubenswrapper[4922]: E1128 07:12:29.400454 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-rbac-proxy\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0\\\"\"" pod="openstack-operators/nova-operator-controller-manager-79556f57fc-jbsc8" podUID="c905882d-e743-49a3-b331-f3b7cc0d8649" Nov 28 
07:12:29 crc kubenswrapper[4922]: I1128 07:12:29.610809 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/infra-operator-controller-manager-57548d458d-x2bzf"] Nov 28 07:12:29 crc kubenswrapper[4922]: I1128 07:12:29.629835 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-baremetal-operator-controller-manager-5d9f9695db9c48b"] Nov 28 07:12:29 crc kubenswrapper[4922]: W1128 07:12:29.955663 4922 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-poda2b17b7d_a398_4ce6_9e4a_6d80e1e97369.slice/crio-a70a770fce33d3e53a95e3c2b04e83749bb1500c4b824a15000cea47c033c3d6 WatchSource:0}: Error finding container a70a770fce33d3e53a95e3c2b04e83749bb1500c4b824a15000cea47c033c3d6: Status 404 returned error can't find the container with id a70a770fce33d3e53a95e3c2b04e83749bb1500c4b824a15000cea47c033c3d6 Nov 28 07:12:30 crc kubenswrapper[4922]: I1128 07:12:30.412318 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-baremetal-operator-controller-manager-5d9f9695db9c48b" event={"ID":"ff496359-1983-4a83-a344-3fb11e4587d9","Type":"ContainerStarted","Data":"bcceabb1897f465b0981e3ac88730070e4c85d07a46d9e9ea2bae4dd3ebefdd2"} Nov 28 07:12:30 crc kubenswrapper[4922]: I1128 07:12:30.413897 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/infra-operator-controller-manager-57548d458d-x2bzf" event={"ID":"a2b17b7d-a398-4ce6-9e4a-6d80e1e97369","Type":"ContainerStarted","Data":"a70a770fce33d3e53a95e3c2b04e83749bb1500c4b824a15000cea47c033c3d6"} Nov 28 07:12:33 crc kubenswrapper[4922]: I1128 07:12:33.086688 4922 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/horizon-operator-controller-manager-5d494799bf-htnmk" Nov 28 07:12:33 crc kubenswrapper[4922]: E1128 07:12:33.088560 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-rbac-proxy\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0\\\"\"" pod="openstack-operators/horizon-operator-controller-manager-5d494799bf-htnmk" podUID="b0972771-f2d5-4f8d-91cb-ef2fde0b536b" Nov 28 07:12:33 crc kubenswrapper[4922]: I1128 07:12:33.139262 4922 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/ironic-operator-controller-manager-67cb4dc6d4-cdnsq" Nov 28 07:12:33 crc kubenswrapper[4922]: E1128 07:12:33.141150 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-rbac-proxy\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0\\\"\"" pod="openstack-operators/ironic-operator-controller-manager-67cb4dc6d4-cdnsq" podUID="9c2b26fd-dac9-45ae-b100-b46c85c11506" Nov 28 07:12:33 crc kubenswrapper[4922]: I1128 07:12:33.333952 4922 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/nova-operator-controller-manager-79556f57fc-jbsc8" Nov 28 07:12:33 crc kubenswrapper[4922]: E1128 07:12:33.335745 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-rbac-proxy\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0\\\"\"" pod="openstack-operators/nova-operator-controller-manager-79556f57fc-jbsc8" podUID="c905882d-e743-49a3-b331-f3b7cc0d8649" Nov 28 07:12:38 crc 
kubenswrapper[4922]: I1128 07:12:38.552901 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/placement-operator-controller-manager-57988cc5b5-jgwt8" event={"ID":"210cb1b4-9abb-454e-9f7e-9dbd52ec0c9b","Type":"ContainerStarted","Data":"9978c4ed54c32699ced6750251b10215baac9d97833f18cd39815aa870d0034f"} Nov 28 07:12:38 crc kubenswrapper[4922]: I1128 07:12:38.560024 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/telemetry-operator-controller-manager-76cc84c6bb-5hpq8" event={"ID":"89dd35b2-343e-4e06-9fac-6474b93b294e","Type":"ContainerStarted","Data":"bd4602d8cb05ab1843243bfa9fa146209c270e33611353de6d23004281b317ad"} Nov 28 07:12:38 crc kubenswrapper[4922]: I1128 07:12:38.563287 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/watcher-operator-controller-manager-656dcb59d4-h76jf" event={"ID":"b46e91a2-650c-4ae1-afa0-6edc6768c99e","Type":"ContainerStarted","Data":"80efb29f9d36eb004c02b3a8aa353dc497e4aaefd2afa649fbb197262aec2cd2"} Nov 28 07:12:38 crc kubenswrapper[4922]: I1128 07:12:38.569961 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/test-operator-controller-manager-5cd6c7f4c8-7mrxz" event={"ID":"857fa43c-dddb-46c5-88e2-00e359b43c66","Type":"ContainerStarted","Data":"dd4ce86565744d30ee5db499e0ae836240d2951150337e057a233145a5cfe66d"} Nov 28 07:12:38 crc kubenswrapper[4922]: I1128 07:12:38.576653 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/heat-operator-controller-manager-5b77f656f-vggwt" event={"ID":"a428f994-d14f-4a4a-aec2-ce114d56f7a9","Type":"ContainerStarted","Data":"3edbbafe72c7fa6d77712dc2a20a22006d802e7d580ceb3e84acc1a00fb49fe3"} Nov 28 07:12:38 crc kubenswrapper[4922]: I1128 07:12:38.577573 4922 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/heat-operator-controller-manager-5b77f656f-vggwt" Nov 28 07:12:38 crc kubenswrapper[4922]: I1128 07:12:38.584968 4922 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/heat-operator-controller-manager-5b77f656f-vggwt" Nov 28 07:12:38 crc kubenswrapper[4922]: I1128 07:12:38.588875 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/octavia-operator-controller-manager-64cdc6ff96-74d6v" event={"ID":"fafc81ca-dc80-4ef4-a8e3-9e63f5f45a2d","Type":"ContainerStarted","Data":"31e8c2a53dc033bd0965808251adeeb0a402f3e0f2bd1eb7ddeb68ecd19b8e07"} Nov 28 07:12:38 crc kubenswrapper[4922]: I1128 07:12:38.589609 4922 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/octavia-operator-controller-manager-64cdc6ff96-74d6v" Nov 28 07:12:38 crc kubenswrapper[4922]: I1128 07:12:38.594556 4922 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/octavia-operator-controller-manager-64cdc6ff96-74d6v" Nov 28 07:12:38 crc kubenswrapper[4922]: I1128 07:12:38.595830 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-baremetal-operator-controller-manager-5d9f9695db9c48b" event={"ID":"ff496359-1983-4a83-a344-3fb11e4587d9","Type":"ContainerStarted","Data":"bfb47b5aa1ca7ecde61a8515a3ae94ff6e81be6eb80a2abd7e7e9a390113c53a"} Nov 28 07:12:38 crc kubenswrapper[4922]: I1128 07:12:38.597645 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/infra-operator-controller-manager-57548d458d-x2bzf" 
event={"ID":"a2b17b7d-a398-4ce6-9e4a-6d80e1e97369","Type":"ContainerStarted","Data":"3452c228d24ade4460b27fa1eb6fcf0c41da9bf15bb8acfff50b881bf5c92beb"} Nov 28 07:12:38 crc kubenswrapper[4922]: I1128 07:12:38.602925 4922 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/heat-operator-controller-manager-5b77f656f-vggwt" podStartSLOduration=2.864263814 podStartE2EDuration="26.602911054s" podCreationTimestamp="2025-11-28 07:12:12 +0000 UTC" firstStartedPulling="2025-11-28 07:12:14.237239403 +0000 UTC m=+1179.157634985" lastFinishedPulling="2025-11-28 07:12:37.975886603 +0000 UTC m=+1202.896282225" observedRunningTime="2025-11-28 07:12:38.602081382 +0000 UTC m=+1203.522476964" watchObservedRunningTime="2025-11-28 07:12:38.602911054 +0000 UTC m=+1203.523306636" Nov 28 07:12:38 crc kubenswrapper[4922]: I1128 07:12:38.634361 4922 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/glance-operator-controller-manager-589cbd6b5b-89z98" Nov 28 07:12:38 crc kubenswrapper[4922]: I1128 07:12:38.653926 4922 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/glance-operator-controller-manager-589cbd6b5b-89z98" Nov 28 07:12:38 crc kubenswrapper[4922]: I1128 07:12:38.663364 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ovn-operator-controller-manager-56897c768d-rl77z" event={"ID":"5ab2d7c3-71a5-4337-acee-8ed5e7f09dbf","Type":"ContainerStarted","Data":"870b6e5ba47b5b1a6328faca661ca3309b70ae06a93a55d8858d7873796e2312"} Nov 28 07:12:38 crc kubenswrapper[4922]: I1128 07:12:38.689640 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/keystone-operator-controller-manager-7b4567c7cf-5m4mz" event={"ID":"1ecf6399-ca02-4591-89cd-1a0731a5a75c","Type":"ContainerStarted","Data":"aaf2f8926e25b1bedf1a8cb68c5d9ce8bc81a27bff12513ab5dc23d04e6c27dc"} Nov 28 07:12:38 crc kubenswrapper[4922]: I1128 07:12:38.690768 4922 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/keystone-operator-controller-manager-7b4567c7cf-5m4mz" Nov 28 07:12:38 crc kubenswrapper[4922]: I1128 07:12:38.717424 4922 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/octavia-operator-controller-manager-64cdc6ff96-74d6v" podStartSLOduration=3.080820557 podStartE2EDuration="26.717403751s" podCreationTimestamp="2025-11-28 07:12:12 +0000 UTC" firstStartedPulling="2025-11-28 07:12:14.317970869 +0000 UTC m=+1179.238366451" lastFinishedPulling="2025-11-28 07:12:37.954554033 +0000 UTC m=+1202.874949645" observedRunningTime="2025-11-28 07:12:38.707609249 +0000 UTC m=+1203.628004831" watchObservedRunningTime="2025-11-28 07:12:38.717403751 +0000 UTC m=+1203.637799333" Nov 28 07:12:38 crc kubenswrapper[4922]: I1128 07:12:38.722178 4922 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/keystone-operator-controller-manager-7b4567c7cf-5m4mz" Nov 28 07:12:38 crc kubenswrapper[4922]: I1128 07:12:38.732133 4922 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/manila-operator-controller-manager-5d499bf58b-4csdv" Nov 28 07:12:38 crc kubenswrapper[4922]: I1128 07:12:38.732761 4922 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/manila-operator-controller-manager-5d499bf58b-4csdv" Nov 28 07:12:38 crc kubenswrapper[4922]: I1128 07:12:38.760551 4922 kubelet.go:2453] "SyncLoop (PLEG): event 
for pod" pod="openstack-operators/swift-operator-controller-manager-d77b94747-4f6j5" event={"ID":"bbf78ced-8521-464d-a517-8ca721301421","Type":"ContainerStarted","Data":"05482c562cfa20aaea2d31b18b4ca593dec280674c165042a001d2b8fb64dbbc"} Nov 28 07:12:38 crc kubenswrapper[4922]: I1128 07:12:38.790575 4922 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/keystone-operator-controller-manager-7b4567c7cf-5m4mz" podStartSLOduration=3.084640809 podStartE2EDuration="26.790557254s" podCreationTimestamp="2025-11-28 07:12:12 +0000 UTC" firstStartedPulling="2025-11-28 07:12:14.317462736 +0000 UTC m=+1179.237858308" lastFinishedPulling="2025-11-28 07:12:38.023379131 +0000 UTC m=+1202.943774753" observedRunningTime="2025-11-28 07:12:38.760326977 +0000 UTC m=+1203.680722559" watchObservedRunningTime="2025-11-28 07:12:38.790557254 +0000 UTC m=+1203.710952836" Nov 28 07:12:38 crc kubenswrapper[4922]: I1128 07:12:38.801443 4922 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/glance-operator-controller-manager-589cbd6b5b-89z98" podStartSLOduration=2.732367873 podStartE2EDuration="26.801424355s" podCreationTimestamp="2025-11-28 07:12:12 +0000 UTC" firstStartedPulling="2025-11-28 07:12:13.953126968 +0000 UTC m=+1178.873522550" lastFinishedPulling="2025-11-28 07:12:38.02218344 +0000 UTC m=+1202.942579032" observedRunningTime="2025-11-28 07:12:38.799306238 +0000 UTC m=+1203.719701840" watchObservedRunningTime="2025-11-28 07:12:38.801424355 +0000 UTC m=+1203.721819937" Nov 28 07:12:38 crc kubenswrapper[4922]: I1128 07:12:38.992558 4922 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/manila-operator-controller-manager-5d499bf58b-4csdv" podStartSLOduration=3.189605621 podStartE2EDuration="26.992535437s" podCreationTimestamp="2025-11-28 07:12:12 +0000 UTC" firstStartedPulling="2025-11-28 07:12:14.273113101 +0000 UTC m=+1179.193508683" lastFinishedPulling="2025-11-28 07:12:38.076042917 +0000 UTC m=+1202.996438499" observedRunningTime="2025-11-28 07:12:38.881510172 +0000 UTC m=+1203.801905754" watchObservedRunningTime="2025-11-28 07:12:38.992535437 +0000 UTC m=+1203.912931019" Nov 28 07:12:39 crc kubenswrapper[4922]: I1128 07:12:39.769349 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/manila-operator-controller-manager-5d499bf58b-4csdv" event={"ID":"0a20a7e6-e9a5-433a-bf5e-d8f62053f611","Type":"ContainerStarted","Data":"eefdda172b58727a500ccd96ffff1915ea8c3ec9eba8604869bfe6b6962cde45"} Nov 28 07:12:39 crc kubenswrapper[4922]: I1128 07:12:39.771481 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/glance-operator-controller-manager-589cbd6b5b-89z98" event={"ID":"3ec4d3b2-efb1-4aa0-a5dc-a130ffc887be","Type":"ContainerStarted","Data":"12e6dda6ca4a0ad8b41b09febbe176a21afa69c826b479a7a99841199038e213"} Nov 28 07:12:39 crc kubenswrapper[4922]: I1128 07:12:39.773439 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/designate-operator-controller-manager-955677c94-6kwvm" event={"ID":"8577290f-ffdd-49ed-8666-6d5ca2323102","Type":"ContainerStarted","Data":"3519ee6e3414172ab96200b86b9d3be4afa3b6db99c0471faebd227812d59f96"} Nov 28 07:12:45 crc kubenswrapper[4922]: I1128 07:12:45.433852 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/5f800348-9092-4701-a85d-3bb8f40b51bd-webhook-certs\") pod 
\"openstack-operator-controller-manager-66f75ddbcc-b48l9\" (UID: \"5f800348-9092-4701-a85d-3bb8f40b51bd\") " pod="openstack-operators/openstack-operator-controller-manager-66f75ddbcc-b48l9" Nov 28 07:12:45 crc kubenswrapper[4922]: I1128 07:12:45.446662 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/5f800348-9092-4701-a85d-3bb8f40b51bd-webhook-certs\") pod \"openstack-operator-controller-manager-66f75ddbcc-b48l9\" (UID: \"5f800348-9092-4701-a85d-3bb8f40b51bd\") " pod="openstack-operators/openstack-operator-controller-manager-66f75ddbcc-b48l9" Nov 28 07:12:45 crc kubenswrapper[4922]: I1128 07:12:45.658939 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-operator-controller-manager-dockercfg-dktq5" Nov 28 07:12:45 crc kubenswrapper[4922]: I1128 07:12:45.667043 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-controller-manager-66f75ddbcc-b48l9" Nov 28 07:12:46 crc kubenswrapper[4922]: W1128 07:12:46.124598 4922 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod5f800348_9092_4701_a85d_3bb8f40b51bd.slice/crio-8ef52cd8e0e6408a6b60fd14a1662a8b95c1824d266c7fdbff39b8c98f25a3df WatchSource:0}: Error finding container 8ef52cd8e0e6408a6b60fd14a1662a8b95c1824d266c7fdbff39b8c98f25a3df: Status 404 returned error can't find the container with id 8ef52cd8e0e6408a6b60fd14a1662a8b95c1824d266c7fdbff39b8c98f25a3df Nov 28 07:12:46 crc kubenswrapper[4922]: I1128 07:12:46.127418 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-controller-manager-66f75ddbcc-b48l9"] Nov 28 07:12:46 crc kubenswrapper[4922]: I1128 07:12:46.826031 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-manager-66f75ddbcc-b48l9" event={"ID":"5f800348-9092-4701-a85d-3bb8f40b51bd","Type":"ContainerStarted","Data":"8ef52cd8e0e6408a6b60fd14a1662a8b95c1824d266c7fdbff39b8c98f25a3df"} Nov 28 07:12:48 crc kubenswrapper[4922]: I1128 07:12:48.845818 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/neutron-operator-controller-manager-6fdcddb789-5bl8t" event={"ID":"b32f9a0b-4c07-433c-b02a-aed24265ae16","Type":"ContainerStarted","Data":"de66f26b831f61e406a2f62b3d22fc63fa83a6b35fcd939a772b0833cf4afa2a"} Nov 28 07:12:48 crc kubenswrapper[4922]: I1128 07:12:48.848769 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/swift-operator-controller-manager-d77b94747-4f6j5" event={"ID":"bbf78ced-8521-464d-a517-8ca721301421","Type":"ContainerStarted","Data":"ad22c4cc4b5acdf138b235e235ffc7794bac0b06ff2990ab9e9390cb3e666cd4"} Nov 28 07:12:48 crc kubenswrapper[4922]: I1128 07:12:48.851275 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/cinder-operator-controller-manager-6b7f75547b-njqq9" event={"ID":"5b5e379b-d7c5-45ae-81f8-cebce05059a0","Type":"ContainerStarted","Data":"b28ba95c789b2f5c665d30d9c0923edea0609f150cdf9164de0f657c9b299872"} Nov 28 07:12:48 crc kubenswrapper[4922]: I1128 07:12:48.853991 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/barbican-operator-controller-manager-7b64f4fb85-7wc2c" event={"ID":"743984bb-d44f-4721-b9a0-d12f71feb3e9","Type":"ContainerStarted","Data":"fcf49c9519a725f439cb42d2b95942fef5142fba761f7f2d66a8d626c0c33b43"} Nov 28 07:12:48 
crc kubenswrapper[4922]: I1128 07:12:48.855521 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/mariadb-operator-controller-manager-66f4dd4bc7-bthkr" event={"ID":"31bc74a9-0e17-4400-bef5-d36ff53831dd","Type":"ContainerStarted","Data":"d0876526253ebc6e8eba0613bf4f58889ff20f51594ee5549b01dd6182b52f3e"} Nov 28 07:12:48 crc kubenswrapper[4922]: I1128 07:12:48.855763 4922 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/designate-operator-controller-manager-955677c94-6kwvm" Nov 28 07:12:48 crc kubenswrapper[4922]: I1128 07:12:48.859958 4922 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/designate-operator-controller-manager-955677c94-6kwvm" Nov 28 07:12:48 crc kubenswrapper[4922]: I1128 07:12:48.878345 4922 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/designate-operator-controller-manager-955677c94-6kwvm" podStartSLOduration=13.147512293 podStartE2EDuration="36.878324573s" podCreationTimestamp="2025-11-28 07:12:12 +0000 UTC" firstStartedPulling="2025-11-28 07:12:14.305651671 +0000 UTC m=+1179.226047253" lastFinishedPulling="2025-11-28 07:12:38.036463931 +0000 UTC m=+1202.956859533" observedRunningTime="2025-11-28 07:12:48.870675468 +0000 UTC m=+1213.791071050" watchObservedRunningTime="2025-11-28 07:12:48.878324573 +0000 UTC m=+1213.798720155" Nov 28 07:12:49 crc kubenswrapper[4922]: I1128 07:12:49.864639 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/horizon-operator-controller-manager-5d494799bf-htnmk" event={"ID":"b0972771-f2d5-4f8d-91cb-ef2fde0b536b","Type":"ContainerStarted","Data":"839a3abf6ae109a3feea4036082af1d0c43960a682bef75810dcb500ce702274"} Nov 28 07:12:49 crc kubenswrapper[4922]: I1128 07:12:49.866432 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ironic-operator-controller-manager-67cb4dc6d4-cdnsq" event={"ID":"9c2b26fd-dac9-45ae-b100-b46c85c11506","Type":"ContainerStarted","Data":"3209e3fb42fb117b625bc6db46c42954486e24bcc31bd2b714da3223f77874b1"} Nov 28 07:12:49 crc kubenswrapper[4922]: I1128 07:12:49.868182 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-baremetal-operator-controller-manager-5d9f9695db9c48b" event={"ID":"ff496359-1983-4a83-a344-3fb11e4587d9","Type":"ContainerStarted","Data":"3be33c26156529c128939b67f7154e1309711f5a9a33a2f461d7316544957363"} Nov 28 07:12:49 crc kubenswrapper[4922]: I1128 07:12:49.869884 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/infra-operator-controller-manager-57548d458d-x2bzf" event={"ID":"a2b17b7d-a398-4ce6-9e4a-6d80e1e97369","Type":"ContainerStarted","Data":"61092bd9488fdabce1f9bfffe4df2ca17a11cf75a4ddef9008ca24f868ef1696"} Nov 28 07:12:49 crc kubenswrapper[4922]: I1128 07:12:49.871940 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/telemetry-operator-controller-manager-76cc84c6bb-5hpq8" event={"ID":"89dd35b2-343e-4e06-9fac-6474b93b294e","Type":"ContainerStarted","Data":"8670a8cee8f5eb8c5efbce045388ad516c5dd9a1ab18c6d463b3bf7769ed198f"} Nov 28 07:12:49 crc kubenswrapper[4922]: I1128 07:12:49.873594 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/watcher-operator-controller-manager-656dcb59d4-h76jf" event={"ID":"b46e91a2-650c-4ae1-afa0-6edc6768c99e","Type":"ContainerStarted","Data":"4a41fbc8b2f03b55a4427b6190a5f2e7750fa82c8e0f707e29863d9ed02e6c77"} Nov 28 07:12:49 crc 
kubenswrapper[4922]: I1128 07:12:49.875037 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/test-operator-controller-manager-5cd6c7f4c8-7mrxz" event={"ID":"857fa43c-dddb-46c5-88e2-00e359b43c66","Type":"ContainerStarted","Data":"8f4349a7bff8cbffccda843a148232fb10bb3ee223477721a3cc38c4117b0492"} Nov 28 07:12:49 crc kubenswrapper[4922]: I1128 07:12:49.882320 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-manager-66f75ddbcc-b48l9" event={"ID":"5f800348-9092-4701-a85d-3bb8f40b51bd","Type":"ContainerStarted","Data":"50ebd7d8501e87bbac86bfb37b6cd072174965f1d93499c375de16f705fde526"} Nov 28 07:12:49 crc kubenswrapper[4922]: I1128 07:12:49.887083 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/nova-operator-controller-manager-79556f57fc-jbsc8" event={"ID":"c905882d-e743-49a3-b331-f3b7cc0d8649","Type":"ContainerStarted","Data":"cd97d7d59b7202eb0ab6252b7ab4ae2e75599bfcd979c5d05905e9a71d74f403"} Nov 28 07:12:49 crc kubenswrapper[4922]: I1128 07:12:49.889304 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/placement-operator-controller-manager-57988cc5b5-jgwt8" event={"ID":"210cb1b4-9abb-454e-9f7e-9dbd52ec0c9b","Type":"ContainerStarted","Data":"a36dc4c1a13da3116c9b582bf93e76986bce662467ee8b72904cb1ad2e78ad46"} Nov 28 07:12:49 crc kubenswrapper[4922]: I1128 07:12:49.890351 4922 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/placement-operator-controller-manager-57988cc5b5-jgwt8" Nov 28 07:12:49 crc kubenswrapper[4922]: I1128 07:12:49.892202 4922 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/placement-operator-controller-manager-57988cc5b5-jgwt8" Nov 28 07:12:49 crc kubenswrapper[4922]: I1128 07:12:49.892693 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ovn-operator-controller-manager-56897c768d-rl77z" event={"ID":"5ab2d7c3-71a5-4337-acee-8ed5e7f09dbf","Type":"ContainerStarted","Data":"ce0ab9eb777baa38e44b93b56ff7d4e3e053bab69087d7507ef9f4d2a66a23bc"} Nov 28 07:12:49 crc kubenswrapper[4922]: I1128 07:12:49.893569 4922 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/cinder-operator-controller-manager-6b7f75547b-njqq9" Nov 28 07:12:49 crc kubenswrapper[4922]: I1128 07:12:49.895182 4922 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/cinder-operator-controller-manager-6b7f75547b-njqq9" Nov 28 07:12:49 crc kubenswrapper[4922]: I1128 07:12:49.908567 4922 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/placement-operator-controller-manager-57988cc5b5-jgwt8" podStartSLOduration=15.637904145 podStartE2EDuration="37.90855001s" podCreationTimestamp="2025-11-28 07:12:12 +0000 UTC" firstStartedPulling="2025-11-28 07:12:14.427595396 +0000 UTC m=+1179.347990978" lastFinishedPulling="2025-11-28 07:12:36.698241231 +0000 UTC m=+1201.618636843" observedRunningTime="2025-11-28 07:12:49.905714754 +0000 UTC m=+1214.826110346" watchObservedRunningTime="2025-11-28 07:12:49.90855001 +0000 UTC m=+1214.828945602" Nov 28 07:12:49 crc kubenswrapper[4922]: I1128 07:12:49.929170 4922 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/neutron-operator-controller-manager-6fdcddb789-5bl8t" podStartSLOduration=14.307279497 podStartE2EDuration="37.929150659s" podCreationTimestamp="2025-11-28 
07:12:12 +0000 UTC" firstStartedPulling="2025-11-28 07:12:14.424126033 +0000 UTC m=+1179.344521605" lastFinishedPulling="2025-11-28 07:12:38.045997185 +0000 UTC m=+1202.966392767" observedRunningTime="2025-11-28 07:12:49.922354048 +0000 UTC m=+1214.842749630" watchObservedRunningTime="2025-11-28 07:12:49.929150659 +0000 UTC m=+1214.849546241" Nov 28 07:12:49 crc kubenswrapper[4922]: I1128 07:12:49.953884 4922 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/mariadb-operator-controller-manager-66f4dd4bc7-bthkr" podStartSLOduration=14.313454722 podStartE2EDuration="37.953868499s" podCreationTimestamp="2025-11-28 07:12:12 +0000 UTC" firstStartedPulling="2025-11-28 07:12:14.421062852 +0000 UTC m=+1179.341458434" lastFinishedPulling="2025-11-28 07:12:38.061476619 +0000 UTC m=+1202.981872211" observedRunningTime="2025-11-28 07:12:49.951734522 +0000 UTC m=+1214.872130114" watchObservedRunningTime="2025-11-28 07:12:49.953868499 +0000 UTC m=+1214.874264081" Nov 28 07:12:49 crc kubenswrapper[4922]: I1128 07:12:49.976337 4922 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/swift-operator-controller-manager-d77b94747-4f6j5" podStartSLOduration=14.870696391 podStartE2EDuration="37.976322569s" podCreationTimestamp="2025-11-28 07:12:12 +0000 UTC" firstStartedPulling="2025-11-28 07:12:14.584768572 +0000 UTC m=+1179.505164154" lastFinishedPulling="2025-11-28 07:12:37.69039475 +0000 UTC m=+1202.610790332" observedRunningTime="2025-11-28 07:12:49.969706392 +0000 UTC m=+1214.890101974" watchObservedRunningTime="2025-11-28 07:12:49.976322569 +0000 UTC m=+1214.896718151" Nov 28 07:12:49 crc kubenswrapper[4922]: I1128 07:12:49.986465 4922 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/cinder-operator-controller-manager-6b7f75547b-njqq9" podStartSLOduration=13.845961441 podStartE2EDuration="37.986452859s" podCreationTimestamp="2025-11-28 07:12:12 +0000 UTC" firstStartedPulling="2025-11-28 07:12:13.92248743 +0000 UTC m=+1178.842883022" lastFinishedPulling="2025-11-28 07:12:38.062978858 +0000 UTC m=+1202.983374440" observedRunningTime="2025-11-28 07:12:49.983383227 +0000 UTC m=+1214.903778809" watchObservedRunningTime="2025-11-28 07:12:49.986452859 +0000 UTC m=+1214.906848441" Nov 28 07:12:50 crc kubenswrapper[4922]: I1128 07:12:50.017845 4922 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/barbican-operator-controller-manager-7b64f4fb85-7wc2c" podStartSLOduration=13.761147467 podStartE2EDuration="38.017822827s" podCreationTimestamp="2025-11-28 07:12:12 +0000 UTC" firstStartedPulling="2025-11-28 07:12:13.808815805 +0000 UTC m=+1178.729211387" lastFinishedPulling="2025-11-28 07:12:38.065491165 +0000 UTC m=+1202.985886747" observedRunningTime="2025-11-28 07:12:50.013741238 +0000 UTC m=+1214.934136820" watchObservedRunningTime="2025-11-28 07:12:50.017822827 +0000 UTC m=+1214.938218409" Nov 28 07:12:50 crc kubenswrapper[4922]: I1128 07:12:50.925012 4922 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/ironic-operator-controller-manager-67cb4dc6d4-cdnsq" podStartSLOduration=26.20985701 podStartE2EDuration="38.924174766s" podCreationTimestamp="2025-11-28 07:12:12 +0000 UTC" firstStartedPulling="2025-11-28 07:12:14.273333837 +0000 UTC m=+1179.193729419" lastFinishedPulling="2025-11-28 07:12:26.987651553 +0000 UTC m=+1191.908047175" observedRunningTime="2025-11-28 07:12:50.92094299 +0000 UTC 
m=+1215.841338602" watchObservedRunningTime="2025-11-28 07:12:50.924174766 +0000 UTC m=+1215.844570388" Nov 28 07:12:50 crc kubenswrapper[4922]: I1128 07:12:50.975129 4922 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/ovn-operator-controller-manager-56897c768d-rl77z" podStartSLOduration=16.258694119 podStartE2EDuration="38.975101565s" podCreationTimestamp="2025-11-28 07:12:12 +0000 UTC" firstStartedPulling="2025-11-28 07:12:14.428020277 +0000 UTC m=+1179.348415859" lastFinishedPulling="2025-11-28 07:12:37.144427713 +0000 UTC m=+1202.064823305" observedRunningTime="2025-11-28 07:12:50.966719331 +0000 UTC m=+1215.887114953" watchObservedRunningTime="2025-11-28 07:12:50.975101565 +0000 UTC m=+1215.895497177" Nov 28 07:12:50 crc kubenswrapper[4922]: I1128 07:12:50.987871 4922 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/test-operator-controller-manager-5cd6c7f4c8-7mrxz" podStartSLOduration=14.573893746 podStartE2EDuration="37.987850816s" podCreationTimestamp="2025-11-28 07:12:13 +0000 UTC" firstStartedPulling="2025-11-28 07:12:14.561667776 +0000 UTC m=+1179.482063358" lastFinishedPulling="2025-11-28 07:12:37.975624846 +0000 UTC m=+1202.896020428" observedRunningTime="2025-11-28 07:12:50.985495583 +0000 UTC m=+1215.905891195" watchObservedRunningTime="2025-11-28 07:12:50.987850816 +0000 UTC m=+1215.908246408" Nov 28 07:12:51 crc kubenswrapper[4922]: I1128 07:12:51.010588 4922 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/watcher-operator-controller-manager-656dcb59d4-h76jf" podStartSLOduration=14.841708936 podStartE2EDuration="38.010563062s" podCreationTimestamp="2025-11-28 07:12:13 +0000 UTC" firstStartedPulling="2025-11-28 07:12:14.560723431 +0000 UTC m=+1179.481119013" lastFinishedPulling="2025-11-28 07:12:37.729577547 +0000 UTC m=+1202.649973139" observedRunningTime="2025-11-28 07:12:51.003762191 +0000 UTC m=+1215.924157773" watchObservedRunningTime="2025-11-28 07:12:51.010563062 +0000 UTC m=+1215.930958664" Nov 28 07:12:51 crc kubenswrapper[4922]: I1128 07:12:51.043117 4922 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/openstack-operator-controller-manager-66f75ddbcc-b48l9" podStartSLOduration=38.043096912 podStartE2EDuration="38.043096912s" podCreationTimestamp="2025-11-28 07:12:13 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 07:12:51.034864922 +0000 UTC m=+1215.955260514" watchObservedRunningTime="2025-11-28 07:12:51.043096912 +0000 UTC m=+1215.963492504" Nov 28 07:12:51 crc kubenswrapper[4922]: I1128 07:12:51.060175 4922 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/telemetry-operator-controller-manager-76cc84c6bb-5hpq8" podStartSLOduration=15.598095432000001 podStartE2EDuration="39.060155007s" podCreationTimestamp="2025-11-28 07:12:12 +0000 UTC" firstStartedPulling="2025-11-28 07:12:14.560506955 +0000 UTC m=+1179.480902537" lastFinishedPulling="2025-11-28 07:12:38.02256652 +0000 UTC m=+1202.942962112" observedRunningTime="2025-11-28 07:12:51.054968638 +0000 UTC m=+1215.975364220" watchObservedRunningTime="2025-11-28 07:12:51.060155007 +0000 UTC m=+1215.980550589" Nov 28 07:12:51 crc kubenswrapper[4922]: I1128 07:12:51.086557 4922 pod_startup_latency_tracker.go:104] "Observed pod startup duration" 
pod="openstack-operators/openstack-baremetal-operator-controller-manager-5d9f9695db9c48b" podStartSLOduration=31.066066969 podStartE2EDuration="39.086536481s" podCreationTimestamp="2025-11-28 07:12:12 +0000 UTC" firstStartedPulling="2025-11-28 07:12:29.95725215 +0000 UTC m=+1194.877647732" lastFinishedPulling="2025-11-28 07:12:37.977721632 +0000 UTC m=+1202.898117244" observedRunningTime="2025-11-28 07:12:51.076893534 +0000 UTC m=+1215.997289126" watchObservedRunningTime="2025-11-28 07:12:51.086536481 +0000 UTC m=+1216.006932063" Nov 28 07:12:51 crc kubenswrapper[4922]: I1128 07:12:51.100176 4922 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/nova-operator-controller-manager-79556f57fc-jbsc8" podStartSLOduration=26.527117142 podStartE2EDuration="39.100154165s" podCreationTimestamp="2025-11-28 07:12:12 +0000 UTC" firstStartedPulling="2025-11-28 07:12:14.414772494 +0000 UTC m=+1179.335168076" lastFinishedPulling="2025-11-28 07:12:26.987809487 +0000 UTC m=+1191.908205099" observedRunningTime="2025-11-28 07:12:51.092759217 +0000 UTC m=+1216.013154799" watchObservedRunningTime="2025-11-28 07:12:51.100154165 +0000 UTC m=+1216.020549747" Nov 28 07:12:51 crc kubenswrapper[4922]: I1128 07:12:51.126255 4922 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/infra-operator-controller-manager-57548d458d-x2bzf" podStartSLOduration=31.128147616 podStartE2EDuration="39.126239421s" podCreationTimestamp="2025-11-28 07:12:12 +0000 UTC" firstStartedPulling="2025-11-28 07:12:29.956781107 +0000 UTC m=+1194.877176689" lastFinishedPulling="2025-11-28 07:12:37.954872912 +0000 UTC m=+1202.875268494" observedRunningTime="2025-11-28 07:12:51.120270751 +0000 UTC m=+1216.040666353" watchObservedRunningTime="2025-11-28 07:12:51.126239421 +0000 UTC m=+1216.046635003" Nov 28 07:12:51 crc kubenswrapper[4922]: I1128 07:12:51.147503 4922 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/horizon-operator-controller-manager-5d494799bf-htnmk" podStartSLOduration=26.249802187 podStartE2EDuration="39.147483358s" podCreationTimestamp="2025-11-28 07:12:12 +0000 UTC" firstStartedPulling="2025-11-28 07:12:14.090080955 +0000 UTC m=+1179.010476537" lastFinishedPulling="2025-11-28 07:12:26.987762136 +0000 UTC m=+1191.908157708" observedRunningTime="2025-11-28 07:12:51.14044676 +0000 UTC m=+1216.060842362" watchObservedRunningTime="2025-11-28 07:12:51.147483358 +0000 UTC m=+1216.067878950" Nov 28 07:12:52 crc kubenswrapper[4922]: I1128 07:12:52.951528 4922 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/barbican-operator-controller-manager-7b64f4fb85-7wc2c" Nov 28 07:12:52 crc kubenswrapper[4922]: I1128 07:12:52.954933 4922 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/barbican-operator-controller-manager-7b64f4fb85-7wc2c" Nov 28 07:12:53 crc kubenswrapper[4922]: I1128 07:12:53.225651 4922 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/mariadb-operator-controller-manager-66f4dd4bc7-bthkr" Nov 28 07:12:53 crc kubenswrapper[4922]: I1128 07:12:53.228796 4922 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/mariadb-operator-controller-manager-66f4dd4bc7-bthkr" Nov 28 07:12:53 crc kubenswrapper[4922]: I1128 07:12:53.324284 4922 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" 
pod="openstack-operators/neutron-operator-controller-manager-6fdcddb789-5bl8t" Nov 28 07:12:53 crc kubenswrapper[4922]: I1128 07:12:53.328947 4922 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/neutron-operator-controller-manager-6fdcddb789-5bl8t" Nov 28 07:12:53 crc kubenswrapper[4922]: I1128 07:12:53.733280 4922 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/ovn-operator-controller-manager-56897c768d-rl77z" Nov 28 07:12:53 crc kubenswrapper[4922]: I1128 07:12:53.736485 4922 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/ovn-operator-controller-manager-56897c768d-rl77z" Nov 28 07:12:53 crc kubenswrapper[4922]: I1128 07:12:53.756410 4922 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/swift-operator-controller-manager-d77b94747-4f6j5" Nov 28 07:12:53 crc kubenswrapper[4922]: I1128 07:12:53.763753 4922 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/swift-operator-controller-manager-d77b94747-4f6j5" Nov 28 07:12:53 crc kubenswrapper[4922]: I1128 07:12:53.796143 4922 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/telemetry-operator-controller-manager-76cc84c6bb-5hpq8" Nov 28 07:12:53 crc kubenswrapper[4922]: I1128 07:12:53.804911 4922 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/telemetry-operator-controller-manager-76cc84c6bb-5hpq8" Nov 28 07:12:53 crc kubenswrapper[4922]: I1128 07:12:53.810288 4922 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/test-operator-controller-manager-5cd6c7f4c8-7mrxz" Nov 28 07:12:53 crc kubenswrapper[4922]: I1128 07:12:53.816512 4922 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/test-operator-controller-manager-5cd6c7f4c8-7mrxz" Nov 28 07:12:53 crc kubenswrapper[4922]: I1128 07:12:53.831960 4922 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/watcher-operator-controller-manager-656dcb59d4-h76jf" Nov 28 07:12:53 crc kubenswrapper[4922]: I1128 07:12:53.835353 4922 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/watcher-operator-controller-manager-656dcb59d4-h76jf" Nov 28 07:12:55 crc kubenswrapper[4922]: I1128 07:12:55.668121 4922 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/openstack-operator-controller-manager-66f75ddbcc-b48l9" Nov 28 07:12:55 crc kubenswrapper[4922]: I1128 07:12:55.673110 4922 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/openstack-operator-controller-manager-66f75ddbcc-b48l9" Nov 28 07:12:57 crc kubenswrapper[4922]: I1128 07:12:57.312347 4922 patch_prober.go:28] interesting pod/machine-config-daemon-h8wk6 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 07:12:57 crc kubenswrapper[4922]: I1128 07:12:57.312422 4922 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-h8wk6" podUID="0498340a-5e95-42bf-a0a6-8ac89a6b8858" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 
127.0.0.1:8798: connect: connection refused" Nov 28 07:12:57 crc kubenswrapper[4922]: I1128 07:12:57.312489 4922 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-h8wk6" Nov 28 07:12:57 crc kubenswrapper[4922]: I1128 07:12:57.313406 4922 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"74644ed2805eab754b767a063d8b9fa8b033ceca1db2f16aed9a8b2d915a2091"} pod="openshift-machine-config-operator/machine-config-daemon-h8wk6" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 28 07:12:57 crc kubenswrapper[4922]: I1128 07:12:57.313867 4922 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-h8wk6" podUID="0498340a-5e95-42bf-a0a6-8ac89a6b8858" containerName="machine-config-daemon" containerID="cri-o://74644ed2805eab754b767a063d8b9fa8b033ceca1db2f16aed9a8b2d915a2091" gracePeriod=600 Nov 28 07:12:57 crc kubenswrapper[4922]: I1128 07:12:57.969571 4922 generic.go:334] "Generic (PLEG): container finished" podID="0498340a-5e95-42bf-a0a6-8ac89a6b8858" containerID="74644ed2805eab754b767a063d8b9fa8b033ceca1db2f16aed9a8b2d915a2091" exitCode=0 Nov 28 07:12:57 crc kubenswrapper[4922]: I1128 07:12:57.969775 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-h8wk6" event={"ID":"0498340a-5e95-42bf-a0a6-8ac89a6b8858","Type":"ContainerDied","Data":"74644ed2805eab754b767a063d8b9fa8b033ceca1db2f16aed9a8b2d915a2091"} Nov 28 07:12:57 crc kubenswrapper[4922]: I1128 07:12:57.969919 4922 scope.go:117] "RemoveContainer" containerID="e4c215f7c16b1b6ee789152f5bfa304df0b7e2d633a6748eb5b815f0448ea2e7" Nov 28 07:12:58 crc kubenswrapper[4922]: I1128 07:12:58.985388 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-h8wk6" event={"ID":"0498340a-5e95-42bf-a0a6-8ac89a6b8858","Type":"ContainerStarted","Data":"81e37e3417d1f4f55a00b3a748b722590a99b434ea982924a0a5d757ceb112c8"} Nov 28 07:12:58 crc kubenswrapper[4922]: I1128 07:12:58.996343 4922 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/infra-operator-controller-manager-57548d458d-x2bzf" Nov 28 07:12:59 crc kubenswrapper[4922]: I1128 07:12:59.003388 4922 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/infra-operator-controller-manager-57548d458d-x2bzf" Nov 28 07:12:59 crc kubenswrapper[4922]: I1128 07:12:59.075871 4922 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/openstack-baremetal-operator-controller-manager-5d9f9695db9c48b" Nov 28 07:12:59 crc kubenswrapper[4922]: I1128 07:12:59.083420 4922 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/openstack-baremetal-operator-controller-manager-5d9f9695db9c48b" Nov 28 07:13:13 crc kubenswrapper[4922]: I1128 07:13:13.998834 4922 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-557f57d995-hqpvv"] Nov 28 07:13:14 crc kubenswrapper[4922]: I1128 07:13:14.003426 4922 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-557f57d995-hqpvv" Nov 28 07:13:14 crc kubenswrapper[4922]: I1128 07:13:14.005889 4922 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openshift-service-ca.crt" Nov 28 07:13:14 crc kubenswrapper[4922]: I1128 07:13:14.005960 4922 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"kube-root-ca.crt" Nov 28 07:13:14 crc kubenswrapper[4922]: I1128 07:13:14.006306 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dnsmasq-dns-dockercfg-njtmg" Nov 28 07:13:14 crc kubenswrapper[4922]: I1128 07:13:14.006499 4922 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"dns" Nov 28 07:13:14 crc kubenswrapper[4922]: I1128 07:13:14.021669 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-557f57d995-hqpvv"] Nov 28 07:13:14 crc kubenswrapper[4922]: I1128 07:13:14.075063 4922 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-766fdc659c-674sf"] Nov 28 07:13:14 crc kubenswrapper[4922]: I1128 07:13:14.076619 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-766fdc659c-674sf" Nov 28 07:13:14 crc kubenswrapper[4922]: I1128 07:13:14.079022 4922 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"dns-svc" Nov 28 07:13:14 crc kubenswrapper[4922]: I1128 07:13:14.085433 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-766fdc659c-674sf"] Nov 28 07:13:14 crc kubenswrapper[4922]: I1128 07:13:14.192935 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/95718cd4-a34d-4d0c-949f-27d67a0cea23-config\") pod \"dnsmasq-dns-557f57d995-hqpvv\" (UID: \"95718cd4-a34d-4d0c-949f-27d67a0cea23\") " pod="openstack/dnsmasq-dns-557f57d995-hqpvv" Nov 28 07:13:14 crc kubenswrapper[4922]: I1128 07:13:14.192985 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jb6b2\" (UniqueName: \"kubernetes.io/projected/7d671d03-d5c3-43ae-a670-803728c16385-kube-api-access-jb6b2\") pod \"dnsmasq-dns-766fdc659c-674sf\" (UID: \"7d671d03-d5c3-43ae-a670-803728c16385\") " pod="openstack/dnsmasq-dns-766fdc659c-674sf" Nov 28 07:13:14 crc kubenswrapper[4922]: I1128 07:13:14.193023 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7d671d03-d5c3-43ae-a670-803728c16385-config\") pod \"dnsmasq-dns-766fdc659c-674sf\" (UID: \"7d671d03-d5c3-43ae-a670-803728c16385\") " pod="openstack/dnsmasq-dns-766fdc659c-674sf" Nov 28 07:13:14 crc kubenswrapper[4922]: I1128 07:13:14.193142 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/7d671d03-d5c3-43ae-a670-803728c16385-dns-svc\") pod \"dnsmasq-dns-766fdc659c-674sf\" (UID: \"7d671d03-d5c3-43ae-a670-803728c16385\") " pod="openstack/dnsmasq-dns-766fdc659c-674sf" Nov 28 07:13:14 crc kubenswrapper[4922]: I1128 07:13:14.193201 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4v9m5\" (UniqueName: \"kubernetes.io/projected/95718cd4-a34d-4d0c-949f-27d67a0cea23-kube-api-access-4v9m5\") pod \"dnsmasq-dns-557f57d995-hqpvv\" (UID: \"95718cd4-a34d-4d0c-949f-27d67a0cea23\") " 
pod="openstack/dnsmasq-dns-557f57d995-hqpvv" Nov 28 07:13:14 crc kubenswrapper[4922]: I1128 07:13:14.294693 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/95718cd4-a34d-4d0c-949f-27d67a0cea23-config\") pod \"dnsmasq-dns-557f57d995-hqpvv\" (UID: \"95718cd4-a34d-4d0c-949f-27d67a0cea23\") " pod="openstack/dnsmasq-dns-557f57d995-hqpvv" Nov 28 07:13:14 crc kubenswrapper[4922]: I1128 07:13:14.295352 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jb6b2\" (UniqueName: \"kubernetes.io/projected/7d671d03-d5c3-43ae-a670-803728c16385-kube-api-access-jb6b2\") pod \"dnsmasq-dns-766fdc659c-674sf\" (UID: \"7d671d03-d5c3-43ae-a670-803728c16385\") " pod="openstack/dnsmasq-dns-766fdc659c-674sf" Nov 28 07:13:14 crc kubenswrapper[4922]: I1128 07:13:14.295492 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7d671d03-d5c3-43ae-a670-803728c16385-config\") pod \"dnsmasq-dns-766fdc659c-674sf\" (UID: \"7d671d03-d5c3-43ae-a670-803728c16385\") " pod="openstack/dnsmasq-dns-766fdc659c-674sf" Nov 28 07:13:14 crc kubenswrapper[4922]: I1128 07:13:14.295573 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/95718cd4-a34d-4d0c-949f-27d67a0cea23-config\") pod \"dnsmasq-dns-557f57d995-hqpvv\" (UID: \"95718cd4-a34d-4d0c-949f-27d67a0cea23\") " pod="openstack/dnsmasq-dns-557f57d995-hqpvv" Nov 28 07:13:14 crc kubenswrapper[4922]: I1128 07:13:14.295745 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/7d671d03-d5c3-43ae-a670-803728c16385-dns-svc\") pod \"dnsmasq-dns-766fdc659c-674sf\" (UID: \"7d671d03-d5c3-43ae-a670-803728c16385\") " pod="openstack/dnsmasq-dns-766fdc659c-674sf" Nov 28 07:13:14 crc kubenswrapper[4922]: I1128 07:13:14.295867 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4v9m5\" (UniqueName: \"kubernetes.io/projected/95718cd4-a34d-4d0c-949f-27d67a0cea23-kube-api-access-4v9m5\") pod \"dnsmasq-dns-557f57d995-hqpvv\" (UID: \"95718cd4-a34d-4d0c-949f-27d67a0cea23\") " pod="openstack/dnsmasq-dns-557f57d995-hqpvv" Nov 28 07:13:14 crc kubenswrapper[4922]: I1128 07:13:14.296354 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7d671d03-d5c3-43ae-a670-803728c16385-config\") pod \"dnsmasq-dns-766fdc659c-674sf\" (UID: \"7d671d03-d5c3-43ae-a670-803728c16385\") " pod="openstack/dnsmasq-dns-766fdc659c-674sf" Nov 28 07:13:14 crc kubenswrapper[4922]: I1128 07:13:14.296553 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/7d671d03-d5c3-43ae-a670-803728c16385-dns-svc\") pod \"dnsmasq-dns-766fdc659c-674sf\" (UID: \"7d671d03-d5c3-43ae-a670-803728c16385\") " pod="openstack/dnsmasq-dns-766fdc659c-674sf" Nov 28 07:13:14 crc kubenswrapper[4922]: I1128 07:13:14.315272 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4v9m5\" (UniqueName: \"kubernetes.io/projected/95718cd4-a34d-4d0c-949f-27d67a0cea23-kube-api-access-4v9m5\") pod \"dnsmasq-dns-557f57d995-hqpvv\" (UID: \"95718cd4-a34d-4d0c-949f-27d67a0cea23\") " pod="openstack/dnsmasq-dns-557f57d995-hqpvv" Nov 28 07:13:14 crc kubenswrapper[4922]: I1128 07:13:14.316013 4922 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jb6b2\" (UniqueName: \"kubernetes.io/projected/7d671d03-d5c3-43ae-a670-803728c16385-kube-api-access-jb6b2\") pod \"dnsmasq-dns-766fdc659c-674sf\" (UID: \"7d671d03-d5c3-43ae-a670-803728c16385\") " pod="openstack/dnsmasq-dns-766fdc659c-674sf" Nov 28 07:13:14 crc kubenswrapper[4922]: I1128 07:13:14.324769 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-557f57d995-hqpvv" Nov 28 07:13:14 crc kubenswrapper[4922]: I1128 07:13:14.390652 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-766fdc659c-674sf" Nov 28 07:13:14 crc kubenswrapper[4922]: I1128 07:13:14.624280 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-557f57d995-hqpvv"] Nov 28 07:13:14 crc kubenswrapper[4922]: W1128 07:13:14.627612 4922 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod95718cd4_a34d_4d0c_949f_27d67a0cea23.slice/crio-ea84c2b2616814a3494cb6b5e164e064f3123764daf6c327431576faf17a2c01 WatchSource:0}: Error finding container ea84c2b2616814a3494cb6b5e164e064f3123764daf6c327431576faf17a2c01: Status 404 returned error can't find the container with id ea84c2b2616814a3494cb6b5e164e064f3123764daf6c327431576faf17a2c01 Nov 28 07:13:14 crc kubenswrapper[4922]: I1128 07:13:14.677519 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-766fdc659c-674sf"] Nov 28 07:13:14 crc kubenswrapper[4922]: W1128 07:13:14.679373 4922 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod7d671d03_d5c3_43ae_a670_803728c16385.slice/crio-3c5f74345fe301242ccfcff91e3c8f9abb164099d0a8ac570d5cbebeaf7bf084 WatchSource:0}: Error finding container 3c5f74345fe301242ccfcff91e3c8f9abb164099d0a8ac570d5cbebeaf7bf084: Status 404 returned error can't find the container with id 3c5f74345fe301242ccfcff91e3c8f9abb164099d0a8ac570d5cbebeaf7bf084 Nov 28 07:13:15 crc kubenswrapper[4922]: I1128 07:13:15.140770 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-766fdc659c-674sf" event={"ID":"7d671d03-d5c3-43ae-a670-803728c16385","Type":"ContainerStarted","Data":"3c5f74345fe301242ccfcff91e3c8f9abb164099d0a8ac570d5cbebeaf7bf084"} Nov 28 07:13:15 crc kubenswrapper[4922]: I1128 07:13:15.141912 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-557f57d995-hqpvv" event={"ID":"95718cd4-a34d-4d0c-949f-27d67a0cea23","Type":"ContainerStarted","Data":"ea84c2b2616814a3494cb6b5e164e064f3123764daf6c327431576faf17a2c01"} Nov 28 07:13:17 crc kubenswrapper[4922]: I1128 07:13:17.063949 4922 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-557f57d995-hqpvv"] Nov 28 07:13:17 crc kubenswrapper[4922]: I1128 07:13:17.094999 4922 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-57dc4c6697-m7tx4"] Nov 28 07:13:17 crc kubenswrapper[4922]: I1128 07:13:17.096777 4922 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-57dc4c6697-m7tx4" Nov 28 07:13:17 crc kubenswrapper[4922]: I1128 07:13:17.107076 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-57dc4c6697-m7tx4"] Nov 28 07:13:17 crc kubenswrapper[4922]: I1128 07:13:17.240799 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-b9zkx\" (UniqueName: \"kubernetes.io/projected/aa2656f2-23d1-46c5-9cb9-eed7c3cddea2-kube-api-access-b9zkx\") pod \"dnsmasq-dns-57dc4c6697-m7tx4\" (UID: \"aa2656f2-23d1-46c5-9cb9-eed7c3cddea2\") " pod="openstack/dnsmasq-dns-57dc4c6697-m7tx4" Nov 28 07:13:17 crc kubenswrapper[4922]: I1128 07:13:17.240917 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/aa2656f2-23d1-46c5-9cb9-eed7c3cddea2-dns-svc\") pod \"dnsmasq-dns-57dc4c6697-m7tx4\" (UID: \"aa2656f2-23d1-46c5-9cb9-eed7c3cddea2\") " pod="openstack/dnsmasq-dns-57dc4c6697-m7tx4" Nov 28 07:13:17 crc kubenswrapper[4922]: I1128 07:13:17.240986 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/aa2656f2-23d1-46c5-9cb9-eed7c3cddea2-config\") pod \"dnsmasq-dns-57dc4c6697-m7tx4\" (UID: \"aa2656f2-23d1-46c5-9cb9-eed7c3cddea2\") " pod="openstack/dnsmasq-dns-57dc4c6697-m7tx4" Nov 28 07:13:17 crc kubenswrapper[4922]: I1128 07:13:17.342304 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/aa2656f2-23d1-46c5-9cb9-eed7c3cddea2-dns-svc\") pod \"dnsmasq-dns-57dc4c6697-m7tx4\" (UID: \"aa2656f2-23d1-46c5-9cb9-eed7c3cddea2\") " pod="openstack/dnsmasq-dns-57dc4c6697-m7tx4" Nov 28 07:13:17 crc kubenswrapper[4922]: I1128 07:13:17.342384 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/aa2656f2-23d1-46c5-9cb9-eed7c3cddea2-config\") pod \"dnsmasq-dns-57dc4c6697-m7tx4\" (UID: \"aa2656f2-23d1-46c5-9cb9-eed7c3cddea2\") " pod="openstack/dnsmasq-dns-57dc4c6697-m7tx4" Nov 28 07:13:17 crc kubenswrapper[4922]: I1128 07:13:17.342480 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-b9zkx\" (UniqueName: \"kubernetes.io/projected/aa2656f2-23d1-46c5-9cb9-eed7c3cddea2-kube-api-access-b9zkx\") pod \"dnsmasq-dns-57dc4c6697-m7tx4\" (UID: \"aa2656f2-23d1-46c5-9cb9-eed7c3cddea2\") " pod="openstack/dnsmasq-dns-57dc4c6697-m7tx4" Nov 28 07:13:17 crc kubenswrapper[4922]: I1128 07:13:17.343306 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/aa2656f2-23d1-46c5-9cb9-eed7c3cddea2-dns-svc\") pod \"dnsmasq-dns-57dc4c6697-m7tx4\" (UID: \"aa2656f2-23d1-46c5-9cb9-eed7c3cddea2\") " pod="openstack/dnsmasq-dns-57dc4c6697-m7tx4" Nov 28 07:13:17 crc kubenswrapper[4922]: I1128 07:13:17.343651 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/aa2656f2-23d1-46c5-9cb9-eed7c3cddea2-config\") pod \"dnsmasq-dns-57dc4c6697-m7tx4\" (UID: \"aa2656f2-23d1-46c5-9cb9-eed7c3cddea2\") " pod="openstack/dnsmasq-dns-57dc4c6697-m7tx4" Nov 28 07:13:17 crc kubenswrapper[4922]: I1128 07:13:17.367562 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-b9zkx\" (UniqueName: 
\"kubernetes.io/projected/aa2656f2-23d1-46c5-9cb9-eed7c3cddea2-kube-api-access-b9zkx\") pod \"dnsmasq-dns-57dc4c6697-m7tx4\" (UID: \"aa2656f2-23d1-46c5-9cb9-eed7c3cddea2\") " pod="openstack/dnsmasq-dns-57dc4c6697-m7tx4" Nov 28 07:13:17 crc kubenswrapper[4922]: I1128 07:13:17.420091 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-57dc4c6697-m7tx4" Nov 28 07:13:17 crc kubenswrapper[4922]: I1128 07:13:17.422715 4922 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-766fdc659c-674sf"] Nov 28 07:13:17 crc kubenswrapper[4922]: I1128 07:13:17.447162 4922 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-8446fd7c75-29qqr"] Nov 28 07:13:17 crc kubenswrapper[4922]: I1128 07:13:17.448310 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-8446fd7c75-29qqr" Nov 28 07:13:17 crc kubenswrapper[4922]: I1128 07:13:17.462642 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-8446fd7c75-29qqr"] Nov 28 07:13:17 crc kubenswrapper[4922]: I1128 07:13:17.549046 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6ln7v\" (UniqueName: \"kubernetes.io/projected/89786935-51a3-4851-94e2-5b15218b914e-kube-api-access-6ln7v\") pod \"dnsmasq-dns-8446fd7c75-29qqr\" (UID: \"89786935-51a3-4851-94e2-5b15218b914e\") " pod="openstack/dnsmasq-dns-8446fd7c75-29qqr" Nov 28 07:13:17 crc kubenswrapper[4922]: I1128 07:13:17.549133 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/89786935-51a3-4851-94e2-5b15218b914e-dns-svc\") pod \"dnsmasq-dns-8446fd7c75-29qqr\" (UID: \"89786935-51a3-4851-94e2-5b15218b914e\") " pod="openstack/dnsmasq-dns-8446fd7c75-29qqr" Nov 28 07:13:17 crc kubenswrapper[4922]: I1128 07:13:17.549207 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/89786935-51a3-4851-94e2-5b15218b914e-config\") pod \"dnsmasq-dns-8446fd7c75-29qqr\" (UID: \"89786935-51a3-4851-94e2-5b15218b914e\") " pod="openstack/dnsmasq-dns-8446fd7c75-29qqr" Nov 28 07:13:17 crc kubenswrapper[4922]: I1128 07:13:17.650144 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6ln7v\" (UniqueName: \"kubernetes.io/projected/89786935-51a3-4851-94e2-5b15218b914e-kube-api-access-6ln7v\") pod \"dnsmasq-dns-8446fd7c75-29qqr\" (UID: \"89786935-51a3-4851-94e2-5b15218b914e\") " pod="openstack/dnsmasq-dns-8446fd7c75-29qqr" Nov 28 07:13:17 crc kubenswrapper[4922]: I1128 07:13:17.650191 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/89786935-51a3-4851-94e2-5b15218b914e-dns-svc\") pod \"dnsmasq-dns-8446fd7c75-29qqr\" (UID: \"89786935-51a3-4851-94e2-5b15218b914e\") " pod="openstack/dnsmasq-dns-8446fd7c75-29qqr" Nov 28 07:13:17 crc kubenswrapper[4922]: I1128 07:13:17.650230 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/89786935-51a3-4851-94e2-5b15218b914e-config\") pod \"dnsmasq-dns-8446fd7c75-29qqr\" (UID: \"89786935-51a3-4851-94e2-5b15218b914e\") " pod="openstack/dnsmasq-dns-8446fd7c75-29qqr" Nov 28 07:13:17 crc kubenswrapper[4922]: I1128 07:13:17.651086 4922 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/89786935-51a3-4851-94e2-5b15218b914e-config\") pod \"dnsmasq-dns-8446fd7c75-29qqr\" (UID: \"89786935-51a3-4851-94e2-5b15218b914e\") " pod="openstack/dnsmasq-dns-8446fd7c75-29qqr" Nov 28 07:13:17 crc kubenswrapper[4922]: I1128 07:13:17.651509 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/89786935-51a3-4851-94e2-5b15218b914e-dns-svc\") pod \"dnsmasq-dns-8446fd7c75-29qqr\" (UID: \"89786935-51a3-4851-94e2-5b15218b914e\") " pod="openstack/dnsmasq-dns-8446fd7c75-29qqr" Nov 28 07:13:17 crc kubenswrapper[4922]: I1128 07:13:17.670164 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6ln7v\" (UniqueName: \"kubernetes.io/projected/89786935-51a3-4851-94e2-5b15218b914e-kube-api-access-6ln7v\") pod \"dnsmasq-dns-8446fd7c75-29qqr\" (UID: \"89786935-51a3-4851-94e2-5b15218b914e\") " pod="openstack/dnsmasq-dns-8446fd7c75-29qqr" Nov 28 07:13:17 crc kubenswrapper[4922]: I1128 07:13:17.778639 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-8446fd7c75-29qqr" Nov 28 07:13:17 crc kubenswrapper[4922]: I1128 07:13:17.935340 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-57dc4c6697-m7tx4"] Nov 28 07:13:17 crc kubenswrapper[4922]: W1128 07:13:17.953241 4922 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podaa2656f2_23d1_46c5_9cb9_eed7c3cddea2.slice/crio-d7a3c024ff2aa5b7eef34cea80b65fc5bdd9829258d4829fc2aa76343c7b3bc9 WatchSource:0}: Error finding container d7a3c024ff2aa5b7eef34cea80b65fc5bdd9829258d4829fc2aa76343c7b3bc9: Status 404 returned error can't find the container with id d7a3c024ff2aa5b7eef34cea80b65fc5bdd9829258d4829fc2aa76343c7b3bc9 Nov 28 07:13:18 crc kubenswrapper[4922]: I1128 07:13:18.178477 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-57dc4c6697-m7tx4" event={"ID":"aa2656f2-23d1-46c5-9cb9-eed7c3cddea2","Type":"ContainerStarted","Data":"d7a3c024ff2aa5b7eef34cea80b65fc5bdd9829258d4829fc2aa76343c7b3bc9"} Nov 28 07:13:18 crc kubenswrapper[4922]: I1128 07:13:18.251230 4922 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/rabbitmq-server-0"] Nov 28 07:13:18 crc kubenswrapper[4922]: I1128 07:13:18.252960 4922 util.go:30] "No sandbox for pod can be found. 
Nov 28 07:13:18 crc kubenswrapper[4922]: I1128 07:13:18.263240 4922 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-config-data"
Nov 28 07:13:18 crc kubenswrapper[4922]: I1128 07:13:18.263463 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-erlang-cookie"
Nov 28 07:13:18 crc kubenswrapper[4922]: I1128 07:13:18.263550 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-default-user"
Nov 28 07:13:18 crc kubenswrapper[4922]: I1128 07:13:18.263663 4922 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-server-conf"
Nov 28 07:13:18 crc kubenswrapper[4922]: I1128 07:13:18.263915 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-rabbitmq-svc"
Nov 28 07:13:18 crc kubenswrapper[4922]: I1128 07:13:18.264669 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-server-dockercfg-7rcjw"
Nov 28 07:13:18 crc kubenswrapper[4922]: I1128 07:13:18.267569 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-8446fd7c75-29qqr"]
Nov 28 07:13:18 crc kubenswrapper[4922]: I1128 07:13:18.273028 4922 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-plugins-conf"
Nov 28 07:13:18 crc kubenswrapper[4922]: I1128 07:13:18.277087 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-server-0"]
Nov 28 07:13:18 crc kubenswrapper[4922]: I1128 07:13:18.373953 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/4cf25acc-0d60-4b0a-a9c9-adc7ddce7458-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"4cf25acc-0d60-4b0a-a9c9-adc7ddce7458\") " pod="openstack/rabbitmq-server-0"
Nov 28 07:13:18 crc kubenswrapper[4922]: I1128 07:13:18.374538 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/4cf25acc-0d60-4b0a-a9c9-adc7ddce7458-config-data\") pod \"rabbitmq-server-0\" (UID: \"4cf25acc-0d60-4b0a-a9c9-adc7ddce7458\") " pod="openstack/rabbitmq-server-0"
Nov 28 07:13:18 crc kubenswrapper[4922]: I1128 07:13:18.374782 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/4cf25acc-0d60-4b0a-a9c9-adc7ddce7458-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"4cf25acc-0d60-4b0a-a9c9-adc7ddce7458\") " pod="openstack/rabbitmq-server-0"
Nov 28 07:13:18 crc kubenswrapper[4922]: I1128 07:13:18.375155 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/4cf25acc-0d60-4b0a-a9c9-adc7ddce7458-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"4cf25acc-0d60-4b0a-a9c9-adc7ddce7458\") " pod="openstack/rabbitmq-server-0"
Nov 28 07:13:18 crc kubenswrapper[4922]: I1128 07:13:18.375481 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/4cf25acc-0d60-4b0a-a9c9-adc7ddce7458-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"4cf25acc-0d60-4b0a-a9c9-adc7ddce7458\") " pod="openstack/rabbitmq-server-0"
Nov 28 07:13:18 crc kubenswrapper[4922]: I1128 07:13:18.375661 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"rabbitmq-server-0\" (UID: \"4cf25acc-0d60-4b0a-a9c9-adc7ddce7458\") " pod="openstack/rabbitmq-server-0"
Nov 28 07:13:18 crc kubenswrapper[4922]: I1128 07:13:18.375842 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/4cf25acc-0d60-4b0a-a9c9-adc7ddce7458-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"4cf25acc-0d60-4b0a-a9c9-adc7ddce7458\") " pod="openstack/rabbitmq-server-0"
Nov 28 07:13:18 crc kubenswrapper[4922]: I1128 07:13:18.376097 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-26gv7\" (UniqueName: \"kubernetes.io/projected/4cf25acc-0d60-4b0a-a9c9-adc7ddce7458-kube-api-access-26gv7\") pod \"rabbitmq-server-0\" (UID: \"4cf25acc-0d60-4b0a-a9c9-adc7ddce7458\") " pod="openstack/rabbitmq-server-0"
Nov 28 07:13:18 crc kubenswrapper[4922]: I1128 07:13:18.377257 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/4cf25acc-0d60-4b0a-a9c9-adc7ddce7458-pod-info\") pod \"rabbitmq-server-0\" (UID: \"4cf25acc-0d60-4b0a-a9c9-adc7ddce7458\") " pod="openstack/rabbitmq-server-0"
Nov 28 07:13:18 crc kubenswrapper[4922]: I1128 07:13:18.377298 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/4cf25acc-0d60-4b0a-a9c9-adc7ddce7458-server-conf\") pod \"rabbitmq-server-0\" (UID: \"4cf25acc-0d60-4b0a-a9c9-adc7ddce7458\") " pod="openstack/rabbitmq-server-0"
Nov 28 07:13:18 crc kubenswrapper[4922]: I1128 07:13:18.377316 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/4cf25acc-0d60-4b0a-a9c9-adc7ddce7458-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"4cf25acc-0d60-4b0a-a9c9-adc7ddce7458\") " pod="openstack/rabbitmq-server-0"
Nov 28 07:13:18 crc kubenswrapper[4922]: I1128 07:13:18.481067 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/4cf25acc-0d60-4b0a-a9c9-adc7ddce7458-pod-info\") pod \"rabbitmq-server-0\" (UID: \"4cf25acc-0d60-4b0a-a9c9-adc7ddce7458\") " pod="openstack/rabbitmq-server-0"
Nov 28 07:13:18 crc kubenswrapper[4922]: I1128 07:13:18.481112 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/4cf25acc-0d60-4b0a-a9c9-adc7ddce7458-server-conf\") pod \"rabbitmq-server-0\" (UID: \"4cf25acc-0d60-4b0a-a9c9-adc7ddce7458\") " pod="openstack/rabbitmq-server-0"
Nov 28 07:13:18 crc kubenswrapper[4922]: I1128 07:13:18.481152 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/4cf25acc-0d60-4b0a-a9c9-adc7ddce7458-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"4cf25acc-0d60-4b0a-a9c9-adc7ddce7458\") " pod="openstack/rabbitmq-server-0"
Nov 28 07:13:18 crc kubenswrapper[4922]: I1128 07:13:18.481202 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/4cf25acc-0d60-4b0a-a9c9-adc7ddce7458-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"4cf25acc-0d60-4b0a-a9c9-adc7ddce7458\") " pod="openstack/rabbitmq-server-0"
\"kubernetes.io/empty-dir/4cf25acc-0d60-4b0a-a9c9-adc7ddce7458-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"4cf25acc-0d60-4b0a-a9c9-adc7ddce7458\") " pod="openstack/rabbitmq-server-0" Nov 28 07:13:18 crc kubenswrapper[4922]: I1128 07:13:18.481238 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/4cf25acc-0d60-4b0a-a9c9-adc7ddce7458-config-data\") pod \"rabbitmq-server-0\" (UID: \"4cf25acc-0d60-4b0a-a9c9-adc7ddce7458\") " pod="openstack/rabbitmq-server-0" Nov 28 07:13:18 crc kubenswrapper[4922]: I1128 07:13:18.481264 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/4cf25acc-0d60-4b0a-a9c9-adc7ddce7458-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"4cf25acc-0d60-4b0a-a9c9-adc7ddce7458\") " pod="openstack/rabbitmq-server-0" Nov 28 07:13:18 crc kubenswrapper[4922]: I1128 07:13:18.481296 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/4cf25acc-0d60-4b0a-a9c9-adc7ddce7458-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"4cf25acc-0d60-4b0a-a9c9-adc7ddce7458\") " pod="openstack/rabbitmq-server-0" Nov 28 07:13:18 crc kubenswrapper[4922]: I1128 07:13:18.481321 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/4cf25acc-0d60-4b0a-a9c9-adc7ddce7458-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"4cf25acc-0d60-4b0a-a9c9-adc7ddce7458\") " pod="openstack/rabbitmq-server-0" Nov 28 07:13:18 crc kubenswrapper[4922]: I1128 07:13:18.481340 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"rabbitmq-server-0\" (UID: \"4cf25acc-0d60-4b0a-a9c9-adc7ddce7458\") " pod="openstack/rabbitmq-server-0" Nov 28 07:13:18 crc kubenswrapper[4922]: I1128 07:13:18.481357 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/4cf25acc-0d60-4b0a-a9c9-adc7ddce7458-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"4cf25acc-0d60-4b0a-a9c9-adc7ddce7458\") " pod="openstack/rabbitmq-server-0" Nov 28 07:13:18 crc kubenswrapper[4922]: I1128 07:13:18.481376 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-26gv7\" (UniqueName: \"kubernetes.io/projected/4cf25acc-0d60-4b0a-a9c9-adc7ddce7458-kube-api-access-26gv7\") pod \"rabbitmq-server-0\" (UID: \"4cf25acc-0d60-4b0a-a9c9-adc7ddce7458\") " pod="openstack/rabbitmq-server-0" Nov 28 07:13:18 crc kubenswrapper[4922]: I1128 07:13:18.486045 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/4cf25acc-0d60-4b0a-a9c9-adc7ddce7458-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"4cf25acc-0d60-4b0a-a9c9-adc7ddce7458\") " pod="openstack/rabbitmq-server-0" Nov 28 07:13:18 crc kubenswrapper[4922]: I1128 07:13:18.486070 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/4cf25acc-0d60-4b0a-a9c9-adc7ddce7458-server-conf\") pod \"rabbitmq-server-0\" (UID: \"4cf25acc-0d60-4b0a-a9c9-adc7ddce7458\") " pod="openstack/rabbitmq-server-0" Nov 28 07:13:18 crc kubenswrapper[4922]: I1128 07:13:18.486407 4922 
Nov 28 07:13:18 crc kubenswrapper[4922]: I1128 07:13:18.486756 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/4cf25acc-0d60-4b0a-a9c9-adc7ddce7458-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"4cf25acc-0d60-4b0a-a9c9-adc7ddce7458\") " pod="openstack/rabbitmq-server-0"
Nov 28 07:13:18 crc kubenswrapper[4922]: I1128 07:13:18.487084 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/4cf25acc-0d60-4b0a-a9c9-adc7ddce7458-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"4cf25acc-0d60-4b0a-a9c9-adc7ddce7458\") " pod="openstack/rabbitmq-server-0"
Nov 28 07:13:18 crc kubenswrapper[4922]: I1128 07:13:18.487971 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/4cf25acc-0d60-4b0a-a9c9-adc7ddce7458-config-data\") pod \"rabbitmq-server-0\" (UID: \"4cf25acc-0d60-4b0a-a9c9-adc7ddce7458\") " pod="openstack/rabbitmq-server-0"
Nov 28 07:13:18 crc kubenswrapper[4922]: I1128 07:13:18.495724 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/4cf25acc-0d60-4b0a-a9c9-adc7ddce7458-pod-info\") pod \"rabbitmq-server-0\" (UID: \"4cf25acc-0d60-4b0a-a9c9-adc7ddce7458\") " pod="openstack/rabbitmq-server-0"
Nov 28 07:13:18 crc kubenswrapper[4922]: I1128 07:13:18.497083 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/4cf25acc-0d60-4b0a-a9c9-adc7ddce7458-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"4cf25acc-0d60-4b0a-a9c9-adc7ddce7458\") " pod="openstack/rabbitmq-server-0"
Nov 28 07:13:18 crc kubenswrapper[4922]: I1128 07:13:18.497703 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/4cf25acc-0d60-4b0a-a9c9-adc7ddce7458-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"4cf25acc-0d60-4b0a-a9c9-adc7ddce7458\") " pod="openstack/rabbitmq-server-0"
Nov 28 07:13:18 crc kubenswrapper[4922]: I1128 07:13:18.504471 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-26gv7\" (UniqueName: \"kubernetes.io/projected/4cf25acc-0d60-4b0a-a9c9-adc7ddce7458-kube-api-access-26gv7\") pod \"rabbitmq-server-0\" (UID: \"4cf25acc-0d60-4b0a-a9c9-adc7ddce7458\") " pod="openstack/rabbitmq-server-0"
Nov 28 07:13:18 crc kubenswrapper[4922]: I1128 07:13:18.513726 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/4cf25acc-0d60-4b0a-a9c9-adc7ddce7458-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"4cf25acc-0d60-4b0a-a9c9-adc7ddce7458\") " pod="openstack/rabbitmq-server-0"
Nov 28 07:13:18 crc kubenswrapper[4922]: I1128 07:13:18.545162 4922 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/rabbitmq-cell1-server-0"]
Nov 28 07:13:18 crc kubenswrapper[4922]: I1128 07:13:18.546796 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-cell1-server-0"
Nov 28 07:13:18 crc kubenswrapper[4922]: I1128 07:13:18.547502 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"rabbitmq-server-0\" (UID: \"4cf25acc-0d60-4b0a-a9c9-adc7ddce7458\") " pod="openstack/rabbitmq-server-0"
Nov 28 07:13:18 crc kubenswrapper[4922]: I1128 07:13:18.555100 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-erlang-cookie"
Nov 28 07:13:18 crc kubenswrapper[4922]: I1128 07:13:18.555528 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-default-user"
Nov 28 07:13:18 crc kubenswrapper[4922]: I1128 07:13:18.555726 4922 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-config-data"
Nov 28 07:13:18 crc kubenswrapper[4922]: I1128 07:13:18.555842 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-rabbitmq-cell1-svc"
Nov 28 07:13:18 crc kubenswrapper[4922]: I1128 07:13:18.555974 4922 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-server-conf"
Nov 28 07:13:18 crc kubenswrapper[4922]: I1128 07:13:18.557680 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-server-dockercfg-mhsrl"
Nov 28 07:13:18 crc kubenswrapper[4922]: I1128 07:13:18.557841 4922 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-plugins-conf"
Nov 28 07:13:18 crc kubenswrapper[4922]: I1128 07:13:18.583180 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-cell1-server-0"]
Nov 28 07:13:18 crc kubenswrapper[4922]: I1128 07:13:18.602457 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-server-0"
Nov 28 07:13:18 crc kubenswrapper[4922]: I1128 07:13:18.687172 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/99708a5d-57d5-4479-8e09-94428bb13fa3-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"99708a5d-57d5-4479-8e09-94428bb13fa3\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 28 07:13:18 crc kubenswrapper[4922]: I1128 07:13:18.687247 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/99708a5d-57d5-4479-8e09-94428bb13fa3-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"99708a5d-57d5-4479-8e09-94428bb13fa3\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 28 07:13:18 crc kubenswrapper[4922]: I1128 07:13:18.687285 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jbnrh\" (UniqueName: \"kubernetes.io/projected/99708a5d-57d5-4479-8e09-94428bb13fa3-kube-api-access-jbnrh\") pod \"rabbitmq-cell1-server-0\" (UID: \"99708a5d-57d5-4479-8e09-94428bb13fa3\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 28 07:13:18 crc kubenswrapper[4922]: I1128 07:13:18.687334 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/99708a5d-57d5-4479-8e09-94428bb13fa3-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"99708a5d-57d5-4479-8e09-94428bb13fa3\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 28 07:13:18 crc kubenswrapper[4922]: I1128 07:13:18.687360 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/99708a5d-57d5-4479-8e09-94428bb13fa3-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"99708a5d-57d5-4479-8e09-94428bb13fa3\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 28 07:13:18 crc kubenswrapper[4922]: I1128 07:13:18.687381 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/99708a5d-57d5-4479-8e09-94428bb13fa3-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"99708a5d-57d5-4479-8e09-94428bb13fa3\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 28 07:13:18 crc kubenswrapper[4922]: I1128 07:13:18.687415 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/99708a5d-57d5-4479-8e09-94428bb13fa3-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"99708a5d-57d5-4479-8e09-94428bb13fa3\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 28 07:13:18 crc kubenswrapper[4922]: I1128 07:13:18.687455 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/99708a5d-57d5-4479-8e09-94428bb13fa3-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"99708a5d-57d5-4479-8e09-94428bb13fa3\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 28 07:13:18 crc kubenswrapper[4922]: I1128 07:13:18.687494 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/99708a5d-57d5-4479-8e09-94428bb13fa3-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"99708a5d-57d5-4479-8e09-94428bb13fa3\") " pod="openstack/rabbitmq-cell1-server-0"
\"rabbitmq-cell1-server-0\" (UID: \"99708a5d-57d5-4479-8e09-94428bb13fa3\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 07:13:18 crc kubenswrapper[4922]: I1128 07:13:18.687546 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"99708a5d-57d5-4479-8e09-94428bb13fa3\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 07:13:18 crc kubenswrapper[4922]: I1128 07:13:18.687569 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/99708a5d-57d5-4479-8e09-94428bb13fa3-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"99708a5d-57d5-4479-8e09-94428bb13fa3\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 07:13:18 crc kubenswrapper[4922]: I1128 07:13:18.791315 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/99708a5d-57d5-4479-8e09-94428bb13fa3-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"99708a5d-57d5-4479-8e09-94428bb13fa3\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 07:13:18 crc kubenswrapper[4922]: I1128 07:13:18.791366 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/99708a5d-57d5-4479-8e09-94428bb13fa3-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"99708a5d-57d5-4479-8e09-94428bb13fa3\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 07:13:18 crc kubenswrapper[4922]: I1128 07:13:18.791436 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"99708a5d-57d5-4479-8e09-94428bb13fa3\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 07:13:18 crc kubenswrapper[4922]: I1128 07:13:18.791453 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/99708a5d-57d5-4479-8e09-94428bb13fa3-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"99708a5d-57d5-4479-8e09-94428bb13fa3\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 07:13:18 crc kubenswrapper[4922]: I1128 07:13:18.791470 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/99708a5d-57d5-4479-8e09-94428bb13fa3-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"99708a5d-57d5-4479-8e09-94428bb13fa3\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 07:13:18 crc kubenswrapper[4922]: I1128 07:13:18.791488 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/99708a5d-57d5-4479-8e09-94428bb13fa3-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"99708a5d-57d5-4479-8e09-94428bb13fa3\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 07:13:18 crc kubenswrapper[4922]: I1128 07:13:18.791510 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jbnrh\" (UniqueName: \"kubernetes.io/projected/99708a5d-57d5-4479-8e09-94428bb13fa3-kube-api-access-jbnrh\") pod \"rabbitmq-cell1-server-0\" (UID: \"99708a5d-57d5-4479-8e09-94428bb13fa3\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 07:13:18 crc 
Nov 28 07:13:18 crc kubenswrapper[4922]: I1128 07:13:18.791563 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/99708a5d-57d5-4479-8e09-94428bb13fa3-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"99708a5d-57d5-4479-8e09-94428bb13fa3\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 28 07:13:18 crc kubenswrapper[4922]: I1128 07:13:18.791578 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/99708a5d-57d5-4479-8e09-94428bb13fa3-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"99708a5d-57d5-4479-8e09-94428bb13fa3\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 28 07:13:18 crc kubenswrapper[4922]: I1128 07:13:18.791624 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/99708a5d-57d5-4479-8e09-94428bb13fa3-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"99708a5d-57d5-4479-8e09-94428bb13fa3\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 28 07:13:18 crc kubenswrapper[4922]: I1128 07:13:18.791937 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/99708a5d-57d5-4479-8e09-94428bb13fa3-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"99708a5d-57d5-4479-8e09-94428bb13fa3\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 28 07:13:18 crc kubenswrapper[4922]: I1128 07:13:18.792260 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/99708a5d-57d5-4479-8e09-94428bb13fa3-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"99708a5d-57d5-4479-8e09-94428bb13fa3\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 28 07:13:18 crc kubenswrapper[4922]: I1128 07:13:18.792598 4922 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"99708a5d-57d5-4479-8e09-94428bb13fa3\") device mount path \"/mnt/openstack/pv03\"" pod="openstack/rabbitmq-cell1-server-0"
Nov 28 07:13:18 crc kubenswrapper[4922]: I1128 07:13:18.792740 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/99708a5d-57d5-4479-8e09-94428bb13fa3-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"99708a5d-57d5-4479-8e09-94428bb13fa3\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 28 07:13:18 crc kubenswrapper[4922]: I1128 07:13:18.793439 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/99708a5d-57d5-4479-8e09-94428bb13fa3-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"99708a5d-57d5-4479-8e09-94428bb13fa3\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 28 07:13:18 crc kubenswrapper[4922]: I1128 07:13:18.793569 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/99708a5d-57d5-4479-8e09-94428bb13fa3-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"99708a5d-57d5-4479-8e09-94428bb13fa3\") " pod="openstack/rabbitmq-cell1-server-0"
\"kubernetes.io/configmap/99708a5d-57d5-4479-8e09-94428bb13fa3-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"99708a5d-57d5-4479-8e09-94428bb13fa3\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 07:13:18 crc kubenswrapper[4922]: I1128 07:13:18.800825 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/99708a5d-57d5-4479-8e09-94428bb13fa3-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"99708a5d-57d5-4479-8e09-94428bb13fa3\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 07:13:18 crc kubenswrapper[4922]: I1128 07:13:18.800960 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/99708a5d-57d5-4479-8e09-94428bb13fa3-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"99708a5d-57d5-4479-8e09-94428bb13fa3\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 07:13:18 crc kubenswrapper[4922]: I1128 07:13:18.800975 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/99708a5d-57d5-4479-8e09-94428bb13fa3-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"99708a5d-57d5-4479-8e09-94428bb13fa3\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 07:13:18 crc kubenswrapper[4922]: I1128 07:13:18.803911 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/99708a5d-57d5-4479-8e09-94428bb13fa3-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"99708a5d-57d5-4479-8e09-94428bb13fa3\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 07:13:18 crc kubenswrapper[4922]: I1128 07:13:18.811597 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jbnrh\" (UniqueName: \"kubernetes.io/projected/99708a5d-57d5-4479-8e09-94428bb13fa3-kube-api-access-jbnrh\") pod \"rabbitmq-cell1-server-0\" (UID: \"99708a5d-57d5-4479-8e09-94428bb13fa3\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 07:13:18 crc kubenswrapper[4922]: I1128 07:13:18.821634 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"99708a5d-57d5-4479-8e09-94428bb13fa3\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 07:13:18 crc kubenswrapper[4922]: I1128 07:13:18.919427 4922 util.go:30] "No sandbox for pod can be found. 
Nov 28 07:13:19 crc kubenswrapper[4922]: I1128 07:13:19.118422 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-server-0"]
Nov 28 07:13:19 crc kubenswrapper[4922]: W1128 07:13:19.134606 4922 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod4cf25acc_0d60_4b0a_a9c9_adc7ddce7458.slice/crio-e3409056409358e85777883f3bf600a01d3c44a5636d56f4cf42551639e283b1 WatchSource:0}: Error finding container e3409056409358e85777883f3bf600a01d3c44a5636d56f4cf42551639e283b1: Status 404 returned error can't find the container with id e3409056409358e85777883f3bf600a01d3c44a5636d56f4cf42551639e283b1
Nov 28 07:13:19 crc kubenswrapper[4922]: I1128 07:13:19.202982 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-8446fd7c75-29qqr" event={"ID":"89786935-51a3-4851-94e2-5b15218b914e","Type":"ContainerStarted","Data":"b893fef02113ba4bfa9bdc86aa4ef22a610e418602ee48956020137c9a37c5ec"}
Nov 28 07:13:19 crc kubenswrapper[4922]: I1128 07:13:19.207982 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"4cf25acc-0d60-4b0a-a9c9-adc7ddce7458","Type":"ContainerStarted","Data":"e3409056409358e85777883f3bf600a01d3c44a5636d56f4cf42551639e283b1"}
Nov 28 07:13:19 crc kubenswrapper[4922]: I1128 07:13:19.531696 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-cell1-server-0"]
Nov 28 07:13:19 crc kubenswrapper[4922]: W1128 07:13:19.542376 4922 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod99708a5d_57d5_4479_8e09_94428bb13fa3.slice/crio-ecc7f30a159e75a43da65e468aa39428aaf9bebe3b66fa2f7aad9d33f5f3926e WatchSource:0}: Error finding container ecc7f30a159e75a43da65e468aa39428aaf9bebe3b66fa2f7aad9d33f5f3926e: Status 404 returned error can't find the container with id ecc7f30a159e75a43da65e468aa39428aaf9bebe3b66fa2f7aad9d33f5f3926e
Nov 28 07:13:19 crc kubenswrapper[4922]: I1128 07:13:19.556848 4922 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/openstack-galera-0"]
Nov 28 07:13:19 crc kubenswrapper[4922]: I1128 07:13:19.558116 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstack-galera-0"
Nov 28 07:13:19 crc kubenswrapper[4922]: I1128 07:13:19.559828 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-galera-openstack-svc"
Nov 28 07:13:19 crc kubenswrapper[4922]: I1128 07:13:19.564141 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"galera-openstack-dockercfg-882fh"
Nov 28 07:13:19 crc kubenswrapper[4922]: I1128 07:13:19.564937 4922 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-config-data"
Nov 28 07:13:19 crc kubenswrapper[4922]: I1128 07:13:19.565071 4922 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-scripts"
Nov 28 07:13:19 crc kubenswrapper[4922]: I1128 07:13:19.568586 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"combined-ca-bundle"
Nov 28 07:13:19 crc kubenswrapper[4922]: I1128 07:13:19.570813 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstack-galera-0"]
Nov 28 07:13:19 crc kubenswrapper[4922]: I1128 07:13:19.714741 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/c39356f2-8f5d-45d3-8188-7d9428c4d8bf-config-data-default\") pod \"openstack-galera-0\" (UID: \"c39356f2-8f5d-45d3-8188-7d9428c4d8bf\") " pod="openstack/openstack-galera-0"
Nov 28 07:13:19 crc kubenswrapper[4922]: I1128 07:13:19.715599 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c39356f2-8f5d-45d3-8188-7d9428c4d8bf-operator-scripts\") pod \"openstack-galera-0\" (UID: \"c39356f2-8f5d-45d3-8188-7d9428c4d8bf\") " pod="openstack/openstack-galera-0"
Nov 28 07:13:19 crc kubenswrapper[4922]: I1128 07:13:19.715896 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"openstack-galera-0\" (UID: \"c39356f2-8f5d-45d3-8188-7d9428c4d8bf\") " pod="openstack/openstack-galera-0"
Nov 28 07:13:19 crc kubenswrapper[4922]: I1128 07:13:19.715935 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/c39356f2-8f5d-45d3-8188-7d9428c4d8bf-galera-tls-certs\") pod \"openstack-galera-0\" (UID: \"c39356f2-8f5d-45d3-8188-7d9428c4d8bf\") " pod="openstack/openstack-galera-0"
Nov 28 07:13:19 crc kubenswrapper[4922]: I1128 07:13:19.715981 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c39356f2-8f5d-45d3-8188-7d9428c4d8bf-combined-ca-bundle\") pod \"openstack-galera-0\" (UID: \"c39356f2-8f5d-45d3-8188-7d9428c4d8bf\") " pod="openstack/openstack-galera-0"
Nov 28 07:13:19 crc kubenswrapper[4922]: I1128 07:13:19.716056 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/c39356f2-8f5d-45d3-8188-7d9428c4d8bf-kolla-config\") pod \"openstack-galera-0\" (UID: \"c39356f2-8f5d-45d3-8188-7d9428c4d8bf\") " pod="openstack/openstack-galera-0"
Nov 28 07:13:19 crc kubenswrapper[4922]: I1128 07:13:19.716081 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/c39356f2-8f5d-45d3-8188-7d9428c4d8bf-config-data-generated\") pod \"openstack-galera-0\" (UID: \"c39356f2-8f5d-45d3-8188-7d9428c4d8bf\") " pod="openstack/openstack-galera-0"
\"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/c39356f2-8f5d-45d3-8188-7d9428c4d8bf-config-data-generated\") pod \"openstack-galera-0\" (UID: \"c39356f2-8f5d-45d3-8188-7d9428c4d8bf\") " pod="openstack/openstack-galera-0" Nov 28 07:13:19 crc kubenswrapper[4922]: I1128 07:13:19.716105 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5bcdt\" (UniqueName: \"kubernetes.io/projected/c39356f2-8f5d-45d3-8188-7d9428c4d8bf-kube-api-access-5bcdt\") pod \"openstack-galera-0\" (UID: \"c39356f2-8f5d-45d3-8188-7d9428c4d8bf\") " pod="openstack/openstack-galera-0" Nov 28 07:13:19 crc kubenswrapper[4922]: I1128 07:13:19.818127 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"openstack-galera-0\" (UID: \"c39356f2-8f5d-45d3-8188-7d9428c4d8bf\") " pod="openstack/openstack-galera-0" Nov 28 07:13:19 crc kubenswrapper[4922]: I1128 07:13:19.818181 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/c39356f2-8f5d-45d3-8188-7d9428c4d8bf-galera-tls-certs\") pod \"openstack-galera-0\" (UID: \"c39356f2-8f5d-45d3-8188-7d9428c4d8bf\") " pod="openstack/openstack-galera-0" Nov 28 07:13:19 crc kubenswrapper[4922]: I1128 07:13:19.818237 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c39356f2-8f5d-45d3-8188-7d9428c4d8bf-combined-ca-bundle\") pod \"openstack-galera-0\" (UID: \"c39356f2-8f5d-45d3-8188-7d9428c4d8bf\") " pod="openstack/openstack-galera-0" Nov 28 07:13:19 crc kubenswrapper[4922]: I1128 07:13:19.818284 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/c39356f2-8f5d-45d3-8188-7d9428c4d8bf-kolla-config\") pod \"openstack-galera-0\" (UID: \"c39356f2-8f5d-45d3-8188-7d9428c4d8bf\") " pod="openstack/openstack-galera-0" Nov 28 07:13:19 crc kubenswrapper[4922]: I1128 07:13:19.818310 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/c39356f2-8f5d-45d3-8188-7d9428c4d8bf-config-data-generated\") pod \"openstack-galera-0\" (UID: \"c39356f2-8f5d-45d3-8188-7d9428c4d8bf\") " pod="openstack/openstack-galera-0" Nov 28 07:13:19 crc kubenswrapper[4922]: I1128 07:13:19.818336 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5bcdt\" (UniqueName: \"kubernetes.io/projected/c39356f2-8f5d-45d3-8188-7d9428c4d8bf-kube-api-access-5bcdt\") pod \"openstack-galera-0\" (UID: \"c39356f2-8f5d-45d3-8188-7d9428c4d8bf\") " pod="openstack/openstack-galera-0" Nov 28 07:13:19 crc kubenswrapper[4922]: I1128 07:13:19.818393 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/c39356f2-8f5d-45d3-8188-7d9428c4d8bf-config-data-default\") pod \"openstack-galera-0\" (UID: \"c39356f2-8f5d-45d3-8188-7d9428c4d8bf\") " pod="openstack/openstack-galera-0" Nov 28 07:13:19 crc kubenswrapper[4922]: I1128 07:13:19.818423 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c39356f2-8f5d-45d3-8188-7d9428c4d8bf-operator-scripts\") pod \"openstack-galera-0\" (UID: 
\"c39356f2-8f5d-45d3-8188-7d9428c4d8bf\") " pod="openstack/openstack-galera-0" Nov 28 07:13:19 crc kubenswrapper[4922]: I1128 07:13:19.819670 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/c39356f2-8f5d-45d3-8188-7d9428c4d8bf-kolla-config\") pod \"openstack-galera-0\" (UID: \"c39356f2-8f5d-45d3-8188-7d9428c4d8bf\") " pod="openstack/openstack-galera-0" Nov 28 07:13:19 crc kubenswrapper[4922]: I1128 07:13:19.819876 4922 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"openstack-galera-0\" (UID: \"c39356f2-8f5d-45d3-8188-7d9428c4d8bf\") device mount path \"/mnt/openstack/pv01\"" pod="openstack/openstack-galera-0" Nov 28 07:13:19 crc kubenswrapper[4922]: I1128 07:13:19.820102 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c39356f2-8f5d-45d3-8188-7d9428c4d8bf-operator-scripts\") pod \"openstack-galera-0\" (UID: \"c39356f2-8f5d-45d3-8188-7d9428c4d8bf\") " pod="openstack/openstack-galera-0" Nov 28 07:13:19 crc kubenswrapper[4922]: I1128 07:13:19.820281 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/c39356f2-8f5d-45d3-8188-7d9428c4d8bf-config-data-generated\") pod \"openstack-galera-0\" (UID: \"c39356f2-8f5d-45d3-8188-7d9428c4d8bf\") " pod="openstack/openstack-galera-0" Nov 28 07:13:19 crc kubenswrapper[4922]: I1128 07:13:19.820870 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/c39356f2-8f5d-45d3-8188-7d9428c4d8bf-config-data-default\") pod \"openstack-galera-0\" (UID: \"c39356f2-8f5d-45d3-8188-7d9428c4d8bf\") " pod="openstack/openstack-galera-0" Nov 28 07:13:19 crc kubenswrapper[4922]: I1128 07:13:19.828410 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/c39356f2-8f5d-45d3-8188-7d9428c4d8bf-galera-tls-certs\") pod \"openstack-galera-0\" (UID: \"c39356f2-8f5d-45d3-8188-7d9428c4d8bf\") " pod="openstack/openstack-galera-0" Nov 28 07:13:19 crc kubenswrapper[4922]: I1128 07:13:19.834316 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c39356f2-8f5d-45d3-8188-7d9428c4d8bf-combined-ca-bundle\") pod \"openstack-galera-0\" (UID: \"c39356f2-8f5d-45d3-8188-7d9428c4d8bf\") " pod="openstack/openstack-galera-0" Nov 28 07:13:19 crc kubenswrapper[4922]: I1128 07:13:19.848885 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5bcdt\" (UniqueName: \"kubernetes.io/projected/c39356f2-8f5d-45d3-8188-7d9428c4d8bf-kube-api-access-5bcdt\") pod \"openstack-galera-0\" (UID: \"c39356f2-8f5d-45d3-8188-7d9428c4d8bf\") " pod="openstack/openstack-galera-0" Nov 28 07:13:19 crc kubenswrapper[4922]: I1128 07:13:19.892314 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"openstack-galera-0\" (UID: \"c39356f2-8f5d-45d3-8188-7d9428c4d8bf\") " pod="openstack/openstack-galera-0" Nov 28 07:13:19 crc kubenswrapper[4922]: I1128 07:13:19.898842 4922 util.go:30] "No sandbox for pod can be found. 
Nov 28 07:13:20 crc kubenswrapper[4922]: I1128 07:13:20.223821 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"99708a5d-57d5-4479-8e09-94428bb13fa3","Type":"ContainerStarted","Data":"ecc7f30a159e75a43da65e468aa39428aaf9bebe3b66fa2f7aad9d33f5f3926e"}
Nov 28 07:13:20 crc kubenswrapper[4922]: I1128 07:13:20.250479 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstack-galera-0"]
Nov 28 07:13:20 crc kubenswrapper[4922]: W1128 07:13:20.283077 4922 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podc39356f2_8f5d_45d3_8188_7d9428c4d8bf.slice/crio-20b20550f23cb78280ef849113c8204b5d6baf3b4b068ecd797594bbd37d1fb6 WatchSource:0}: Error finding container 20b20550f23cb78280ef849113c8204b5d6baf3b4b068ecd797594bbd37d1fb6: Status 404 returned error can't find the container with id 20b20550f23cb78280ef849113c8204b5d6baf3b4b068ecd797594bbd37d1fb6
Nov 28 07:13:20 crc kubenswrapper[4922]: I1128 07:13:20.973654 4922 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/openstack-cell1-galera-0"]
Nov 28 07:13:20 crc kubenswrapper[4922]: I1128 07:13:20.977164 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstack-cell1-galera-0"
Nov 28 07:13:20 crc kubenswrapper[4922]: I1128 07:13:20.978950 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-galera-openstack-cell1-svc"
Nov 28 07:13:20 crc kubenswrapper[4922]: I1128 07:13:20.981422 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"galera-openstack-cell1-dockercfg-qhsvt"
Nov 28 07:13:20 crc kubenswrapper[4922]: I1128 07:13:20.981777 4922 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-cell1-scripts"
Nov 28 07:13:20 crc kubenswrapper[4922]: I1128 07:13:20.982003 4922 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-cell1-config-data"
Nov 28 07:13:20 crc kubenswrapper[4922]: I1128 07:13:20.983293 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstack-cell1-galera-0"]
Nov 28 07:13:21 crc kubenswrapper[4922]: I1128 07:13:21.143355 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qp7dg\" (UniqueName: \"kubernetes.io/projected/349fc74f-b0ac-437d-89ab-7106192b8e9e-kube-api-access-qp7dg\") pod \"openstack-cell1-galera-0\" (UID: \"349fc74f-b0ac-437d-89ab-7106192b8e9e\") " pod="openstack/openstack-cell1-galera-0"
Nov 28 07:13:21 crc kubenswrapper[4922]: I1128 07:13:21.143397 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/349fc74f-b0ac-437d-89ab-7106192b8e9e-kolla-config\") pod \"openstack-cell1-galera-0\" (UID: \"349fc74f-b0ac-437d-89ab-7106192b8e9e\") " pod="openstack/openstack-cell1-galera-0"
Nov 28 07:13:21 crc kubenswrapper[4922]: I1128 07:13:21.143415 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/349fc74f-b0ac-437d-89ab-7106192b8e9e-config-data-generated\") pod \"openstack-cell1-galera-0\" (UID: \"349fc74f-b0ac-437d-89ab-7106192b8e9e\") " pod="openstack/openstack-cell1-galera-0"
Nov 28 07:13:21 crc kubenswrapper[4922]: I1128 07:13:21.143444 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/349fc74f-b0ac-437d-89ab-7106192b8e9e-operator-scripts\") pod \"openstack-cell1-galera-0\" (UID: \"349fc74f-b0ac-437d-89ab-7106192b8e9e\") " pod="openstack/openstack-cell1-galera-0"
Nov 28 07:13:21 crc kubenswrapper[4922]: I1128 07:13:21.143497 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/349fc74f-b0ac-437d-89ab-7106192b8e9e-config-data-default\") pod \"openstack-cell1-galera-0\" (UID: \"349fc74f-b0ac-437d-89ab-7106192b8e9e\") " pod="openstack/openstack-cell1-galera-0"
Nov 28 07:13:21 crc kubenswrapper[4922]: I1128 07:13:21.143529 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/349fc74f-b0ac-437d-89ab-7106192b8e9e-combined-ca-bundle\") pod \"openstack-cell1-galera-0\" (UID: \"349fc74f-b0ac-437d-89ab-7106192b8e9e\") " pod="openstack/openstack-cell1-galera-0"
Nov 28 07:13:21 crc kubenswrapper[4922]: I1128 07:13:21.143644 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"openstack-cell1-galera-0\" (UID: \"349fc74f-b0ac-437d-89ab-7106192b8e9e\") " pod="openstack/openstack-cell1-galera-0"
Nov 28 07:13:21 crc kubenswrapper[4922]: I1128 07:13:21.143771 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/349fc74f-b0ac-437d-89ab-7106192b8e9e-galera-tls-certs\") pod \"openstack-cell1-galera-0\" (UID: \"349fc74f-b0ac-437d-89ab-7106192b8e9e\") " pod="openstack/openstack-cell1-galera-0"
Nov 28 07:13:21 crc kubenswrapper[4922]: I1128 07:13:21.244846 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/349fc74f-b0ac-437d-89ab-7106192b8e9e-combined-ca-bundle\") pod \"openstack-cell1-galera-0\" (UID: \"349fc74f-b0ac-437d-89ab-7106192b8e9e\") " pod="openstack/openstack-cell1-galera-0"
Nov 28 07:13:21 crc kubenswrapper[4922]: I1128 07:13:21.244901 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"openstack-cell1-galera-0\" (UID: \"349fc74f-b0ac-437d-89ab-7106192b8e9e\") " pod="openstack/openstack-cell1-galera-0"
Nov 28 07:13:21 crc kubenswrapper[4922]: I1128 07:13:21.244940 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/349fc74f-b0ac-437d-89ab-7106192b8e9e-galera-tls-certs\") pod \"openstack-cell1-galera-0\" (UID: \"349fc74f-b0ac-437d-89ab-7106192b8e9e\") " pod="openstack/openstack-cell1-galera-0"
Nov 28 07:13:21 crc kubenswrapper[4922]: I1128 07:13:21.244976 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qp7dg\" (UniqueName: \"kubernetes.io/projected/349fc74f-b0ac-437d-89ab-7106192b8e9e-kube-api-access-qp7dg\") pod \"openstack-cell1-galera-0\" (UID: \"349fc74f-b0ac-437d-89ab-7106192b8e9e\") " pod="openstack/openstack-cell1-galera-0"
Nov 28 07:13:21 crc kubenswrapper[4922]: I1128 07:13:21.244997 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/349fc74f-b0ac-437d-89ab-7106192b8e9e-kolla-config\") pod \"openstack-cell1-galera-0\" (UID: \"349fc74f-b0ac-437d-89ab-7106192b8e9e\") " pod="openstack/openstack-cell1-galera-0"
"operationExecutor.MountVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/349fc74f-b0ac-437d-89ab-7106192b8e9e-kolla-config\") pod \"openstack-cell1-galera-0\" (UID: \"349fc74f-b0ac-437d-89ab-7106192b8e9e\") " pod="openstack/openstack-cell1-galera-0" Nov 28 07:13:21 crc kubenswrapper[4922]: I1128 07:13:21.245014 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/349fc74f-b0ac-437d-89ab-7106192b8e9e-config-data-generated\") pod \"openstack-cell1-galera-0\" (UID: \"349fc74f-b0ac-437d-89ab-7106192b8e9e\") " pod="openstack/openstack-cell1-galera-0" Nov 28 07:13:21 crc kubenswrapper[4922]: I1128 07:13:21.245067 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/349fc74f-b0ac-437d-89ab-7106192b8e9e-operator-scripts\") pod \"openstack-cell1-galera-0\" (UID: \"349fc74f-b0ac-437d-89ab-7106192b8e9e\") " pod="openstack/openstack-cell1-galera-0" Nov 28 07:13:21 crc kubenswrapper[4922]: I1128 07:13:21.245303 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/349fc74f-b0ac-437d-89ab-7106192b8e9e-config-data-default\") pod \"openstack-cell1-galera-0\" (UID: \"349fc74f-b0ac-437d-89ab-7106192b8e9e\") " pod="openstack/openstack-cell1-galera-0" Nov 28 07:13:21 crc kubenswrapper[4922]: I1128 07:13:21.250464 4922 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"openstack-cell1-galera-0\" (UID: \"349fc74f-b0ac-437d-89ab-7106192b8e9e\") device mount path \"/mnt/openstack/pv08\"" pod="openstack/openstack-cell1-galera-0" Nov 28 07:13:21 crc kubenswrapper[4922]: I1128 07:13:21.250535 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/349fc74f-b0ac-437d-89ab-7106192b8e9e-kolla-config\") pod \"openstack-cell1-galera-0\" (UID: \"349fc74f-b0ac-437d-89ab-7106192b8e9e\") " pod="openstack/openstack-cell1-galera-0" Nov 28 07:13:21 crc kubenswrapper[4922]: I1128 07:13:21.250774 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/349fc74f-b0ac-437d-89ab-7106192b8e9e-config-data-generated\") pod \"openstack-cell1-galera-0\" (UID: \"349fc74f-b0ac-437d-89ab-7106192b8e9e\") " pod="openstack/openstack-cell1-galera-0" Nov 28 07:13:21 crc kubenswrapper[4922]: I1128 07:13:21.253184 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/349fc74f-b0ac-437d-89ab-7106192b8e9e-operator-scripts\") pod \"openstack-cell1-galera-0\" (UID: \"349fc74f-b0ac-437d-89ab-7106192b8e9e\") " pod="openstack/openstack-cell1-galera-0" Nov 28 07:13:21 crc kubenswrapper[4922]: I1128 07:13:21.253293 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/349fc74f-b0ac-437d-89ab-7106192b8e9e-config-data-default\") pod \"openstack-cell1-galera-0\" (UID: \"349fc74f-b0ac-437d-89ab-7106192b8e9e\") " pod="openstack/openstack-cell1-galera-0" Nov 28 07:13:21 crc kubenswrapper[4922]: I1128 07:13:21.257243 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/349fc74f-b0ac-437d-89ab-7106192b8e9e-combined-ca-bundle\") pod \"openstack-cell1-galera-0\" (UID: \"349fc74f-b0ac-437d-89ab-7106192b8e9e\") " pod="openstack/openstack-cell1-galera-0" Nov 28 07:13:21 crc kubenswrapper[4922]: I1128 07:13:21.257337 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/349fc74f-b0ac-437d-89ab-7106192b8e9e-galera-tls-certs\") pod \"openstack-cell1-galera-0\" (UID: \"349fc74f-b0ac-437d-89ab-7106192b8e9e\") " pod="openstack/openstack-cell1-galera-0" Nov 28 07:13:21 crc kubenswrapper[4922]: I1128 07:13:21.279468 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qp7dg\" (UniqueName: \"kubernetes.io/projected/349fc74f-b0ac-437d-89ab-7106192b8e9e-kube-api-access-qp7dg\") pod \"openstack-cell1-galera-0\" (UID: \"349fc74f-b0ac-437d-89ab-7106192b8e9e\") " pod="openstack/openstack-cell1-galera-0" Nov 28 07:13:21 crc kubenswrapper[4922]: I1128 07:13:21.293487 4922 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/memcached-0"] Nov 28 07:13:21 crc kubenswrapper[4922]: I1128 07:13:21.294456 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/memcached-0" Nov 28 07:13:21 crc kubenswrapper[4922]: I1128 07:13:21.298133 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-memcached-svc" Nov 28 07:13:21 crc kubenswrapper[4922]: I1128 07:13:21.298400 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"memcached-memcached-dockercfg-fv6xl" Nov 28 07:13:21 crc kubenswrapper[4922]: I1128 07:13:21.298580 4922 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"memcached-config-data" Nov 28 07:13:21 crc kubenswrapper[4922]: I1128 07:13:21.301201 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"openstack-cell1-galera-0\" (UID: \"349fc74f-b0ac-437d-89ab-7106192b8e9e\") " pod="openstack/openstack-cell1-galera-0" Nov 28 07:13:21 crc kubenswrapper[4922]: I1128 07:13:21.324868 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"c39356f2-8f5d-45d3-8188-7d9428c4d8bf","Type":"ContainerStarted","Data":"20b20550f23cb78280ef849113c8204b5d6baf3b4b068ecd797594bbd37d1fb6"} Nov 28 07:13:21 crc kubenswrapper[4922]: I1128 07:13:21.327955 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/memcached-0"] Nov 28 07:13:21 crc kubenswrapper[4922]: I1128 07:13:21.346617 4922 util.go:30] "No sandbox for pod can be found. 
Nov 28 07:13:21 crc kubenswrapper[4922]: I1128 07:13:21.448604 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mnkvs\" (UniqueName: \"kubernetes.io/projected/8d12d285-16c4-4e64-98d8-cff0f581aee4-kube-api-access-mnkvs\") pod \"memcached-0\" (UID: \"8d12d285-16c4-4e64-98d8-cff0f581aee4\") " pod="openstack/memcached-0"
Nov 28 07:13:21 crc kubenswrapper[4922]: I1128 07:13:21.448704 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"memcached-tls-certs\" (UniqueName: \"kubernetes.io/secret/8d12d285-16c4-4e64-98d8-cff0f581aee4-memcached-tls-certs\") pod \"memcached-0\" (UID: \"8d12d285-16c4-4e64-98d8-cff0f581aee4\") " pod="openstack/memcached-0"
Nov 28 07:13:21 crc kubenswrapper[4922]: I1128 07:13:21.448939 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/8d12d285-16c4-4e64-98d8-cff0f581aee4-config-data\") pod \"memcached-0\" (UID: \"8d12d285-16c4-4e64-98d8-cff0f581aee4\") " pod="openstack/memcached-0"
Nov 28 07:13:21 crc kubenswrapper[4922]: I1128 07:13:21.448990 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/8d12d285-16c4-4e64-98d8-cff0f581aee4-kolla-config\") pod \"memcached-0\" (UID: \"8d12d285-16c4-4e64-98d8-cff0f581aee4\") " pod="openstack/memcached-0"
Nov 28 07:13:21 crc kubenswrapper[4922]: I1128 07:13:21.449015 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8d12d285-16c4-4e64-98d8-cff0f581aee4-combined-ca-bundle\") pod \"memcached-0\" (UID: \"8d12d285-16c4-4e64-98d8-cff0f581aee4\") " pod="openstack/memcached-0"
Nov 28 07:13:21 crc kubenswrapper[4922]: I1128 07:13:21.556597 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mnkvs\" (UniqueName: \"kubernetes.io/projected/8d12d285-16c4-4e64-98d8-cff0f581aee4-kube-api-access-mnkvs\") pod \"memcached-0\" (UID: \"8d12d285-16c4-4e64-98d8-cff0f581aee4\") " pod="openstack/memcached-0"
Nov 28 07:13:21 crc kubenswrapper[4922]: I1128 07:13:21.556669 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"memcached-tls-certs\" (UniqueName: \"kubernetes.io/secret/8d12d285-16c4-4e64-98d8-cff0f581aee4-memcached-tls-certs\") pod \"memcached-0\" (UID: \"8d12d285-16c4-4e64-98d8-cff0f581aee4\") " pod="openstack/memcached-0"
Nov 28 07:13:21 crc kubenswrapper[4922]: I1128 07:13:21.556722 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/8d12d285-16c4-4e64-98d8-cff0f581aee4-config-data\") pod \"memcached-0\" (UID: \"8d12d285-16c4-4e64-98d8-cff0f581aee4\") " pod="openstack/memcached-0"
Nov 28 07:13:21 crc kubenswrapper[4922]: I1128 07:13:21.556742 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/8d12d285-16c4-4e64-98d8-cff0f581aee4-kolla-config\") pod \"memcached-0\" (UID: \"8d12d285-16c4-4e64-98d8-cff0f581aee4\") " pod="openstack/memcached-0"
Nov 28 07:13:21 crc kubenswrapper[4922]: I1128 07:13:21.556762 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8d12d285-16c4-4e64-98d8-cff0f581aee4-combined-ca-bundle\") pod \"memcached-0\" (UID: \"8d12d285-16c4-4e64-98d8-cff0f581aee4\") " pod="openstack/memcached-0"
(UniqueName: \"kubernetes.io/secret/8d12d285-16c4-4e64-98d8-cff0f581aee4-combined-ca-bundle\") pod \"memcached-0\" (UID: \"8d12d285-16c4-4e64-98d8-cff0f581aee4\") " pod="openstack/memcached-0" Nov 28 07:13:21 crc kubenswrapper[4922]: I1128 07:13:21.558009 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/8d12d285-16c4-4e64-98d8-cff0f581aee4-config-data\") pod \"memcached-0\" (UID: \"8d12d285-16c4-4e64-98d8-cff0f581aee4\") " pod="openstack/memcached-0" Nov 28 07:13:21 crc kubenswrapper[4922]: I1128 07:13:21.558039 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/8d12d285-16c4-4e64-98d8-cff0f581aee4-kolla-config\") pod \"memcached-0\" (UID: \"8d12d285-16c4-4e64-98d8-cff0f581aee4\") " pod="openstack/memcached-0" Nov 28 07:13:21 crc kubenswrapper[4922]: I1128 07:13:21.559943 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"memcached-tls-certs\" (UniqueName: \"kubernetes.io/secret/8d12d285-16c4-4e64-98d8-cff0f581aee4-memcached-tls-certs\") pod \"memcached-0\" (UID: \"8d12d285-16c4-4e64-98d8-cff0f581aee4\") " pod="openstack/memcached-0" Nov 28 07:13:21 crc kubenswrapper[4922]: I1128 07:13:21.578853 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8d12d285-16c4-4e64-98d8-cff0f581aee4-combined-ca-bundle\") pod \"memcached-0\" (UID: \"8d12d285-16c4-4e64-98d8-cff0f581aee4\") " pod="openstack/memcached-0" Nov 28 07:13:21 crc kubenswrapper[4922]: I1128 07:13:21.605933 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mnkvs\" (UniqueName: \"kubernetes.io/projected/8d12d285-16c4-4e64-98d8-cff0f581aee4-kube-api-access-mnkvs\") pod \"memcached-0\" (UID: \"8d12d285-16c4-4e64-98d8-cff0f581aee4\") " pod="openstack/memcached-0" Nov 28 07:13:21 crc kubenswrapper[4922]: I1128 07:13:21.668796 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/memcached-0" Nov 28 07:13:23 crc kubenswrapper[4922]: I1128 07:13:23.310612 4922 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/kube-state-metrics-0"] Nov 28 07:13:23 crc kubenswrapper[4922]: I1128 07:13:23.312896 4922 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/kube-state-metrics-0" Nov 28 07:13:23 crc kubenswrapper[4922]: I1128 07:13:23.321865 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"telemetry-ceilometer-dockercfg-tn2mb" Nov 28 07:13:23 crc kubenswrapper[4922]: I1128 07:13:23.344904 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/kube-state-metrics-0"] Nov 28 07:13:23 crc kubenswrapper[4922]: I1128 07:13:23.399740 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jnbq2\" (UniqueName: \"kubernetes.io/projected/43f49b9c-475e-4b28-ada6-e73db47e4bd7-kube-api-access-jnbq2\") pod \"kube-state-metrics-0\" (UID: \"43f49b9c-475e-4b28-ada6-e73db47e4bd7\") " pod="openstack/kube-state-metrics-0" Nov 28 07:13:23 crc kubenswrapper[4922]: I1128 07:13:23.501098 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jnbq2\" (UniqueName: \"kubernetes.io/projected/43f49b9c-475e-4b28-ada6-e73db47e4bd7-kube-api-access-jnbq2\") pod \"kube-state-metrics-0\" (UID: \"43f49b9c-475e-4b28-ada6-e73db47e4bd7\") " pod="openstack/kube-state-metrics-0" Nov 28 07:13:23 crc kubenswrapper[4922]: I1128 07:13:23.522558 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jnbq2\" (UniqueName: \"kubernetes.io/projected/43f49b9c-475e-4b28-ada6-e73db47e4bd7-kube-api-access-jnbq2\") pod \"kube-state-metrics-0\" (UID: \"43f49b9c-475e-4b28-ada6-e73db47e4bd7\") " pod="openstack/kube-state-metrics-0" Nov 28 07:13:23 crc kubenswrapper[4922]: I1128 07:13:23.685285 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/kube-state-metrics-0" Nov 28 07:13:26 crc kubenswrapper[4922]: I1128 07:13:26.532862 4922 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-xqzrg"] Nov 28 07:13:26 crc kubenswrapper[4922]: I1128 07:13:26.534494 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-xqzrg" Nov 28 07:13:26 crc kubenswrapper[4922]: I1128 07:13:26.536487 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovncontroller-ovndbs" Nov 28 07:13:26 crc kubenswrapper[4922]: I1128 07:13:26.536874 4922 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovncontroller-scripts" Nov 28 07:13:26 crc kubenswrapper[4922]: I1128 07:13:26.537013 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovncontroller-ovncontroller-dockercfg-ctnjg" Nov 28 07:13:26 crc kubenswrapper[4922]: I1128 07:13:26.542590 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-xqzrg"] Nov 28 07:13:26 crc kubenswrapper[4922]: I1128 07:13:26.602740 4922 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-ovs-m9xpz"] Nov 28 07:13:26 crc kubenswrapper[4922]: I1128 07:13:26.607329 4922 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-controller-ovs-m9xpz" Nov 28 07:13:26 crc kubenswrapper[4922]: I1128 07:13:26.614119 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-ovs-m9xpz"] Nov 28 07:13:26 crc kubenswrapper[4922]: I1128 07:13:26.651983 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/e5e01f31-28bd-46a2-b5cc-695c485deaf6-scripts\") pod \"ovn-controller-xqzrg\" (UID: \"e5e01f31-28bd-46a2-b5cc-695c485deaf6\") " pod="openstack/ovn-controller-xqzrg" Nov 28 07:13:26 crc kubenswrapper[4922]: I1128 07:13:26.652056 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/e5e01f31-28bd-46a2-b5cc-695c485deaf6-var-run-ovn\") pod \"ovn-controller-xqzrg\" (UID: \"e5e01f31-28bd-46a2-b5cc-695c485deaf6\") " pod="openstack/ovn-controller-xqzrg" Nov 28 07:13:26 crc kubenswrapper[4922]: I1128 07:13:26.652084 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-controller-tls-certs\" (UniqueName: \"kubernetes.io/secret/e5e01f31-28bd-46a2-b5cc-695c485deaf6-ovn-controller-tls-certs\") pod \"ovn-controller-xqzrg\" (UID: \"e5e01f31-28bd-46a2-b5cc-695c485deaf6\") " pod="openstack/ovn-controller-xqzrg" Nov 28 07:13:26 crc kubenswrapper[4922]: I1128 07:13:26.652115 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zfnr9\" (UniqueName: \"kubernetes.io/projected/e5e01f31-28bd-46a2-b5cc-695c485deaf6-kube-api-access-zfnr9\") pod \"ovn-controller-xqzrg\" (UID: \"e5e01f31-28bd-46a2-b5cc-695c485deaf6\") " pod="openstack/ovn-controller-xqzrg" Nov 28 07:13:26 crc kubenswrapper[4922]: I1128 07:13:26.652166 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/e5e01f31-28bd-46a2-b5cc-695c485deaf6-var-run\") pod \"ovn-controller-xqzrg\" (UID: \"e5e01f31-28bd-46a2-b5cc-695c485deaf6\") " pod="openstack/ovn-controller-xqzrg" Nov 28 07:13:26 crc kubenswrapper[4922]: I1128 07:13:26.652204 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/e5e01f31-28bd-46a2-b5cc-695c485deaf6-var-log-ovn\") pod \"ovn-controller-xqzrg\" (UID: \"e5e01f31-28bd-46a2-b5cc-695c485deaf6\") " pod="openstack/ovn-controller-xqzrg" Nov 28 07:13:26 crc kubenswrapper[4922]: I1128 07:13:26.652305 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e5e01f31-28bd-46a2-b5cc-695c485deaf6-combined-ca-bundle\") pod \"ovn-controller-xqzrg\" (UID: \"e5e01f31-28bd-46a2-b5cc-695c485deaf6\") " pod="openstack/ovn-controller-xqzrg" Nov 28 07:13:26 crc kubenswrapper[4922]: I1128 07:13:26.754325 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/e5e01f31-28bd-46a2-b5cc-695c485deaf6-scripts\") pod \"ovn-controller-xqzrg\" (UID: \"e5e01f31-28bd-46a2-b5cc-695c485deaf6\") " pod="openstack/ovn-controller-xqzrg" Nov 28 07:13:26 crc kubenswrapper[4922]: I1128 07:13:26.754373 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-log\" (UniqueName: 
\"kubernetes.io/host-path/ec882eb7-01fb-4f7f-bad8-812346e5880e-var-log\") pod \"ovn-controller-ovs-m9xpz\" (UID: \"ec882eb7-01fb-4f7f-bad8-812346e5880e\") " pod="openstack/ovn-controller-ovs-m9xpz" Nov 28 07:13:26 crc kubenswrapper[4922]: I1128 07:13:26.754397 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-ovs\" (UniqueName: \"kubernetes.io/host-path/ec882eb7-01fb-4f7f-bad8-812346e5880e-etc-ovs\") pod \"ovn-controller-ovs-m9xpz\" (UID: \"ec882eb7-01fb-4f7f-bad8-812346e5880e\") " pod="openstack/ovn-controller-ovs-m9xpz" Nov 28 07:13:26 crc kubenswrapper[4922]: I1128 07:13:26.754426 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/e5e01f31-28bd-46a2-b5cc-695c485deaf6-var-run-ovn\") pod \"ovn-controller-xqzrg\" (UID: \"e5e01f31-28bd-46a2-b5cc-695c485deaf6\") " pod="openstack/ovn-controller-xqzrg" Nov 28 07:13:26 crc kubenswrapper[4922]: I1128 07:13:26.754443 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-controller-tls-certs\" (UniqueName: \"kubernetes.io/secret/e5e01f31-28bd-46a2-b5cc-695c485deaf6-ovn-controller-tls-certs\") pod \"ovn-controller-xqzrg\" (UID: \"e5e01f31-28bd-46a2-b5cc-695c485deaf6\") " pod="openstack/ovn-controller-xqzrg" Nov 28 07:13:26 crc kubenswrapper[4922]: I1128 07:13:26.754460 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/ec882eb7-01fb-4f7f-bad8-812346e5880e-var-run\") pod \"ovn-controller-ovs-m9xpz\" (UID: \"ec882eb7-01fb-4f7f-bad8-812346e5880e\") " pod="openstack/ovn-controller-ovs-m9xpz" Nov 28 07:13:26 crc kubenswrapper[4922]: I1128 07:13:26.754483 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zfnr9\" (UniqueName: \"kubernetes.io/projected/e5e01f31-28bd-46a2-b5cc-695c485deaf6-kube-api-access-zfnr9\") pod \"ovn-controller-xqzrg\" (UID: \"e5e01f31-28bd-46a2-b5cc-695c485deaf6\") " pod="openstack/ovn-controller-xqzrg" Nov 28 07:13:26 crc kubenswrapper[4922]: I1128 07:13:26.754498 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mqgwh\" (UniqueName: \"kubernetes.io/projected/ec882eb7-01fb-4f7f-bad8-812346e5880e-kube-api-access-mqgwh\") pod \"ovn-controller-ovs-m9xpz\" (UID: \"ec882eb7-01fb-4f7f-bad8-812346e5880e\") " pod="openstack/ovn-controller-ovs-m9xpz" Nov 28 07:13:26 crc kubenswrapper[4922]: I1128 07:13:26.754524 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/ec882eb7-01fb-4f7f-bad8-812346e5880e-scripts\") pod \"ovn-controller-ovs-m9xpz\" (UID: \"ec882eb7-01fb-4f7f-bad8-812346e5880e\") " pod="openstack/ovn-controller-ovs-m9xpz" Nov 28 07:13:26 crc kubenswrapper[4922]: I1128 07:13:26.754553 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/e5e01f31-28bd-46a2-b5cc-695c485deaf6-var-run\") pod \"ovn-controller-xqzrg\" (UID: \"e5e01f31-28bd-46a2-b5cc-695c485deaf6\") " pod="openstack/ovn-controller-xqzrg" Nov 28 07:13:26 crc kubenswrapper[4922]: I1128 07:13:26.754568 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib\" (UniqueName: \"kubernetes.io/host-path/ec882eb7-01fb-4f7f-bad8-812346e5880e-var-lib\") pod 
\"ovn-controller-ovs-m9xpz\" (UID: \"ec882eb7-01fb-4f7f-bad8-812346e5880e\") " pod="openstack/ovn-controller-ovs-m9xpz" Nov 28 07:13:26 crc kubenswrapper[4922]: I1128 07:13:26.754599 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/e5e01f31-28bd-46a2-b5cc-695c485deaf6-var-log-ovn\") pod \"ovn-controller-xqzrg\" (UID: \"e5e01f31-28bd-46a2-b5cc-695c485deaf6\") " pod="openstack/ovn-controller-xqzrg" Nov 28 07:13:26 crc kubenswrapper[4922]: I1128 07:13:26.754617 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e5e01f31-28bd-46a2-b5cc-695c485deaf6-combined-ca-bundle\") pod \"ovn-controller-xqzrg\" (UID: \"e5e01f31-28bd-46a2-b5cc-695c485deaf6\") " pod="openstack/ovn-controller-xqzrg" Nov 28 07:13:26 crc kubenswrapper[4922]: I1128 07:13:26.755621 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/e5e01f31-28bd-46a2-b5cc-695c485deaf6-var-log-ovn\") pod \"ovn-controller-xqzrg\" (UID: \"e5e01f31-28bd-46a2-b5cc-695c485deaf6\") " pod="openstack/ovn-controller-xqzrg" Nov 28 07:13:26 crc kubenswrapper[4922]: I1128 07:13:26.755683 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/e5e01f31-28bd-46a2-b5cc-695c485deaf6-var-run-ovn\") pod \"ovn-controller-xqzrg\" (UID: \"e5e01f31-28bd-46a2-b5cc-695c485deaf6\") " pod="openstack/ovn-controller-xqzrg" Nov 28 07:13:26 crc kubenswrapper[4922]: I1128 07:13:26.755791 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/e5e01f31-28bd-46a2-b5cc-695c485deaf6-var-run\") pod \"ovn-controller-xqzrg\" (UID: \"e5e01f31-28bd-46a2-b5cc-695c485deaf6\") " pod="openstack/ovn-controller-xqzrg" Nov 28 07:13:26 crc kubenswrapper[4922]: I1128 07:13:26.757071 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/e5e01f31-28bd-46a2-b5cc-695c485deaf6-scripts\") pod \"ovn-controller-xqzrg\" (UID: \"e5e01f31-28bd-46a2-b5cc-695c485deaf6\") " pod="openstack/ovn-controller-xqzrg" Nov 28 07:13:26 crc kubenswrapper[4922]: I1128 07:13:26.764990 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-controller-tls-certs\" (UniqueName: \"kubernetes.io/secret/e5e01f31-28bd-46a2-b5cc-695c485deaf6-ovn-controller-tls-certs\") pod \"ovn-controller-xqzrg\" (UID: \"e5e01f31-28bd-46a2-b5cc-695c485deaf6\") " pod="openstack/ovn-controller-xqzrg" Nov 28 07:13:26 crc kubenswrapper[4922]: I1128 07:13:26.765012 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e5e01f31-28bd-46a2-b5cc-695c485deaf6-combined-ca-bundle\") pod \"ovn-controller-xqzrg\" (UID: \"e5e01f31-28bd-46a2-b5cc-695c485deaf6\") " pod="openstack/ovn-controller-xqzrg" Nov 28 07:13:26 crc kubenswrapper[4922]: I1128 07:13:26.771141 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zfnr9\" (UniqueName: \"kubernetes.io/projected/e5e01f31-28bd-46a2-b5cc-695c485deaf6-kube-api-access-zfnr9\") pod \"ovn-controller-xqzrg\" (UID: \"e5e01f31-28bd-46a2-b5cc-695c485deaf6\") " pod="openstack/ovn-controller-xqzrg" Nov 28 07:13:26 crc kubenswrapper[4922]: I1128 07:13:26.856159 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for 
volume \"etc-ovs\" (UniqueName: \"kubernetes.io/host-path/ec882eb7-01fb-4f7f-bad8-812346e5880e-etc-ovs\") pod \"ovn-controller-ovs-m9xpz\" (UID: \"ec882eb7-01fb-4f7f-bad8-812346e5880e\") " pod="openstack/ovn-controller-ovs-m9xpz" Nov 28 07:13:26 crc kubenswrapper[4922]: I1128 07:13:26.856245 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/ec882eb7-01fb-4f7f-bad8-812346e5880e-var-run\") pod \"ovn-controller-ovs-m9xpz\" (UID: \"ec882eb7-01fb-4f7f-bad8-812346e5880e\") " pod="openstack/ovn-controller-ovs-m9xpz" Nov 28 07:13:26 crc kubenswrapper[4922]: I1128 07:13:26.856291 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mqgwh\" (UniqueName: \"kubernetes.io/projected/ec882eb7-01fb-4f7f-bad8-812346e5880e-kube-api-access-mqgwh\") pod \"ovn-controller-ovs-m9xpz\" (UID: \"ec882eb7-01fb-4f7f-bad8-812346e5880e\") " pod="openstack/ovn-controller-ovs-m9xpz" Nov 28 07:13:26 crc kubenswrapper[4922]: I1128 07:13:26.856328 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/ec882eb7-01fb-4f7f-bad8-812346e5880e-scripts\") pod \"ovn-controller-ovs-m9xpz\" (UID: \"ec882eb7-01fb-4f7f-bad8-812346e5880e\") " pod="openstack/ovn-controller-ovs-m9xpz" Nov 28 07:13:26 crc kubenswrapper[4922]: I1128 07:13:26.856368 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib\" (UniqueName: \"kubernetes.io/host-path/ec882eb7-01fb-4f7f-bad8-812346e5880e-var-lib\") pod \"ovn-controller-ovs-m9xpz\" (UID: \"ec882eb7-01fb-4f7f-bad8-812346e5880e\") " pod="openstack/ovn-controller-ovs-m9xpz" Nov 28 07:13:26 crc kubenswrapper[4922]: I1128 07:13:26.856454 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/ec882eb7-01fb-4f7f-bad8-812346e5880e-var-log\") pod \"ovn-controller-ovs-m9xpz\" (UID: \"ec882eb7-01fb-4f7f-bad8-812346e5880e\") " pod="openstack/ovn-controller-ovs-m9xpz" Nov 28 07:13:26 crc kubenswrapper[4922]: I1128 07:13:26.856520 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/ec882eb7-01fb-4f7f-bad8-812346e5880e-var-run\") pod \"ovn-controller-ovs-m9xpz\" (UID: \"ec882eb7-01fb-4f7f-bad8-812346e5880e\") " pod="openstack/ovn-controller-ovs-m9xpz" Nov 28 07:13:26 crc kubenswrapper[4922]: I1128 07:13:26.856635 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib\" (UniqueName: \"kubernetes.io/host-path/ec882eb7-01fb-4f7f-bad8-812346e5880e-var-lib\") pod \"ovn-controller-ovs-m9xpz\" (UID: \"ec882eb7-01fb-4f7f-bad8-812346e5880e\") " pod="openstack/ovn-controller-ovs-m9xpz" Nov 28 07:13:26 crc kubenswrapper[4922]: I1128 07:13:26.856661 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/ec882eb7-01fb-4f7f-bad8-812346e5880e-var-log\") pod \"ovn-controller-ovs-m9xpz\" (UID: \"ec882eb7-01fb-4f7f-bad8-812346e5880e\") " pod="openstack/ovn-controller-ovs-m9xpz" Nov 28 07:13:26 crc kubenswrapper[4922]: I1128 07:13:26.862591 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/ec882eb7-01fb-4f7f-bad8-812346e5880e-scripts\") pod \"ovn-controller-ovs-m9xpz\" (UID: \"ec882eb7-01fb-4f7f-bad8-812346e5880e\") " pod="openstack/ovn-controller-ovs-m9xpz" Nov 28 07:13:26 crc 
kubenswrapper[4922]: I1128 07:13:26.862775 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-ovs\" (UniqueName: \"kubernetes.io/host-path/ec882eb7-01fb-4f7f-bad8-812346e5880e-etc-ovs\") pod \"ovn-controller-ovs-m9xpz\" (UID: \"ec882eb7-01fb-4f7f-bad8-812346e5880e\") " pod="openstack/ovn-controller-ovs-m9xpz" Nov 28 07:13:26 crc kubenswrapper[4922]: I1128 07:13:26.862879 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-xqzrg" Nov 28 07:13:26 crc kubenswrapper[4922]: I1128 07:13:26.871939 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mqgwh\" (UniqueName: \"kubernetes.io/projected/ec882eb7-01fb-4f7f-bad8-812346e5880e-kube-api-access-mqgwh\") pod \"ovn-controller-ovs-m9xpz\" (UID: \"ec882eb7-01fb-4f7f-bad8-812346e5880e\") " pod="openstack/ovn-controller-ovs-m9xpz" Nov 28 07:13:26 crc kubenswrapper[4922]: I1128 07:13:26.948611 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-ovs-m9xpz" Nov 28 07:13:27 crc kubenswrapper[4922]: I1128 07:13:27.006963 4922 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovsdbserver-nb-0"] Nov 28 07:13:27 crc kubenswrapper[4922]: I1128 07:13:27.009125 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovsdbserver-nb-0" Nov 28 07:13:27 crc kubenswrapper[4922]: I1128 07:13:27.026973 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovncluster-ovndbcluster-nb-dockercfg-r5bbj" Nov 28 07:13:27 crc kubenswrapper[4922]: I1128 07:13:27.027154 4922 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-nb-scripts" Nov 28 07:13:27 crc kubenswrapper[4922]: I1128 07:13:27.027607 4922 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-nb-config" Nov 28 07:13:27 crc kubenswrapper[4922]: I1128 07:13:27.028107 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovndbcluster-nb-ovndbs" Nov 28 07:13:27 crc kubenswrapper[4922]: I1128 07:13:27.028292 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovn-metrics" Nov 28 07:13:27 crc kubenswrapper[4922]: I1128 07:13:27.032208 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-nb-0"] Nov 28 07:13:27 crc kubenswrapper[4922]: I1128 07:13:27.162441 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/d27299ac-7d8d-4485-86fb-6ac7f34ea1ae-ovsdb-rundir\") pod \"ovsdbserver-nb-0\" (UID: \"d27299ac-7d8d-4485-86fb-6ac7f34ea1ae\") " pod="openstack/ovsdbserver-nb-0" Nov 28 07:13:27 crc kubenswrapper[4922]: I1128 07:13:27.162505 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/d27299ac-7d8d-4485-86fb-6ac7f34ea1ae-metrics-certs-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"d27299ac-7d8d-4485-86fb-6ac7f34ea1ae\") " pod="openstack/ovsdbserver-nb-0" Nov 28 07:13:27 crc kubenswrapper[4922]: I1128 07:13:27.162696 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/d27299ac-7d8d-4485-86fb-6ac7f34ea1ae-scripts\") pod \"ovsdbserver-nb-0\" (UID: \"d27299ac-7d8d-4485-86fb-6ac7f34ea1ae\") " 
pod="openstack/ovsdbserver-nb-0" Nov 28 07:13:27 crc kubenswrapper[4922]: I1128 07:13:27.162766 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"ovsdbserver-nb-0\" (UID: \"d27299ac-7d8d-4485-86fb-6ac7f34ea1ae\") " pod="openstack/ovsdbserver-nb-0" Nov 28 07:13:27 crc kubenswrapper[4922]: I1128 07:13:27.162791 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d27299ac-7d8d-4485-86fb-6ac7f34ea1ae-config\") pod \"ovsdbserver-nb-0\" (UID: \"d27299ac-7d8d-4485-86fb-6ac7f34ea1ae\") " pod="openstack/ovsdbserver-nb-0" Nov 28 07:13:27 crc kubenswrapper[4922]: I1128 07:13:27.162832 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d27299ac-7d8d-4485-86fb-6ac7f34ea1ae-combined-ca-bundle\") pod \"ovsdbserver-nb-0\" (UID: \"d27299ac-7d8d-4485-86fb-6ac7f34ea1ae\") " pod="openstack/ovsdbserver-nb-0" Nov 28 07:13:27 crc kubenswrapper[4922]: I1128 07:13:27.162856 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb-tls-certs\" (UniqueName: \"kubernetes.io/secret/d27299ac-7d8d-4485-86fb-6ac7f34ea1ae-ovsdbserver-nb-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"d27299ac-7d8d-4485-86fb-6ac7f34ea1ae\") " pod="openstack/ovsdbserver-nb-0" Nov 28 07:13:27 crc kubenswrapper[4922]: I1128 07:13:27.162959 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-65xxr\" (UniqueName: \"kubernetes.io/projected/d27299ac-7d8d-4485-86fb-6ac7f34ea1ae-kube-api-access-65xxr\") pod \"ovsdbserver-nb-0\" (UID: \"d27299ac-7d8d-4485-86fb-6ac7f34ea1ae\") " pod="openstack/ovsdbserver-nb-0" Nov 28 07:13:27 crc kubenswrapper[4922]: I1128 07:13:27.264752 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/d27299ac-7d8d-4485-86fb-6ac7f34ea1ae-scripts\") pod \"ovsdbserver-nb-0\" (UID: \"d27299ac-7d8d-4485-86fb-6ac7f34ea1ae\") " pod="openstack/ovsdbserver-nb-0" Nov 28 07:13:27 crc kubenswrapper[4922]: I1128 07:13:27.264802 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"ovsdbserver-nb-0\" (UID: \"d27299ac-7d8d-4485-86fb-6ac7f34ea1ae\") " pod="openstack/ovsdbserver-nb-0" Nov 28 07:13:27 crc kubenswrapper[4922]: I1128 07:13:27.264820 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d27299ac-7d8d-4485-86fb-6ac7f34ea1ae-config\") pod \"ovsdbserver-nb-0\" (UID: \"d27299ac-7d8d-4485-86fb-6ac7f34ea1ae\") " pod="openstack/ovsdbserver-nb-0" Nov 28 07:13:27 crc kubenswrapper[4922]: I1128 07:13:27.264846 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d27299ac-7d8d-4485-86fb-6ac7f34ea1ae-combined-ca-bundle\") pod \"ovsdbserver-nb-0\" (UID: \"d27299ac-7d8d-4485-86fb-6ac7f34ea1ae\") " pod="openstack/ovsdbserver-nb-0" Nov 28 07:13:27 crc kubenswrapper[4922]: I1128 07:13:27.264862 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb-tls-certs\" (UniqueName: 
\"kubernetes.io/secret/d27299ac-7d8d-4485-86fb-6ac7f34ea1ae-ovsdbserver-nb-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"d27299ac-7d8d-4485-86fb-6ac7f34ea1ae\") " pod="openstack/ovsdbserver-nb-0" Nov 28 07:13:27 crc kubenswrapper[4922]: I1128 07:13:27.264892 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-65xxr\" (UniqueName: \"kubernetes.io/projected/d27299ac-7d8d-4485-86fb-6ac7f34ea1ae-kube-api-access-65xxr\") pod \"ovsdbserver-nb-0\" (UID: \"d27299ac-7d8d-4485-86fb-6ac7f34ea1ae\") " pod="openstack/ovsdbserver-nb-0" Nov 28 07:13:27 crc kubenswrapper[4922]: I1128 07:13:27.264949 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/d27299ac-7d8d-4485-86fb-6ac7f34ea1ae-ovsdb-rundir\") pod \"ovsdbserver-nb-0\" (UID: \"d27299ac-7d8d-4485-86fb-6ac7f34ea1ae\") " pod="openstack/ovsdbserver-nb-0" Nov 28 07:13:27 crc kubenswrapper[4922]: I1128 07:13:27.264979 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/d27299ac-7d8d-4485-86fb-6ac7f34ea1ae-metrics-certs-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"d27299ac-7d8d-4485-86fb-6ac7f34ea1ae\") " pod="openstack/ovsdbserver-nb-0" Nov 28 07:13:27 crc kubenswrapper[4922]: I1128 07:13:27.265646 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/d27299ac-7d8d-4485-86fb-6ac7f34ea1ae-ovsdb-rundir\") pod \"ovsdbserver-nb-0\" (UID: \"d27299ac-7d8d-4485-86fb-6ac7f34ea1ae\") " pod="openstack/ovsdbserver-nb-0" Nov 28 07:13:27 crc kubenswrapper[4922]: I1128 07:13:27.265702 4922 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"ovsdbserver-nb-0\" (UID: \"d27299ac-7d8d-4485-86fb-6ac7f34ea1ae\") device mount path \"/mnt/openstack/pv05\"" pod="openstack/ovsdbserver-nb-0" Nov 28 07:13:27 crc kubenswrapper[4922]: I1128 07:13:27.265806 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d27299ac-7d8d-4485-86fb-6ac7f34ea1ae-config\") pod \"ovsdbserver-nb-0\" (UID: \"d27299ac-7d8d-4485-86fb-6ac7f34ea1ae\") " pod="openstack/ovsdbserver-nb-0" Nov 28 07:13:27 crc kubenswrapper[4922]: I1128 07:13:27.265916 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/d27299ac-7d8d-4485-86fb-6ac7f34ea1ae-scripts\") pod \"ovsdbserver-nb-0\" (UID: \"d27299ac-7d8d-4485-86fb-6ac7f34ea1ae\") " pod="openstack/ovsdbserver-nb-0" Nov 28 07:13:27 crc kubenswrapper[4922]: I1128 07:13:27.268678 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/d27299ac-7d8d-4485-86fb-6ac7f34ea1ae-metrics-certs-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"d27299ac-7d8d-4485-86fb-6ac7f34ea1ae\") " pod="openstack/ovsdbserver-nb-0" Nov 28 07:13:27 crc kubenswrapper[4922]: I1128 07:13:27.269345 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb-tls-certs\" (UniqueName: \"kubernetes.io/secret/d27299ac-7d8d-4485-86fb-6ac7f34ea1ae-ovsdbserver-nb-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"d27299ac-7d8d-4485-86fb-6ac7f34ea1ae\") " pod="openstack/ovsdbserver-nb-0" Nov 28 07:13:27 crc kubenswrapper[4922]: I1128 07:13:27.270251 4922 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d27299ac-7d8d-4485-86fb-6ac7f34ea1ae-combined-ca-bundle\") pod \"ovsdbserver-nb-0\" (UID: \"d27299ac-7d8d-4485-86fb-6ac7f34ea1ae\") " pod="openstack/ovsdbserver-nb-0" Nov 28 07:13:27 crc kubenswrapper[4922]: I1128 07:13:27.284050 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-65xxr\" (UniqueName: \"kubernetes.io/projected/d27299ac-7d8d-4485-86fb-6ac7f34ea1ae-kube-api-access-65xxr\") pod \"ovsdbserver-nb-0\" (UID: \"d27299ac-7d8d-4485-86fb-6ac7f34ea1ae\") " pod="openstack/ovsdbserver-nb-0" Nov 28 07:13:27 crc kubenswrapper[4922]: I1128 07:13:27.284691 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"ovsdbserver-nb-0\" (UID: \"d27299ac-7d8d-4485-86fb-6ac7f34ea1ae\") " pod="openstack/ovsdbserver-nb-0" Nov 28 07:13:27 crc kubenswrapper[4922]: I1128 07:13:27.366883 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovsdbserver-nb-0" Nov 28 07:13:30 crc kubenswrapper[4922]: I1128 07:13:30.522466 4922 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovsdbserver-sb-0"] Nov 28 07:13:30 crc kubenswrapper[4922]: I1128 07:13:30.524089 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovsdbserver-sb-0" Nov 28 07:13:30 crc kubenswrapper[4922]: I1128 07:13:30.528408 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovndbcluster-sb-ovndbs" Nov 28 07:13:30 crc kubenswrapper[4922]: I1128 07:13:30.528576 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovncluster-ovndbcluster-sb-dockercfg-rdf8h" Nov 28 07:13:30 crc kubenswrapper[4922]: I1128 07:13:30.528672 4922 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-sb-config" Nov 28 07:13:30 crc kubenswrapper[4922]: I1128 07:13:30.537660 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-sb-0"] Nov 28 07:13:30 crc kubenswrapper[4922]: I1128 07:13:30.545406 4922 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-sb-scripts" Nov 28 07:13:30 crc kubenswrapper[4922]: I1128 07:13:30.635320 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/39a4d24f-6b5b-48fc-ab66-1ad33462c477-combined-ca-bundle\") pod \"ovsdbserver-sb-0\" (UID: \"39a4d24f-6b5b-48fc-ab66-1ad33462c477\") " pod="openstack/ovsdbserver-sb-0" Nov 28 07:13:30 crc kubenswrapper[4922]: I1128 07:13:30.635674 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb-tls-certs\" (UniqueName: \"kubernetes.io/secret/39a4d24f-6b5b-48fc-ab66-1ad33462c477-ovsdbserver-sb-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"39a4d24f-6b5b-48fc-ab66-1ad33462c477\") " pod="openstack/ovsdbserver-sb-0" Nov 28 07:13:30 crc kubenswrapper[4922]: I1128 07:13:30.635709 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/39a4d24f-6b5b-48fc-ab66-1ad33462c477-ovsdb-rundir\") pod \"ovsdbserver-sb-0\" (UID: \"39a4d24f-6b5b-48fc-ab66-1ad33462c477\") " pod="openstack/ovsdbserver-sb-0" Nov 28 07:13:30 crc kubenswrapper[4922]: 
I1128 07:13:30.635735 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/39a4d24f-6b5b-48fc-ab66-1ad33462c477-metrics-certs-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"39a4d24f-6b5b-48fc-ab66-1ad33462c477\") " pod="openstack/ovsdbserver-sb-0" Nov 28 07:13:30 crc kubenswrapper[4922]: I1128 07:13:30.635823 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nblg2\" (UniqueName: \"kubernetes.io/projected/39a4d24f-6b5b-48fc-ab66-1ad33462c477-kube-api-access-nblg2\") pod \"ovsdbserver-sb-0\" (UID: \"39a4d24f-6b5b-48fc-ab66-1ad33462c477\") " pod="openstack/ovsdbserver-sb-0" Nov 28 07:13:30 crc kubenswrapper[4922]: I1128 07:13:30.635917 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"ovsdbserver-sb-0\" (UID: \"39a4d24f-6b5b-48fc-ab66-1ad33462c477\") " pod="openstack/ovsdbserver-sb-0" Nov 28 07:13:30 crc kubenswrapper[4922]: I1128 07:13:30.636014 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/39a4d24f-6b5b-48fc-ab66-1ad33462c477-config\") pod \"ovsdbserver-sb-0\" (UID: \"39a4d24f-6b5b-48fc-ab66-1ad33462c477\") " pod="openstack/ovsdbserver-sb-0" Nov 28 07:13:30 crc kubenswrapper[4922]: I1128 07:13:30.636054 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/39a4d24f-6b5b-48fc-ab66-1ad33462c477-scripts\") pod \"ovsdbserver-sb-0\" (UID: \"39a4d24f-6b5b-48fc-ab66-1ad33462c477\") " pod="openstack/ovsdbserver-sb-0" Nov 28 07:13:30 crc kubenswrapper[4922]: I1128 07:13:30.738070 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/39a4d24f-6b5b-48fc-ab66-1ad33462c477-combined-ca-bundle\") pod \"ovsdbserver-sb-0\" (UID: \"39a4d24f-6b5b-48fc-ab66-1ad33462c477\") " pod="openstack/ovsdbserver-sb-0" Nov 28 07:13:30 crc kubenswrapper[4922]: I1128 07:13:30.738113 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb-tls-certs\" (UniqueName: \"kubernetes.io/secret/39a4d24f-6b5b-48fc-ab66-1ad33462c477-ovsdbserver-sb-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"39a4d24f-6b5b-48fc-ab66-1ad33462c477\") " pod="openstack/ovsdbserver-sb-0" Nov 28 07:13:30 crc kubenswrapper[4922]: I1128 07:13:30.738570 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/39a4d24f-6b5b-48fc-ab66-1ad33462c477-ovsdb-rundir\") pod \"ovsdbserver-sb-0\" (UID: \"39a4d24f-6b5b-48fc-ab66-1ad33462c477\") " pod="openstack/ovsdbserver-sb-0" Nov 28 07:13:30 crc kubenswrapper[4922]: I1128 07:13:30.738611 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/39a4d24f-6b5b-48fc-ab66-1ad33462c477-metrics-certs-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"39a4d24f-6b5b-48fc-ab66-1ad33462c477\") " pod="openstack/ovsdbserver-sb-0" Nov 28 07:13:30 crc kubenswrapper[4922]: I1128 07:13:30.738684 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nblg2\" (UniqueName: 
\"kubernetes.io/projected/39a4d24f-6b5b-48fc-ab66-1ad33462c477-kube-api-access-nblg2\") pod \"ovsdbserver-sb-0\" (UID: \"39a4d24f-6b5b-48fc-ab66-1ad33462c477\") " pod="openstack/ovsdbserver-sb-0" Nov 28 07:13:30 crc kubenswrapper[4922]: I1128 07:13:30.738716 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"ovsdbserver-sb-0\" (UID: \"39a4d24f-6b5b-48fc-ab66-1ad33462c477\") " pod="openstack/ovsdbserver-sb-0" Nov 28 07:13:30 crc kubenswrapper[4922]: I1128 07:13:30.738763 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/39a4d24f-6b5b-48fc-ab66-1ad33462c477-config\") pod \"ovsdbserver-sb-0\" (UID: \"39a4d24f-6b5b-48fc-ab66-1ad33462c477\") " pod="openstack/ovsdbserver-sb-0" Nov 28 07:13:30 crc kubenswrapper[4922]: I1128 07:13:30.738795 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/39a4d24f-6b5b-48fc-ab66-1ad33462c477-scripts\") pod \"ovsdbserver-sb-0\" (UID: \"39a4d24f-6b5b-48fc-ab66-1ad33462c477\") " pod="openstack/ovsdbserver-sb-0" Nov 28 07:13:30 crc kubenswrapper[4922]: I1128 07:13:30.740151 4922 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"ovsdbserver-sb-0\" (UID: \"39a4d24f-6b5b-48fc-ab66-1ad33462c477\") device mount path \"/mnt/openstack/pv04\"" pod="openstack/ovsdbserver-sb-0" Nov 28 07:13:30 crc kubenswrapper[4922]: I1128 07:13:30.740903 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/39a4d24f-6b5b-48fc-ab66-1ad33462c477-config\") pod \"ovsdbserver-sb-0\" (UID: \"39a4d24f-6b5b-48fc-ab66-1ad33462c477\") " pod="openstack/ovsdbserver-sb-0" Nov 28 07:13:30 crc kubenswrapper[4922]: I1128 07:13:30.740978 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/39a4d24f-6b5b-48fc-ab66-1ad33462c477-scripts\") pod \"ovsdbserver-sb-0\" (UID: \"39a4d24f-6b5b-48fc-ab66-1ad33462c477\") " pod="openstack/ovsdbserver-sb-0" Nov 28 07:13:30 crc kubenswrapper[4922]: I1128 07:13:30.741686 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/39a4d24f-6b5b-48fc-ab66-1ad33462c477-ovsdb-rundir\") pod \"ovsdbserver-sb-0\" (UID: \"39a4d24f-6b5b-48fc-ab66-1ad33462c477\") " pod="openstack/ovsdbserver-sb-0" Nov 28 07:13:30 crc kubenswrapper[4922]: I1128 07:13:30.746321 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb-tls-certs\" (UniqueName: \"kubernetes.io/secret/39a4d24f-6b5b-48fc-ab66-1ad33462c477-ovsdbserver-sb-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"39a4d24f-6b5b-48fc-ab66-1ad33462c477\") " pod="openstack/ovsdbserver-sb-0" Nov 28 07:13:30 crc kubenswrapper[4922]: I1128 07:13:30.753484 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/39a4d24f-6b5b-48fc-ab66-1ad33462c477-combined-ca-bundle\") pod \"ovsdbserver-sb-0\" (UID: \"39a4d24f-6b5b-48fc-ab66-1ad33462c477\") " pod="openstack/ovsdbserver-sb-0" Nov 28 07:13:30 crc kubenswrapper[4922]: I1128 07:13:30.756376 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nblg2\" 
(UniqueName: \"kubernetes.io/projected/39a4d24f-6b5b-48fc-ab66-1ad33462c477-kube-api-access-nblg2\") pod \"ovsdbserver-sb-0\" (UID: \"39a4d24f-6b5b-48fc-ab66-1ad33462c477\") " pod="openstack/ovsdbserver-sb-0" Nov 28 07:13:30 crc kubenswrapper[4922]: I1128 07:13:30.761237 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"ovsdbserver-sb-0\" (UID: \"39a4d24f-6b5b-48fc-ab66-1ad33462c477\") " pod="openstack/ovsdbserver-sb-0" Nov 28 07:13:30 crc kubenswrapper[4922]: I1128 07:13:30.767466 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/39a4d24f-6b5b-48fc-ab66-1ad33462c477-metrics-certs-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"39a4d24f-6b5b-48fc-ab66-1ad33462c477\") " pod="openstack/ovsdbserver-sb-0" Nov 28 07:13:30 crc kubenswrapper[4922]: I1128 07:13:30.856146 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovsdbserver-sb-0" Nov 28 07:13:35 crc kubenswrapper[4922]: E1128 07:13:35.162186 4922 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-rabbitmq@sha256:c64e18fe0ecb6900e763e6cf6be0ca8f71b5c8af9e078a543238a505cf88ae46" Nov 28 07:13:35 crc kubenswrapper[4922]: E1128 07:13:35.162633 4922 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:setup-container,Image:quay.io/podified-antelope-centos9/openstack-rabbitmq@sha256:c64e18fe0ecb6900e763e6cf6be0ca8f71b5c8af9e078a543238a505cf88ae46,Command:[sh -c cp /tmp/erlang-cookie-secret/.erlang.cookie /var/lib/rabbitmq/.erlang.cookie && chmod 600 /var/lib/rabbitmq/.erlang.cookie ; cp /tmp/rabbitmq-plugins/enabled_plugins /operator/enabled_plugins ; echo '[default]' > /var/lib/rabbitmq/.rabbitmqadmin.conf && sed -e 's/default_user/username/' -e 's/default_pass/password/' /tmp/default_user.conf >> /var/lib/rabbitmq/.rabbitmqadmin.conf && chmod 600 /var/lib/rabbitmq/.rabbitmqadmin.conf ; sleep 30],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{20 -3} {} 20m DecimalSI},memory: {{67108864 0} {} BinarySI},},Requests:ResourceList{cpu: {{20 -3} {} 20m DecimalSI},memory: {{67108864 0} {} 
BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:plugins-conf,ReadOnly:false,MountPath:/tmp/rabbitmq-plugins/,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:rabbitmq-erlang-cookie,ReadOnly:false,MountPath:/var/lib/rabbitmq/,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:erlang-cookie-secret,ReadOnly:false,MountPath:/tmp/erlang-cookie-secret/,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:rabbitmq-plugins,ReadOnly:false,MountPath:/operator,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:persistence,ReadOnly:false,MountPath:/var/lib/rabbitmq/mnesia/,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:rabbitmq-confd,ReadOnly:false,MountPath:/tmp/default_user.conf,SubPath:default_user.conf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-jbnrh,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod rabbitmq-cell1-server-0_openstack(99708a5d-57d5-4479-8e09-94428bb13fa3): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 28 07:13:35 crc kubenswrapper[4922]: E1128 07:13:35.163948 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"setup-container\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/rabbitmq-cell1-server-0" podUID="99708a5d-57d5-4479-8e09-94428bb13fa3" Nov 28 07:13:35 crc kubenswrapper[4922]: E1128 07:13:35.456544 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"setup-container\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-rabbitmq@sha256:c64e18fe0ecb6900e763e6cf6be0ca8f71b5c8af9e078a543238a505cf88ae46\\\"\"" pod="openstack/rabbitmq-cell1-server-0" podUID="99708a5d-57d5-4479-8e09-94428bb13fa3" Nov 28 07:13:42 crc kubenswrapper[4922]: E1128 07:13:42.986240 4922 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-mariadb@sha256:5526be2fd8d8cdc035078fdbcb7de6b02c081147295a13f2b1e50e281ef17f52" Nov 28 07:13:42 crc kubenswrapper[4922]: E1128 07:13:42.986636 4922 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:mysql-bootstrap,Image:quay.io/podified-antelope-centos9/openstack-mariadb@sha256:5526be2fd8d8cdc035078fdbcb7de6b02c081147295a13f2b1e50e281ef17f52,Command:[bash 
/var/lib/operator-scripts/mysql_bootstrap.sh],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:KOLLA_BOOTSTRAP,Value:True,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:mysql-db,ReadOnly:false,MountPath:/var/lib/mysql,SubPath:mysql,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data-default,ReadOnly:true,MountPath:/var/lib/config-data/default,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data-generated,ReadOnly:false,MountPath:/var/lib/config-data/generated,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:operator-scripts,ReadOnly:true,MountPath:/var/lib/operator-scripts,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kolla-config,ReadOnly:true,MountPath:/var/lib/kolla/config_files,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-5bcdt,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod openstack-galera-0_openstack(c39356f2-8f5d-45d3-8188-7d9428c4d8bf): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 28 07:13:42 crc kubenswrapper[4922]: E1128 07:13:42.987804 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"mysql-bootstrap\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/openstack-galera-0" podUID="c39356f2-8f5d-45d3-8188-7d9428c4d8bf" Nov 28 07:13:43 crc kubenswrapper[4922]: E1128 07:13:43.519577 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"mysql-bootstrap\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-mariadb@sha256:5526be2fd8d8cdc035078fdbcb7de6b02c081147295a13f2b1e50e281ef17f52\\\"\"" pod="openstack/openstack-galera-0" podUID="c39356f2-8f5d-45d3-8188-7d9428c4d8bf" Nov 28 07:13:45 crc kubenswrapper[4922]: E1128 07:13:45.844744 4922 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-neutron-server@sha256:4218330ae90f65f4a2c1d93334812c4d04a4ed1d46013269252aba16e1138627" Nov 28 07:13:45 crc kubenswrapper[4922]: E1128 07:13:45.845080 4922 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:init,Image:quay.io/podified-antelope-centos9/openstack-neutron-server@sha256:4218330ae90f65f4a2c1d93334812c4d04a4ed1d46013269252aba16e1138627,Command:[/bin/bash],Args:[-c dnsmasq 
--interface=* --conf-dir=/etc/dnsmasq.d --hostsdir=/etc/dnsmasq.d/hosts --keep-in-foreground --log-debug --bind-interfaces --listen-address=$(POD_IP) --port 5353 --log-facility=- --no-hosts --domain-needed --no-resolv --bogus-priv --log-queries --test],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:n68chd6h679hbfh55fhc6h5ffh5d8h94h56ch589hb4hc5h57bh677hcdh655h8dh667h675h654h66ch567h8fh659h5b4h675h566h55bh54h67dh6dq,ValueFrom:nil,},EnvVar{Name:POD_IP,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:status.podIP,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config,ReadOnly:true,MountPath:/etc/dnsmasq.d/config.cfg,SubPath:dns,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:dns-svc,ReadOnly:true,MountPath:/etc/dnsmasq.d/hosts/dns-svc,SubPath:dns-svc,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-b9zkx,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:&SeccompProfile{Type:RuntimeDefault,LocalhostProfile:nil,},AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod dnsmasq-dns-57dc4c6697-m7tx4_openstack(aa2656f2-23d1-46c5-9cb9-eed7c3cddea2): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 28 07:13:45 crc kubenswrapper[4922]: E1128 07:13:45.846250 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"init\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/dnsmasq-dns-57dc4c6697-m7tx4" podUID="aa2656f2-23d1-46c5-9cb9-eed7c3cddea2" Nov 28 07:13:45 crc kubenswrapper[4922]: E1128 07:13:45.857092 4922 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-neutron-server@sha256:4218330ae90f65f4a2c1d93334812c4d04a4ed1d46013269252aba16e1138627" Nov 28 07:13:45 crc kubenswrapper[4922]: E1128 07:13:45.857575 4922 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:init,Image:quay.io/podified-antelope-centos9/openstack-neutron-server@sha256:4218330ae90f65f4a2c1d93334812c4d04a4ed1d46013269252aba16e1138627,Command:[/bin/bash],Args:[-c dnsmasq --interface=* --conf-dir=/etc/dnsmasq.d --hostsdir=/etc/dnsmasq.d/hosts --keep-in-foreground --log-debug --bind-interfaces --listen-address=$(POD_IP) --port 5353 --log-facility=- --no-hosts --domain-needed --no-resolv --bogus-priv --log-queries 
--test],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:n659h4h664hbh658h587h67ch89h587h8fh679hc6hf9h55fh644h5d5h698h68dh5cdh5ffh669h54ch9h689hb8hd4h5bfhd8h5d7h5fh665h574q,ValueFrom:nil,},EnvVar{Name:POD_IP,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:status.podIP,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config,ReadOnly:true,MountPath:/etc/dnsmasq.d/config.cfg,SubPath:dns,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:dns-svc,ReadOnly:true,MountPath:/etc/dnsmasq.d/hosts/dns-svc,SubPath:dns-svc,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-6ln7v,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:&SeccompProfile{Type:RuntimeDefault,LocalhostProfile:nil,},AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod dnsmasq-dns-8446fd7c75-29qqr_openstack(89786935-51a3-4851-94e2-5b15218b914e): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 28 07:13:45 crc kubenswrapper[4922]: E1128 07:13:45.858934 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"init\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/dnsmasq-dns-8446fd7c75-29qqr" podUID="89786935-51a3-4851-94e2-5b15218b914e" Nov 28 07:13:45 crc kubenswrapper[4922]: E1128 07:13:45.874484 4922 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-neutron-server@sha256:4218330ae90f65f4a2c1d93334812c4d04a4ed1d46013269252aba16e1138627" Nov 28 07:13:45 crc kubenswrapper[4922]: E1128 07:13:45.874652 4922 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:init,Image:quay.io/podified-antelope-centos9/openstack-neutron-server@sha256:4218330ae90f65f4a2c1d93334812c4d04a4ed1d46013269252aba16e1138627,Command:[/bin/bash],Args:[-c dnsmasq --interface=* --conf-dir=/etc/dnsmasq.d --hostsdir=/etc/dnsmasq.d/hosts --keep-in-foreground --log-debug --bind-interfaces --listen-address=$(POD_IP) --port 5353 --log-facility=- --no-hosts --domain-needed --no-resolv --bogus-priv --log-queries 
--test],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:nffh5bdhf4h5f8h79h55h77h58fh56dh7bh6fh578hbch55dh68h56bhd9h65dh57ch658hc9h566h666h688h58h65dh684h5d7h6ch575h5d6h88q,ValueFrom:nil,},EnvVar{Name:POD_IP,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:status.podIP,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config,ReadOnly:true,MountPath:/etc/dnsmasq.d/config.cfg,SubPath:dns,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-4v9m5,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:&SeccompProfile{Type:RuntimeDefault,LocalhostProfile:nil,},AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod dnsmasq-dns-557f57d995-hqpvv_openstack(95718cd4-a34d-4d0c-949f-27d67a0cea23): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 28 07:13:45 crc kubenswrapper[4922]: E1128 07:13:45.875972 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"init\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/dnsmasq-dns-557f57d995-hqpvv" podUID="95718cd4-a34d-4d0c-949f-27d67a0cea23" Nov 28 07:13:45 crc kubenswrapper[4922]: E1128 07:13:45.949260 4922 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-neutron-server@sha256:4218330ae90f65f4a2c1d93334812c4d04a4ed1d46013269252aba16e1138627" Nov 28 07:13:45 crc kubenswrapper[4922]: E1128 07:13:45.949400 4922 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:init,Image:quay.io/podified-antelope-centos9/openstack-neutron-server@sha256:4218330ae90f65f4a2c1d93334812c4d04a4ed1d46013269252aba16e1138627,Command:[/bin/bash],Args:[-c dnsmasq --interface=* --conf-dir=/etc/dnsmasq.d --hostsdir=/etc/dnsmasq.d/hosts --keep-in-foreground --log-debug --bind-interfaces --listen-address=$(POD_IP) --port 5353 --log-facility=- --no-hosts --domain-needed --no-resolv --bogus-priv --log-queries 
--test],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:ndfhb5h667h568h584h5f9h58dh565h664h587h597h577h64bh5c4h66fh647hbdh68ch5c5h68dh686h5f7h64hd7hc6h55fh57bh98h57fh87h5fh57fq,ValueFrom:nil,},EnvVar{Name:POD_IP,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:status.podIP,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config,ReadOnly:true,MountPath:/etc/dnsmasq.d/config.cfg,SubPath:dns,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:dns-svc,ReadOnly:true,MountPath:/etc/dnsmasq.d/hosts/dns-svc,SubPath:dns-svc,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-jb6b2,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:&SeccompProfile{Type:RuntimeDefault,LocalhostProfile:nil,},AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod dnsmasq-dns-766fdc659c-674sf_openstack(7d671d03-d5c3-43ae-a670-803728c16385): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 28 07:13:45 crc kubenswrapper[4922]: E1128 07:13:45.950696 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"init\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/dnsmasq-dns-766fdc659c-674sf" podUID="7d671d03-d5c3-43ae-a670-803728c16385" Nov 28 07:13:46 crc kubenswrapper[4922]: I1128 07:13:46.329139 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstack-cell1-galera-0"] Nov 28 07:13:46 crc kubenswrapper[4922]: I1128 07:13:46.426847 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-xqzrg"] Nov 28 07:13:46 crc kubenswrapper[4922]: W1128 07:13:46.455204 4922 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pode5e01f31_28bd_46a2_b5cc_695c485deaf6.slice/crio-3690849f7167495c60259b16baa6d41a1a79a1d99bbe1ad074b5a9459961e00f WatchSource:0}: Error finding container 3690849f7167495c60259b16baa6d41a1a79a1d99bbe1ad074b5a9459961e00f: Status 404 returned error can't find the container with id 3690849f7167495c60259b16baa6d41a1a79a1d99bbe1ad074b5a9459961e00f Nov 28 07:13:46 crc kubenswrapper[4922]: I1128 07:13:46.492423 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-ovs-m9xpz"] Nov 28 07:13:46 crc kubenswrapper[4922]: W1128 07:13:46.495756 4922 manager.go:1169] Failed to process watch event {EventType:0 
Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podec882eb7_01fb_4f7f_bad8_812346e5880e.slice/crio-e8931293e26c27ffc2711e53ca6ee67c9e2f3aab83ae99464d7ea58d035ba809 WatchSource:0}: Error finding container e8931293e26c27ffc2711e53ca6ee67c9e2f3aab83ae99464d7ea58d035ba809: Status 404 returned error can't find the container with id e8931293e26c27ffc2711e53ca6ee67c9e2f3aab83ae99464d7ea58d035ba809 Nov 28 07:13:46 crc kubenswrapper[4922]: I1128 07:13:46.529777 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/memcached-0"] Nov 28 07:13:46 crc kubenswrapper[4922]: W1128 07:13:46.530842 4922 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod8d12d285_16c4_4e64_98d8_cff0f581aee4.slice/crio-af33c01520597a5c76e0a71520199a315868151f769652c34a44acedd0a51dfd WatchSource:0}: Error finding container af33c01520597a5c76e0a71520199a315868151f769652c34a44acedd0a51dfd: Status 404 returned error can't find the container with id af33c01520597a5c76e0a71520199a315868151f769652c34a44acedd0a51dfd Nov 28 07:13:46 crc kubenswrapper[4922]: W1128 07:13:46.533189 4922 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod43f49b9c_475e_4b28_ada6_e73db47e4bd7.slice/crio-efdc7798db4213709ae0ccc482f0aefa0e60cade5343f3783c160883f6206306 WatchSource:0}: Error finding container efdc7798db4213709ae0ccc482f0aefa0e60cade5343f3783c160883f6206306: Status 404 returned error can't find the container with id efdc7798db4213709ae0ccc482f0aefa0e60cade5343f3783c160883f6206306 Nov 28 07:13:46 crc kubenswrapper[4922]: I1128 07:13:46.536032 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/kube-state-metrics-0"] Nov 28 07:13:46 crc kubenswrapper[4922]: I1128 07:13:46.548886 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/memcached-0" event={"ID":"8d12d285-16c4-4e64-98d8-cff0f581aee4","Type":"ContainerStarted","Data":"af33c01520597a5c76e0a71520199a315868151f769652c34a44acedd0a51dfd"} Nov 28 07:13:46 crc kubenswrapper[4922]: I1128 07:13:46.551919 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-m9xpz" event={"ID":"ec882eb7-01fb-4f7f-bad8-812346e5880e","Type":"ContainerStarted","Data":"e8931293e26c27ffc2711e53ca6ee67c9e2f3aab83ae99464d7ea58d035ba809"} Nov 28 07:13:46 crc kubenswrapper[4922]: I1128 07:13:46.553386 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"349fc74f-b0ac-437d-89ab-7106192b8e9e","Type":"ContainerStarted","Data":"0473de9d0028c45b30a6a715907f3797e6865f1ca57636aef4dea1cc1978c9a0"} Nov 28 07:13:46 crc kubenswrapper[4922]: I1128 07:13:46.554646 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-xqzrg" event={"ID":"e5e01f31-28bd-46a2-b5cc-695c485deaf6","Type":"ContainerStarted","Data":"3690849f7167495c60259b16baa6d41a1a79a1d99bbe1ad074b5a9459961e00f"} Nov 28 07:13:46 crc kubenswrapper[4922]: I1128 07:13:46.555608 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"43f49b9c-475e-4b28-ada6-e73db47e4bd7","Type":"ContainerStarted","Data":"efdc7798db4213709ae0ccc482f0aefa0e60cade5343f3783c160883f6206306"} Nov 28 07:13:46 crc kubenswrapper[4922]: E1128 07:13:46.556794 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"init\" with ImagePullBackOff: \"Back-off pulling image 
\\\"quay.io/podified-antelope-centos9/openstack-neutron-server@sha256:4218330ae90f65f4a2c1d93334812c4d04a4ed1d46013269252aba16e1138627\\\"\"" pod="openstack/dnsmasq-dns-57dc4c6697-m7tx4" podUID="aa2656f2-23d1-46c5-9cb9-eed7c3cddea2" Nov 28 07:13:46 crc kubenswrapper[4922]: E1128 07:13:46.556922 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"init\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-neutron-server@sha256:4218330ae90f65f4a2c1d93334812c4d04a4ed1d46013269252aba16e1138627\\\"\"" pod="openstack/dnsmasq-dns-8446fd7c75-29qqr" podUID="89786935-51a3-4851-94e2-5b15218b914e" Nov 28 07:13:46 crc kubenswrapper[4922]: I1128 07:13:46.599135 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-sb-0"] Nov 28 07:13:47 crc kubenswrapper[4922]: I1128 07:13:47.027647 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-nb-0"] Nov 28 07:13:47 crc kubenswrapper[4922]: W1128 07:13:47.244451 4922 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podd27299ac_7d8d_4485_86fb_6ac7f34ea1ae.slice/crio-b8fcb175ab0edf5fb2b69492452725d0f9af96c20603ff5db168462c5286327e WatchSource:0}: Error finding container b8fcb175ab0edf5fb2b69492452725d0f9af96c20603ff5db168462c5286327e: Status 404 returned error can't find the container with id b8fcb175ab0edf5fb2b69492452725d0f9af96c20603ff5db168462c5286327e Nov 28 07:13:47 crc kubenswrapper[4922]: I1128 07:13:47.336639 4922 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-766fdc659c-674sf" Nov 28 07:13:47 crc kubenswrapper[4922]: I1128 07:13:47.380573 4922 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-557f57d995-hqpvv" Nov 28 07:13:47 crc kubenswrapper[4922]: I1128 07:13:47.483417 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7d671d03-d5c3-43ae-a670-803728c16385-config\") pod \"7d671d03-d5c3-43ae-a670-803728c16385\" (UID: \"7d671d03-d5c3-43ae-a670-803728c16385\") " Nov 28 07:13:47 crc kubenswrapper[4922]: I1128 07:13:47.483722 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/95718cd4-a34d-4d0c-949f-27d67a0cea23-config\") pod \"95718cd4-a34d-4d0c-949f-27d67a0cea23\" (UID: \"95718cd4-a34d-4d0c-949f-27d67a0cea23\") " Nov 28 07:13:47 crc kubenswrapper[4922]: I1128 07:13:47.483863 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/7d671d03-d5c3-43ae-a670-803728c16385-dns-svc\") pod \"7d671d03-d5c3-43ae-a670-803728c16385\" (UID: \"7d671d03-d5c3-43ae-a670-803728c16385\") " Nov 28 07:13:47 crc kubenswrapper[4922]: I1128 07:13:47.483921 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4v9m5\" (UniqueName: \"kubernetes.io/projected/95718cd4-a34d-4d0c-949f-27d67a0cea23-kube-api-access-4v9m5\") pod \"95718cd4-a34d-4d0c-949f-27d67a0cea23\" (UID: \"95718cd4-a34d-4d0c-949f-27d67a0cea23\") " Nov 28 07:13:47 crc kubenswrapper[4922]: I1128 07:13:47.484001 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jb6b2\" (UniqueName: \"kubernetes.io/projected/7d671d03-d5c3-43ae-a670-803728c16385-kube-api-access-jb6b2\") pod \"7d671d03-d5c3-43ae-a670-803728c16385\" (UID: \"7d671d03-d5c3-43ae-a670-803728c16385\") " Nov 28 07:13:47 crc kubenswrapper[4922]: I1128 07:13:47.484325 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7d671d03-d5c3-43ae-a670-803728c16385-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "7d671d03-d5c3-43ae-a670-803728c16385" (UID: "7d671d03-d5c3-43ae-a670-803728c16385"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 07:13:47 crc kubenswrapper[4922]: I1128 07:13:47.484389 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7d671d03-d5c3-43ae-a670-803728c16385-config" (OuterVolumeSpecName: "config") pod "7d671d03-d5c3-43ae-a670-803728c16385" (UID: "7d671d03-d5c3-43ae-a670-803728c16385"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 07:13:47 crc kubenswrapper[4922]: I1128 07:13:47.484859 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/95718cd4-a34d-4d0c-949f-27d67a0cea23-config" (OuterVolumeSpecName: "config") pod "95718cd4-a34d-4d0c-949f-27d67a0cea23" (UID: "95718cd4-a34d-4d0c-949f-27d67a0cea23"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 07:13:47 crc kubenswrapper[4922]: I1128 07:13:47.489196 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/95718cd4-a34d-4d0c-949f-27d67a0cea23-kube-api-access-4v9m5" (OuterVolumeSpecName: "kube-api-access-4v9m5") pod "95718cd4-a34d-4d0c-949f-27d67a0cea23" (UID: "95718cd4-a34d-4d0c-949f-27d67a0cea23"). InnerVolumeSpecName "kube-api-access-4v9m5". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 07:13:47 crc kubenswrapper[4922]: I1128 07:13:47.489563 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7d671d03-d5c3-43ae-a670-803728c16385-kube-api-access-jb6b2" (OuterVolumeSpecName: "kube-api-access-jb6b2") pod "7d671d03-d5c3-43ae-a670-803728c16385" (UID: "7d671d03-d5c3-43ae-a670-803728c16385"). InnerVolumeSpecName "kube-api-access-jb6b2". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 07:13:47 crc kubenswrapper[4922]: I1128 07:13:47.570275 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-0" event={"ID":"d27299ac-7d8d-4485-86fb-6ac7f34ea1ae","Type":"ContainerStarted","Data":"b8fcb175ab0edf5fb2b69492452725d0f9af96c20603ff5db168462c5286327e"} Nov 28 07:13:47 crc kubenswrapper[4922]: I1128 07:13:47.572327 4922 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-766fdc659c-674sf" Nov 28 07:13:47 crc kubenswrapper[4922]: I1128 07:13:47.572339 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-766fdc659c-674sf" event={"ID":"7d671d03-d5c3-43ae-a670-803728c16385","Type":"ContainerDied","Data":"3c5f74345fe301242ccfcff91e3c8f9abb164099d0a8ac570d5cbebeaf7bf084"} Nov 28 07:13:47 crc kubenswrapper[4922]: I1128 07:13:47.576913 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-557f57d995-hqpvv" event={"ID":"95718cd4-a34d-4d0c-949f-27d67a0cea23","Type":"ContainerDied","Data":"ea84c2b2616814a3494cb6b5e164e064f3123764daf6c327431576faf17a2c01"} Nov 28 07:13:47 crc kubenswrapper[4922]: I1128 07:13:47.577131 4922 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-557f57d995-hqpvv" Nov 28 07:13:47 crc kubenswrapper[4922]: I1128 07:13:47.579579 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-0" event={"ID":"39a4d24f-6b5b-48fc-ab66-1ad33462c477","Type":"ContainerStarted","Data":"b5071c1943aa02a0d763ccb09244c3588d19f030bea2c6ea0dd5385ade9fc987"} Nov 28 07:13:47 crc kubenswrapper[4922]: I1128 07:13:47.581631 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"4cf25acc-0d60-4b0a-a9c9-adc7ddce7458","Type":"ContainerStarted","Data":"33fac45176641182615e4dbf61bb82f263874c2935f70fa33ef270e4398e93a2"} Nov 28 07:13:47 crc kubenswrapper[4922]: I1128 07:13:47.587077 4922 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4v9m5\" (UniqueName: \"kubernetes.io/projected/95718cd4-a34d-4d0c-949f-27d67a0cea23-kube-api-access-4v9m5\") on node \"crc\" DevicePath \"\"" Nov 28 07:13:47 crc kubenswrapper[4922]: I1128 07:13:47.587253 4922 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jb6b2\" (UniqueName: \"kubernetes.io/projected/7d671d03-d5c3-43ae-a670-803728c16385-kube-api-access-jb6b2\") on node \"crc\" DevicePath \"\"" Nov 28 07:13:47 crc kubenswrapper[4922]: I1128 07:13:47.587287 4922 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7d671d03-d5c3-43ae-a670-803728c16385-config\") on node \"crc\" DevicePath \"\"" Nov 28 07:13:47 crc kubenswrapper[4922]: I1128 07:13:47.587303 4922 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/95718cd4-a34d-4d0c-949f-27d67a0cea23-config\") on node \"crc\" DevicePath \"\"" Nov 28 07:13:47 crc kubenswrapper[4922]: I1128 
07:13:47.587317 4922 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/7d671d03-d5c3-43ae-a670-803728c16385-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 28 07:13:47 crc kubenswrapper[4922]: I1128 07:13:47.644960 4922 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-766fdc659c-674sf"] Nov 28 07:13:47 crc kubenswrapper[4922]: I1128 07:13:47.650980 4922 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-766fdc659c-674sf"] Nov 28 07:13:47 crc kubenswrapper[4922]: I1128 07:13:47.685681 4922 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-557f57d995-hqpvv"] Nov 28 07:13:47 crc kubenswrapper[4922]: I1128 07:13:47.691356 4922 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-557f57d995-hqpvv"] Nov 28 07:13:48 crc kubenswrapper[4922]: I1128 07:13:48.590282 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"349fc74f-b0ac-437d-89ab-7106192b8e9e","Type":"ContainerStarted","Data":"165900f07622025ebdb7502eec91c0d20df88aaec6da0163ef170ae871ddc19f"} Nov 28 07:13:49 crc kubenswrapper[4922]: I1128 07:13:49.412551 4922 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7d671d03-d5c3-43ae-a670-803728c16385" path="/var/lib/kubelet/pods/7d671d03-d5c3-43ae-a670-803728c16385/volumes" Nov 28 07:13:49 crc kubenswrapper[4922]: I1128 07:13:49.413002 4922 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="95718cd4-a34d-4d0c-949f-27d67a0cea23" path="/var/lib/kubelet/pods/95718cd4-a34d-4d0c-949f-27d67a0cea23/volumes" Nov 28 07:13:52 crc kubenswrapper[4922]: I1128 07:13:52.623758 4922 generic.go:334] "Generic (PLEG): container finished" podID="349fc74f-b0ac-437d-89ab-7106192b8e9e" containerID="165900f07622025ebdb7502eec91c0d20df88aaec6da0163ef170ae871ddc19f" exitCode=0 Nov 28 07:13:52 crc kubenswrapper[4922]: I1128 07:13:52.623853 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"349fc74f-b0ac-437d-89ab-7106192b8e9e","Type":"ContainerDied","Data":"165900f07622025ebdb7502eec91c0d20df88aaec6da0163ef170ae871ddc19f"} Nov 28 07:13:53 crc kubenswrapper[4922]: I1128 07:13:53.632263 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/memcached-0" event={"ID":"8d12d285-16c4-4e64-98d8-cff0f581aee4","Type":"ContainerStarted","Data":"6327f387153de8972c997a6ac2a21401ab00a932a9990342cf61d1e499fcf45e"} Nov 28 07:13:53 crc kubenswrapper[4922]: I1128 07:13:53.632871 4922 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/memcached-0" Nov 28 07:13:53 crc kubenswrapper[4922]: I1128 07:13:53.634198 4922 generic.go:334] "Generic (PLEG): container finished" podID="ec882eb7-01fb-4f7f-bad8-812346e5880e" containerID="be503fab67fd063d8f5a6fe862d508920bd019c3b2a011b44c4391dfea4c60e2" exitCode=0 Nov 28 07:13:53 crc kubenswrapper[4922]: I1128 07:13:53.634281 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-m9xpz" event={"ID":"ec882eb7-01fb-4f7f-bad8-812346e5880e","Type":"ContainerDied","Data":"be503fab67fd063d8f5a6fe862d508920bd019c3b2a011b44c4391dfea4c60e2"} Nov 28 07:13:53 crc kubenswrapper[4922]: I1128 07:13:53.635853 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-0" 
event={"ID":"d27299ac-7d8d-4485-86fb-6ac7f34ea1ae","Type":"ContainerStarted","Data":"240daa62f7498e10be12162ab84970d0b62d583627e61eb191c0b48920ae86ed"} Nov 28 07:13:53 crc kubenswrapper[4922]: I1128 07:13:53.637453 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"99708a5d-57d5-4479-8e09-94428bb13fa3","Type":"ContainerStarted","Data":"9e43566d759ea7d848615e1c8beb2d9a8c5b517a0be3388bc208d070214e406b"} Nov 28 07:13:53 crc kubenswrapper[4922]: I1128 07:13:53.639639 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"349fc74f-b0ac-437d-89ab-7106192b8e9e","Type":"ContainerStarted","Data":"e866e4c558d425e16d02cae7a1249331e5e9aee144a65a24341d72360112484f"} Nov 28 07:13:53 crc kubenswrapper[4922]: I1128 07:13:53.641078 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-xqzrg" event={"ID":"e5e01f31-28bd-46a2-b5cc-695c485deaf6","Type":"ContainerStarted","Data":"ce9c4ff1ad6613830ac06a1b349aa3a3f88a3726cd3576c4fc3e803b5a7ea1c6"} Nov 28 07:13:53 crc kubenswrapper[4922]: I1128 07:13:53.641578 4922 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovn-controller-xqzrg" Nov 28 07:13:53 crc kubenswrapper[4922]: I1128 07:13:53.642869 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-0" event={"ID":"39a4d24f-6b5b-48fc-ab66-1ad33462c477","Type":"ContainerStarted","Data":"38e3e9f9a924e02c5e76f4a6a76132d9a4596633913f1d0199cbf2b940a82fc6"} Nov 28 07:13:53 crc kubenswrapper[4922]: I1128 07:13:53.646950 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"43f49b9c-475e-4b28-ada6-e73db47e4bd7","Type":"ContainerStarted","Data":"2e7cddf6b0653261ca2f1e88ef62868195f7ba744ecd3dbed01efef8d6511634"} Nov 28 07:13:53 crc kubenswrapper[4922]: I1128 07:13:53.647183 4922 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/kube-state-metrics-0" Nov 28 07:13:53 crc kubenswrapper[4922]: I1128 07:13:53.652360 4922 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/memcached-0" podStartSLOduration=26.959281526 podStartE2EDuration="32.652334782s" podCreationTimestamp="2025-11-28 07:13:21 +0000 UTC" firstStartedPulling="2025-11-28 07:13:46.53378849 +0000 UTC m=+1271.454184072" lastFinishedPulling="2025-11-28 07:13:52.226841746 +0000 UTC m=+1277.147237328" observedRunningTime="2025-11-28 07:13:53.648702896 +0000 UTC m=+1278.569098498" watchObservedRunningTime="2025-11-28 07:13:53.652334782 +0000 UTC m=+1278.572730374" Nov 28 07:13:53 crc kubenswrapper[4922]: I1128 07:13:53.668417 4922 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/kube-state-metrics-0" podStartSLOduration=24.156629024 podStartE2EDuration="30.668396499s" podCreationTimestamp="2025-11-28 07:13:23 +0000 UTC" firstStartedPulling="2025-11-28 07:13:46.536764879 +0000 UTC m=+1271.457160461" lastFinishedPulling="2025-11-28 07:13:53.048532344 +0000 UTC m=+1277.968927936" observedRunningTime="2025-11-28 07:13:53.66315933 +0000 UTC m=+1278.583554932" watchObservedRunningTime="2025-11-28 07:13:53.668396499 +0000 UTC m=+1278.588792081" Nov 28 07:13:53 crc kubenswrapper[4922]: I1128 07:13:53.691182 4922 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/openstack-cell1-galera-0" podStartSLOduration=33.766337678 podStartE2EDuration="34.691160783s" podCreationTimestamp="2025-11-28 07:13:19 +0000 
UTC" firstStartedPulling="2025-11-28 07:13:46.330547078 +0000 UTC m=+1271.250942660" lastFinishedPulling="2025-11-28 07:13:47.255370173 +0000 UTC m=+1272.175765765" observedRunningTime="2025-11-28 07:13:53.68841892 +0000 UTC m=+1278.608814512" watchObservedRunningTime="2025-11-28 07:13:53.691160783 +0000 UTC m=+1278.611556365" Nov 28 07:13:53 crc kubenswrapper[4922]: I1128 07:13:53.752189 4922 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-controller-xqzrg" podStartSLOduration=22.17468931 podStartE2EDuration="27.752172821s" podCreationTimestamp="2025-11-28 07:13:26 +0000 UTC" firstStartedPulling="2025-11-28 07:13:46.457830665 +0000 UTC m=+1271.378226247" lastFinishedPulling="2025-11-28 07:13:52.035314176 +0000 UTC m=+1276.955709758" observedRunningTime="2025-11-28 07:13:53.750551468 +0000 UTC m=+1278.670947070" watchObservedRunningTime="2025-11-28 07:13:53.752172821 +0000 UTC m=+1278.672568403" Nov 28 07:13:54 crc kubenswrapper[4922]: I1128 07:13:54.659789 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-m9xpz" event={"ID":"ec882eb7-01fb-4f7f-bad8-812346e5880e","Type":"ContainerStarted","Data":"35985199ebc15c8748b4f22bfafda9ad3d4b2a2643cfa7d6df842376383f82a2"} Nov 28 07:13:54 crc kubenswrapper[4922]: I1128 07:13:54.660086 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-m9xpz" event={"ID":"ec882eb7-01fb-4f7f-bad8-812346e5880e","Type":"ContainerStarted","Data":"7588b9fec893bcd865ba591436037cd645135e83e041b11ecddc7dce8fa1ace5"} Nov 28 07:13:54 crc kubenswrapper[4922]: I1128 07:13:54.660143 4922 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovn-controller-ovs-m9xpz" Nov 28 07:13:54 crc kubenswrapper[4922]: I1128 07:13:54.660712 4922 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovn-controller-ovs-m9xpz" Nov 28 07:13:54 crc kubenswrapper[4922]: I1128 07:13:54.682875 4922 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-controller-ovs-m9xpz" podStartSLOduration=22.940567459 podStartE2EDuration="28.682858831s" podCreationTimestamp="2025-11-28 07:13:26 +0000 UTC" firstStartedPulling="2025-11-28 07:13:46.499169082 +0000 UTC m=+1271.419564664" lastFinishedPulling="2025-11-28 07:13:52.241460444 +0000 UTC m=+1277.161856036" observedRunningTime="2025-11-28 07:13:54.681862164 +0000 UTC m=+1279.602257746" watchObservedRunningTime="2025-11-28 07:13:54.682858831 +0000 UTC m=+1279.603254413" Nov 28 07:13:55 crc kubenswrapper[4922]: I1128 07:13:55.675872 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"c39356f2-8f5d-45d3-8188-7d9428c4d8bf","Type":"ContainerStarted","Data":"69d69b3f84197c0fb9fcf8a14b6ee2ee83a51d46a61eb90348dde8a31602b9e1"} Nov 28 07:13:57 crc kubenswrapper[4922]: I1128 07:13:57.692339 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-0" event={"ID":"d27299ac-7d8d-4485-86fb-6ac7f34ea1ae","Type":"ContainerStarted","Data":"ed684f6b629f4e8652e956f602ceebf88d808cf52630b1ec4ad72baf8b709140"} Nov 28 07:13:57 crc kubenswrapper[4922]: I1128 07:13:57.695415 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-0" event={"ID":"39a4d24f-6b5b-48fc-ab66-1ad33462c477","Type":"ContainerStarted","Data":"b461584211ac1833270ce57012470931433410147f21b7bbc92e13735e9d4731"} Nov 28 07:13:57 crc kubenswrapper[4922]: I1128 07:13:57.712331 4922 pod_startup_latency_tracker.go:104] 
"Observed pod startup duration" pod="openstack/ovsdbserver-nb-0" podStartSLOduration=22.823432362 podStartE2EDuration="32.712309416s" podCreationTimestamp="2025-11-28 07:13:25 +0000 UTC" firstStartedPulling="2025-11-28 07:13:47.250941485 +0000 UTC m=+1272.171337087" lastFinishedPulling="2025-11-28 07:13:57.139818559 +0000 UTC m=+1282.060214141" observedRunningTime="2025-11-28 07:13:57.710827367 +0000 UTC m=+1282.631222939" watchObservedRunningTime="2025-11-28 07:13:57.712309416 +0000 UTC m=+1282.632704998" Nov 28 07:13:57 crc kubenswrapper[4922]: I1128 07:13:57.743514 4922 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovsdbserver-sb-0" podStartSLOduration=18.368226156 podStartE2EDuration="28.743494383s" podCreationTimestamp="2025-11-28 07:13:29 +0000 UTC" firstStartedPulling="2025-11-28 07:13:46.751708481 +0000 UTC m=+1271.672104063" lastFinishedPulling="2025-11-28 07:13:57.126976698 +0000 UTC m=+1282.047372290" observedRunningTime="2025-11-28 07:13:57.736231881 +0000 UTC m=+1282.656627463" watchObservedRunningTime="2025-11-28 07:13:57.743494383 +0000 UTC m=+1282.663889965" Nov 28 07:13:57 crc kubenswrapper[4922]: I1128 07:13:57.856382 4922 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/ovsdbserver-sb-0" Nov 28 07:13:57 crc kubenswrapper[4922]: I1128 07:13:57.899447 4922 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/ovsdbserver-sb-0" Nov 28 07:13:58 crc kubenswrapper[4922]: I1128 07:13:58.710385 4922 generic.go:334] "Generic (PLEG): container finished" podID="89786935-51a3-4851-94e2-5b15218b914e" containerID="e77ef4f2b656f6b7c9ae9f08c2e96d019b834e44090e661dcfc9c84a3cad0aef" exitCode=0 Nov 28 07:13:58 crc kubenswrapper[4922]: I1128 07:13:58.710495 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-8446fd7c75-29qqr" event={"ID":"89786935-51a3-4851-94e2-5b15218b914e","Type":"ContainerDied","Data":"e77ef4f2b656f6b7c9ae9f08c2e96d019b834e44090e661dcfc9c84a3cad0aef"} Nov 28 07:13:58 crc kubenswrapper[4922]: I1128 07:13:58.711375 4922 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovsdbserver-sb-0" Nov 28 07:13:58 crc kubenswrapper[4922]: I1128 07:13:58.776811 4922 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovsdbserver-sb-0" Nov 28 07:13:59 crc kubenswrapper[4922]: I1128 07:13:59.074761 4922 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-57dc4c6697-m7tx4"] Nov 28 07:13:59 crc kubenswrapper[4922]: I1128 07:13:59.134257 4922 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-fbf586c4f-9g6bk"] Nov 28 07:13:59 crc kubenswrapper[4922]: I1128 07:13:59.135517 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-fbf586c4f-9g6bk" Nov 28 07:13:59 crc kubenswrapper[4922]: I1128 07:13:59.138122 4922 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovsdbserver-sb" Nov 28 07:13:59 crc kubenswrapper[4922]: I1128 07:13:59.141413 4922 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-metrics-4jk5t"] Nov 28 07:13:59 crc kubenswrapper[4922]: I1128 07:13:59.142231 4922 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-controller-metrics-4jk5t" Nov 28 07:13:59 crc kubenswrapper[4922]: I1128 07:13:59.143154 4922 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovncontroller-metrics-config" Nov 28 07:13:59 crc kubenswrapper[4922]: I1128 07:13:59.147701 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-fbf586c4f-9g6bk"] Nov 28 07:13:59 crc kubenswrapper[4922]: I1128 07:13:59.154208 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-metrics-4jk5t"] Nov 28 07:13:59 crc kubenswrapper[4922]: I1128 07:13:59.190618 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovs-rundir\" (UniqueName: \"kubernetes.io/host-path/2070fbd8-e847-4b99-ba55-4579804bbc57-ovs-rundir\") pod \"ovn-controller-metrics-4jk5t\" (UID: \"2070fbd8-e847-4b99-ba55-4579804bbc57\") " pod="openstack/ovn-controller-metrics-4jk5t" Nov 28 07:13:59 crc kubenswrapper[4922]: I1128 07:13:59.190783 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/5408d508-e0e4-46e7-a4a8-fd6982ad6cfe-ovsdbserver-sb\") pod \"dnsmasq-dns-fbf586c4f-9g6bk\" (UID: \"5408d508-e0e4-46e7-a4a8-fd6982ad6cfe\") " pod="openstack/dnsmasq-dns-fbf586c4f-9g6bk" Nov 28 07:13:59 crc kubenswrapper[4922]: I1128 07:13:59.190863 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/5408d508-e0e4-46e7-a4a8-fd6982ad6cfe-dns-svc\") pod \"dnsmasq-dns-fbf586c4f-9g6bk\" (UID: \"5408d508-e0e4-46e7-a4a8-fd6982ad6cfe\") " pod="openstack/dnsmasq-dns-fbf586c4f-9g6bk" Nov 28 07:13:59 crc kubenswrapper[4922]: I1128 07:13:59.190914 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5408d508-e0e4-46e7-a4a8-fd6982ad6cfe-config\") pod \"dnsmasq-dns-fbf586c4f-9g6bk\" (UID: \"5408d508-e0e4-46e7-a4a8-fd6982ad6cfe\") " pod="openstack/dnsmasq-dns-fbf586c4f-9g6bk" Nov 28 07:13:59 crc kubenswrapper[4922]: I1128 07:13:59.190943 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2070fbd8-e847-4b99-ba55-4579804bbc57-combined-ca-bundle\") pod \"ovn-controller-metrics-4jk5t\" (UID: \"2070fbd8-e847-4b99-ba55-4579804bbc57\") " pod="openstack/ovn-controller-metrics-4jk5t" Nov 28 07:13:59 crc kubenswrapper[4922]: I1128 07:13:59.191093 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2070fbd8-e847-4b99-ba55-4579804bbc57-config\") pod \"ovn-controller-metrics-4jk5t\" (UID: \"2070fbd8-e847-4b99-ba55-4579804bbc57\") " pod="openstack/ovn-controller-metrics-4jk5t" Nov 28 07:13:59 crc kubenswrapper[4922]: I1128 07:13:59.191145 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hwdv9\" (UniqueName: \"kubernetes.io/projected/2070fbd8-e847-4b99-ba55-4579804bbc57-kube-api-access-hwdv9\") pod \"ovn-controller-metrics-4jk5t\" (UID: \"2070fbd8-e847-4b99-ba55-4579804bbc57\") " pod="openstack/ovn-controller-metrics-4jk5t" Nov 28 07:13:59 crc kubenswrapper[4922]: I1128 07:13:59.191176 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"kube-api-access-wggp4\" (UniqueName: \"kubernetes.io/projected/5408d508-e0e4-46e7-a4a8-fd6982ad6cfe-kube-api-access-wggp4\") pod \"dnsmasq-dns-fbf586c4f-9g6bk\" (UID: \"5408d508-e0e4-46e7-a4a8-fd6982ad6cfe\") " pod="openstack/dnsmasq-dns-fbf586c4f-9g6bk" Nov 28 07:13:59 crc kubenswrapper[4922]: I1128 07:13:59.191255 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/2070fbd8-e847-4b99-ba55-4579804bbc57-metrics-certs-tls-certs\") pod \"ovn-controller-metrics-4jk5t\" (UID: \"2070fbd8-e847-4b99-ba55-4579804bbc57\") " pod="openstack/ovn-controller-metrics-4jk5t" Nov 28 07:13:59 crc kubenswrapper[4922]: I1128 07:13:59.191306 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/host-path/2070fbd8-e847-4b99-ba55-4579804bbc57-ovn-rundir\") pod \"ovn-controller-metrics-4jk5t\" (UID: \"2070fbd8-e847-4b99-ba55-4579804bbc57\") " pod="openstack/ovn-controller-metrics-4jk5t" Nov 28 07:13:59 crc kubenswrapper[4922]: I1128 07:13:59.294496 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/5408d508-e0e4-46e7-a4a8-fd6982ad6cfe-dns-svc\") pod \"dnsmasq-dns-fbf586c4f-9g6bk\" (UID: \"5408d508-e0e4-46e7-a4a8-fd6982ad6cfe\") " pod="openstack/dnsmasq-dns-fbf586c4f-9g6bk" Nov 28 07:13:59 crc kubenswrapper[4922]: I1128 07:13:59.295394 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5408d508-e0e4-46e7-a4a8-fd6982ad6cfe-config\") pod \"dnsmasq-dns-fbf586c4f-9g6bk\" (UID: \"5408d508-e0e4-46e7-a4a8-fd6982ad6cfe\") " pod="openstack/dnsmasq-dns-fbf586c4f-9g6bk" Nov 28 07:13:59 crc kubenswrapper[4922]: I1128 07:13:59.295458 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2070fbd8-e847-4b99-ba55-4579804bbc57-combined-ca-bundle\") pod \"ovn-controller-metrics-4jk5t\" (UID: \"2070fbd8-e847-4b99-ba55-4579804bbc57\") " pod="openstack/ovn-controller-metrics-4jk5t" Nov 28 07:13:59 crc kubenswrapper[4922]: I1128 07:13:59.295648 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2070fbd8-e847-4b99-ba55-4579804bbc57-config\") pod \"ovn-controller-metrics-4jk5t\" (UID: \"2070fbd8-e847-4b99-ba55-4579804bbc57\") " pod="openstack/ovn-controller-metrics-4jk5t" Nov 28 07:13:59 crc kubenswrapper[4922]: I1128 07:13:59.298646 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hwdv9\" (UniqueName: \"kubernetes.io/projected/2070fbd8-e847-4b99-ba55-4579804bbc57-kube-api-access-hwdv9\") pod \"ovn-controller-metrics-4jk5t\" (UID: \"2070fbd8-e847-4b99-ba55-4579804bbc57\") " pod="openstack/ovn-controller-metrics-4jk5t" Nov 28 07:13:59 crc kubenswrapper[4922]: I1128 07:13:59.298712 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wggp4\" (UniqueName: \"kubernetes.io/projected/5408d508-e0e4-46e7-a4a8-fd6982ad6cfe-kube-api-access-wggp4\") pod \"dnsmasq-dns-fbf586c4f-9g6bk\" (UID: \"5408d508-e0e4-46e7-a4a8-fd6982ad6cfe\") " pod="openstack/dnsmasq-dns-fbf586c4f-9g6bk" Nov 28 07:13:59 crc kubenswrapper[4922]: I1128 07:13:59.298771 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/2070fbd8-e847-4b99-ba55-4579804bbc57-metrics-certs-tls-certs\") pod \"ovn-controller-metrics-4jk5t\" (UID: \"2070fbd8-e847-4b99-ba55-4579804bbc57\") " pod="openstack/ovn-controller-metrics-4jk5t" Nov 28 07:13:59 crc kubenswrapper[4922]: I1128 07:13:59.298824 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/host-path/2070fbd8-e847-4b99-ba55-4579804bbc57-ovn-rundir\") pod \"ovn-controller-metrics-4jk5t\" (UID: \"2070fbd8-e847-4b99-ba55-4579804bbc57\") " pod="openstack/ovn-controller-metrics-4jk5t" Nov 28 07:13:59 crc kubenswrapper[4922]: I1128 07:13:59.298878 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovs-rundir\" (UniqueName: \"kubernetes.io/host-path/2070fbd8-e847-4b99-ba55-4579804bbc57-ovs-rundir\") pod \"ovn-controller-metrics-4jk5t\" (UID: \"2070fbd8-e847-4b99-ba55-4579804bbc57\") " pod="openstack/ovn-controller-metrics-4jk5t" Nov 28 07:13:59 crc kubenswrapper[4922]: I1128 07:13:59.298957 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/5408d508-e0e4-46e7-a4a8-fd6982ad6cfe-ovsdbserver-sb\") pod \"dnsmasq-dns-fbf586c4f-9g6bk\" (UID: \"5408d508-e0e4-46e7-a4a8-fd6982ad6cfe\") " pod="openstack/dnsmasq-dns-fbf586c4f-9g6bk" Nov 28 07:13:59 crc kubenswrapper[4922]: I1128 07:13:59.300244 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/5408d508-e0e4-46e7-a4a8-fd6982ad6cfe-dns-svc\") pod \"dnsmasq-dns-fbf586c4f-9g6bk\" (UID: \"5408d508-e0e4-46e7-a4a8-fd6982ad6cfe\") " pod="openstack/dnsmasq-dns-fbf586c4f-9g6bk" Nov 28 07:13:59 crc kubenswrapper[4922]: I1128 07:13:59.300751 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/5408d508-e0e4-46e7-a4a8-fd6982ad6cfe-ovsdbserver-sb\") pod \"dnsmasq-dns-fbf586c4f-9g6bk\" (UID: \"5408d508-e0e4-46e7-a4a8-fd6982ad6cfe\") " pod="openstack/dnsmasq-dns-fbf586c4f-9g6bk" Nov 28 07:13:59 crc kubenswrapper[4922]: I1128 07:13:59.300805 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5408d508-e0e4-46e7-a4a8-fd6982ad6cfe-config\") pod \"dnsmasq-dns-fbf586c4f-9g6bk\" (UID: \"5408d508-e0e4-46e7-a4a8-fd6982ad6cfe\") " pod="openstack/dnsmasq-dns-fbf586c4f-9g6bk" Nov 28 07:13:59 crc kubenswrapper[4922]: I1128 07:13:59.301514 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2070fbd8-e847-4b99-ba55-4579804bbc57-config\") pod \"ovn-controller-metrics-4jk5t\" (UID: \"2070fbd8-e847-4b99-ba55-4579804bbc57\") " pod="openstack/ovn-controller-metrics-4jk5t" Nov 28 07:13:59 crc kubenswrapper[4922]: I1128 07:13:59.301603 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovs-rundir\" (UniqueName: \"kubernetes.io/host-path/2070fbd8-e847-4b99-ba55-4579804bbc57-ovs-rundir\") pod \"ovn-controller-metrics-4jk5t\" (UID: \"2070fbd8-e847-4b99-ba55-4579804bbc57\") " pod="openstack/ovn-controller-metrics-4jk5t" Nov 28 07:13:59 crc kubenswrapper[4922]: I1128 07:13:59.303910 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/host-path/2070fbd8-e847-4b99-ba55-4579804bbc57-ovn-rundir\") pod \"ovn-controller-metrics-4jk5t\" (UID: 
\"2070fbd8-e847-4b99-ba55-4579804bbc57\") " pod="openstack/ovn-controller-metrics-4jk5t" Nov 28 07:13:59 crc kubenswrapper[4922]: I1128 07:13:59.306115 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2070fbd8-e847-4b99-ba55-4579804bbc57-combined-ca-bundle\") pod \"ovn-controller-metrics-4jk5t\" (UID: \"2070fbd8-e847-4b99-ba55-4579804bbc57\") " pod="openstack/ovn-controller-metrics-4jk5t" Nov 28 07:13:59 crc kubenswrapper[4922]: I1128 07:13:59.316065 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/2070fbd8-e847-4b99-ba55-4579804bbc57-metrics-certs-tls-certs\") pod \"ovn-controller-metrics-4jk5t\" (UID: \"2070fbd8-e847-4b99-ba55-4579804bbc57\") " pod="openstack/ovn-controller-metrics-4jk5t" Nov 28 07:13:59 crc kubenswrapper[4922]: I1128 07:13:59.319078 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wggp4\" (UniqueName: \"kubernetes.io/projected/5408d508-e0e4-46e7-a4a8-fd6982ad6cfe-kube-api-access-wggp4\") pod \"dnsmasq-dns-fbf586c4f-9g6bk\" (UID: \"5408d508-e0e4-46e7-a4a8-fd6982ad6cfe\") " pod="openstack/dnsmasq-dns-fbf586c4f-9g6bk" Nov 28 07:13:59 crc kubenswrapper[4922]: I1128 07:13:59.319587 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hwdv9\" (UniqueName: \"kubernetes.io/projected/2070fbd8-e847-4b99-ba55-4579804bbc57-kube-api-access-hwdv9\") pod \"ovn-controller-metrics-4jk5t\" (UID: \"2070fbd8-e847-4b99-ba55-4579804bbc57\") " pod="openstack/ovn-controller-metrics-4jk5t" Nov 28 07:13:59 crc kubenswrapper[4922]: I1128 07:13:59.449777 4922 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-8446fd7c75-29qqr"] Nov 28 07:13:59 crc kubenswrapper[4922]: I1128 07:13:59.450160 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-fbf586c4f-9g6bk" Nov 28 07:13:59 crc kubenswrapper[4922]: I1128 07:13:59.453539 4922 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-57dc4c6697-m7tx4" Nov 28 07:13:59 crc kubenswrapper[4922]: I1128 07:13:59.458473 4922 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-58bd875f97-xdqh2"] Nov 28 07:13:59 crc kubenswrapper[4922]: I1128 07:13:59.464491 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-58bd875f97-xdqh2" Nov 28 07:13:59 crc kubenswrapper[4922]: I1128 07:13:59.465988 4922 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-controller-metrics-4jk5t" Nov 28 07:13:59 crc kubenswrapper[4922]: I1128 07:13:59.467971 4922 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovsdbserver-nb" Nov 28 07:13:59 crc kubenswrapper[4922]: I1128 07:13:59.501340 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-58bd875f97-xdqh2"] Nov 28 07:13:59 crc kubenswrapper[4922]: I1128 07:13:59.520588 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-b9zkx\" (UniqueName: \"kubernetes.io/projected/aa2656f2-23d1-46c5-9cb9-eed7c3cddea2-kube-api-access-b9zkx\") pod \"aa2656f2-23d1-46c5-9cb9-eed7c3cddea2\" (UID: \"aa2656f2-23d1-46c5-9cb9-eed7c3cddea2\") " Nov 28 07:13:59 crc kubenswrapper[4922]: I1128 07:13:59.520653 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/aa2656f2-23d1-46c5-9cb9-eed7c3cddea2-dns-svc\") pod \"aa2656f2-23d1-46c5-9cb9-eed7c3cddea2\" (UID: \"aa2656f2-23d1-46c5-9cb9-eed7c3cddea2\") " Nov 28 07:13:59 crc kubenswrapper[4922]: I1128 07:13:59.520745 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/aa2656f2-23d1-46c5-9cb9-eed7c3cddea2-config\") pod \"aa2656f2-23d1-46c5-9cb9-eed7c3cddea2\" (UID: \"aa2656f2-23d1-46c5-9cb9-eed7c3cddea2\") " Nov 28 07:13:59 crc kubenswrapper[4922]: I1128 07:13:59.521073 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/9a769fc1-9d74-46f9-b3e3-256cd3a930ff-ovsdbserver-nb\") pod \"dnsmasq-dns-58bd875f97-xdqh2\" (UID: \"9a769fc1-9d74-46f9-b3e3-256cd3a930ff\") " pod="openstack/dnsmasq-dns-58bd875f97-xdqh2" Nov 28 07:13:59 crc kubenswrapper[4922]: I1128 07:13:59.521120 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9a769fc1-9d74-46f9-b3e3-256cd3a930ff-config\") pod \"dnsmasq-dns-58bd875f97-xdqh2\" (UID: \"9a769fc1-9d74-46f9-b3e3-256cd3a930ff\") " pod="openstack/dnsmasq-dns-58bd875f97-xdqh2" Nov 28 07:13:59 crc kubenswrapper[4922]: I1128 07:13:59.521175 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/9a769fc1-9d74-46f9-b3e3-256cd3a930ff-dns-svc\") pod \"dnsmasq-dns-58bd875f97-xdqh2\" (UID: \"9a769fc1-9d74-46f9-b3e3-256cd3a930ff\") " pod="openstack/dnsmasq-dns-58bd875f97-xdqh2" Nov 28 07:13:59 crc kubenswrapper[4922]: I1128 07:13:59.521236 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hs9kx\" (UniqueName: \"kubernetes.io/projected/9a769fc1-9d74-46f9-b3e3-256cd3a930ff-kube-api-access-hs9kx\") pod \"dnsmasq-dns-58bd875f97-xdqh2\" (UID: \"9a769fc1-9d74-46f9-b3e3-256cd3a930ff\") " pod="openstack/dnsmasq-dns-58bd875f97-xdqh2" Nov 28 07:13:59 crc kubenswrapper[4922]: I1128 07:13:59.521255 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/9a769fc1-9d74-46f9-b3e3-256cd3a930ff-ovsdbserver-sb\") pod \"dnsmasq-dns-58bd875f97-xdqh2\" (UID: \"9a769fc1-9d74-46f9-b3e3-256cd3a930ff\") " pod="openstack/dnsmasq-dns-58bd875f97-xdqh2" Nov 28 07:13:59 crc kubenswrapper[4922]: I1128 07:13:59.523206 4922 
operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/aa2656f2-23d1-46c5-9cb9-eed7c3cddea2-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "aa2656f2-23d1-46c5-9cb9-eed7c3cddea2" (UID: "aa2656f2-23d1-46c5-9cb9-eed7c3cddea2"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 07:13:59 crc kubenswrapper[4922]: I1128 07:13:59.524009 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/aa2656f2-23d1-46c5-9cb9-eed7c3cddea2-config" (OuterVolumeSpecName: "config") pod "aa2656f2-23d1-46c5-9cb9-eed7c3cddea2" (UID: "aa2656f2-23d1-46c5-9cb9-eed7c3cddea2"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 07:13:59 crc kubenswrapper[4922]: I1128 07:13:59.528640 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/aa2656f2-23d1-46c5-9cb9-eed7c3cddea2-kube-api-access-b9zkx" (OuterVolumeSpecName: "kube-api-access-b9zkx") pod "aa2656f2-23d1-46c5-9cb9-eed7c3cddea2" (UID: "aa2656f2-23d1-46c5-9cb9-eed7c3cddea2"). InnerVolumeSpecName "kube-api-access-b9zkx". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 07:13:59 crc kubenswrapper[4922]: I1128 07:13:59.622335 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/9a769fc1-9d74-46f9-b3e3-256cd3a930ff-dns-svc\") pod \"dnsmasq-dns-58bd875f97-xdqh2\" (UID: \"9a769fc1-9d74-46f9-b3e3-256cd3a930ff\") " pod="openstack/dnsmasq-dns-58bd875f97-xdqh2" Nov 28 07:13:59 crc kubenswrapper[4922]: I1128 07:13:59.622689 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hs9kx\" (UniqueName: \"kubernetes.io/projected/9a769fc1-9d74-46f9-b3e3-256cd3a930ff-kube-api-access-hs9kx\") pod \"dnsmasq-dns-58bd875f97-xdqh2\" (UID: \"9a769fc1-9d74-46f9-b3e3-256cd3a930ff\") " pod="openstack/dnsmasq-dns-58bd875f97-xdqh2" Nov 28 07:13:59 crc kubenswrapper[4922]: I1128 07:13:59.622720 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/9a769fc1-9d74-46f9-b3e3-256cd3a930ff-ovsdbserver-sb\") pod \"dnsmasq-dns-58bd875f97-xdqh2\" (UID: \"9a769fc1-9d74-46f9-b3e3-256cd3a930ff\") " pod="openstack/dnsmasq-dns-58bd875f97-xdqh2" Nov 28 07:13:59 crc kubenswrapper[4922]: I1128 07:13:59.622794 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/9a769fc1-9d74-46f9-b3e3-256cd3a930ff-ovsdbserver-nb\") pod \"dnsmasq-dns-58bd875f97-xdqh2\" (UID: \"9a769fc1-9d74-46f9-b3e3-256cd3a930ff\") " pod="openstack/dnsmasq-dns-58bd875f97-xdqh2" Nov 28 07:13:59 crc kubenswrapper[4922]: I1128 07:13:59.622837 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9a769fc1-9d74-46f9-b3e3-256cd3a930ff-config\") pod \"dnsmasq-dns-58bd875f97-xdqh2\" (UID: \"9a769fc1-9d74-46f9-b3e3-256cd3a930ff\") " pod="openstack/dnsmasq-dns-58bd875f97-xdqh2" Nov 28 07:13:59 crc kubenswrapper[4922]: I1128 07:13:59.622905 4922 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/aa2656f2-23d1-46c5-9cb9-eed7c3cddea2-config\") on node \"crc\" DevicePath \"\"" Nov 28 07:13:59 crc kubenswrapper[4922]: I1128 07:13:59.622921 4922 reconciler_common.go:293] "Volume detached for volume 
\"kube-api-access-b9zkx\" (UniqueName: \"kubernetes.io/projected/aa2656f2-23d1-46c5-9cb9-eed7c3cddea2-kube-api-access-b9zkx\") on node \"crc\" DevicePath \"\"" Nov 28 07:13:59 crc kubenswrapper[4922]: I1128 07:13:59.622935 4922 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/aa2656f2-23d1-46c5-9cb9-eed7c3cddea2-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 28 07:13:59 crc kubenswrapper[4922]: I1128 07:13:59.623769 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/9a769fc1-9d74-46f9-b3e3-256cd3a930ff-dns-svc\") pod \"dnsmasq-dns-58bd875f97-xdqh2\" (UID: \"9a769fc1-9d74-46f9-b3e3-256cd3a930ff\") " pod="openstack/dnsmasq-dns-58bd875f97-xdqh2" Nov 28 07:13:59 crc kubenswrapper[4922]: I1128 07:13:59.623858 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9a769fc1-9d74-46f9-b3e3-256cd3a930ff-config\") pod \"dnsmasq-dns-58bd875f97-xdqh2\" (UID: \"9a769fc1-9d74-46f9-b3e3-256cd3a930ff\") " pod="openstack/dnsmasq-dns-58bd875f97-xdqh2" Nov 28 07:13:59 crc kubenswrapper[4922]: I1128 07:13:59.624636 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/9a769fc1-9d74-46f9-b3e3-256cd3a930ff-ovsdbserver-sb\") pod \"dnsmasq-dns-58bd875f97-xdqh2\" (UID: \"9a769fc1-9d74-46f9-b3e3-256cd3a930ff\") " pod="openstack/dnsmasq-dns-58bd875f97-xdqh2" Nov 28 07:13:59 crc kubenswrapper[4922]: I1128 07:13:59.625203 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/9a769fc1-9d74-46f9-b3e3-256cd3a930ff-ovsdbserver-nb\") pod \"dnsmasq-dns-58bd875f97-xdqh2\" (UID: \"9a769fc1-9d74-46f9-b3e3-256cd3a930ff\") " pod="openstack/dnsmasq-dns-58bd875f97-xdqh2" Nov 28 07:13:59 crc kubenswrapper[4922]: I1128 07:13:59.644090 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hs9kx\" (UniqueName: \"kubernetes.io/projected/9a769fc1-9d74-46f9-b3e3-256cd3a930ff-kube-api-access-hs9kx\") pod \"dnsmasq-dns-58bd875f97-xdqh2\" (UID: \"9a769fc1-9d74-46f9-b3e3-256cd3a930ff\") " pod="openstack/dnsmasq-dns-58bd875f97-xdqh2" Nov 28 07:13:59 crc kubenswrapper[4922]: I1128 07:13:59.722457 4922 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-57dc4c6697-m7tx4" Nov 28 07:13:59 crc kubenswrapper[4922]: I1128 07:13:59.722452 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-57dc4c6697-m7tx4" event={"ID":"aa2656f2-23d1-46c5-9cb9-eed7c3cddea2","Type":"ContainerDied","Data":"d7a3c024ff2aa5b7eef34cea80b65fc5bdd9829258d4829fc2aa76343c7b3bc9"} Nov 28 07:13:59 crc kubenswrapper[4922]: I1128 07:13:59.724294 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-8446fd7c75-29qqr" event={"ID":"89786935-51a3-4851-94e2-5b15218b914e","Type":"ContainerStarted","Data":"89435e8a57d6209fc4a7fcf2f0c4bc1d6c75176b7d5386737b24707a08af0d8b"} Nov 28 07:13:59 crc kubenswrapper[4922]: I1128 07:13:59.724340 4922 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-8446fd7c75-29qqr" podUID="89786935-51a3-4851-94e2-5b15218b914e" containerName="dnsmasq-dns" containerID="cri-o://89435e8a57d6209fc4a7fcf2f0c4bc1d6c75176b7d5386737b24707a08af0d8b" gracePeriod=10 Nov 28 07:13:59 crc kubenswrapper[4922]: I1128 07:13:59.724429 4922 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-8446fd7c75-29qqr" Nov 28 07:13:59 crc kubenswrapper[4922]: I1128 07:13:59.729517 4922 generic.go:334] "Generic (PLEG): container finished" podID="c39356f2-8f5d-45d3-8188-7d9428c4d8bf" containerID="69d69b3f84197c0fb9fcf8a14b6ee2ee83a51d46a61eb90348dde8a31602b9e1" exitCode=0 Nov 28 07:13:59 crc kubenswrapper[4922]: I1128 07:13:59.730182 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"c39356f2-8f5d-45d3-8188-7d9428c4d8bf","Type":"ContainerDied","Data":"69d69b3f84197c0fb9fcf8a14b6ee2ee83a51d46a61eb90348dde8a31602b9e1"} Nov 28 07:13:59 crc kubenswrapper[4922]: I1128 07:13:59.744588 4922 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-8446fd7c75-29qqr" podStartSLOduration=3.105509877 podStartE2EDuration="42.744568209s" podCreationTimestamp="2025-11-28 07:13:17 +0000 UTC" firstStartedPulling="2025-11-28 07:13:18.285435383 +0000 UTC m=+1243.205830965" lastFinishedPulling="2025-11-28 07:13:57.924493705 +0000 UTC m=+1282.844889297" observedRunningTime="2025-11-28 07:13:59.742251207 +0000 UTC m=+1284.662646789" watchObservedRunningTime="2025-11-28 07:13:59.744568209 +0000 UTC m=+1284.664963791" Nov 28 07:13:59 crc kubenswrapper[4922]: I1128 07:13:59.830892 4922 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-58bd875f97-xdqh2" Nov 28 07:13:59 crc kubenswrapper[4922]: I1128 07:13:59.834324 4922 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-57dc4c6697-m7tx4"] Nov 28 07:13:59 crc kubenswrapper[4922]: I1128 07:13:59.840884 4922 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-57dc4c6697-m7tx4"] Nov 28 07:13:59 crc kubenswrapper[4922]: I1128 07:13:59.847117 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-metrics-4jk5t"] Nov 28 07:13:59 crc kubenswrapper[4922]: I1128 07:13:59.924193 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-fbf586c4f-9g6bk"] Nov 28 07:13:59 crc kubenswrapper[4922]: W1128 07:13:59.930965 4922 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod5408d508_e0e4_46e7_a4a8_fd6982ad6cfe.slice/crio-644781ac4a58aed9756401a1e2268a96d7b03e5d8d7beba51cf4d2f39e366583 WatchSource:0}: Error finding container 644781ac4a58aed9756401a1e2268a96d7b03e5d8d7beba51cf4d2f39e366583: Status 404 returned error can't find the container with id 644781ac4a58aed9756401a1e2268a96d7b03e5d8d7beba51cf4d2f39e366583 Nov 28 07:14:00 crc kubenswrapper[4922]: I1128 07:14:00.182486 4922 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-8446fd7c75-29qqr" Nov 28 07:14:00 crc kubenswrapper[4922]: I1128 07:14:00.361291 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/89786935-51a3-4851-94e2-5b15218b914e-dns-svc\") pod \"89786935-51a3-4851-94e2-5b15218b914e\" (UID: \"89786935-51a3-4851-94e2-5b15218b914e\") " Nov 28 07:14:00 crc kubenswrapper[4922]: I1128 07:14:00.361424 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6ln7v\" (UniqueName: \"kubernetes.io/projected/89786935-51a3-4851-94e2-5b15218b914e-kube-api-access-6ln7v\") pod \"89786935-51a3-4851-94e2-5b15218b914e\" (UID: \"89786935-51a3-4851-94e2-5b15218b914e\") " Nov 28 07:14:00 crc kubenswrapper[4922]: I1128 07:14:00.361480 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/89786935-51a3-4851-94e2-5b15218b914e-config\") pod \"89786935-51a3-4851-94e2-5b15218b914e\" (UID: \"89786935-51a3-4851-94e2-5b15218b914e\") " Nov 28 07:14:00 crc kubenswrapper[4922]: I1128 07:14:00.365425 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/89786935-51a3-4851-94e2-5b15218b914e-kube-api-access-6ln7v" (OuterVolumeSpecName: "kube-api-access-6ln7v") pod "89786935-51a3-4851-94e2-5b15218b914e" (UID: "89786935-51a3-4851-94e2-5b15218b914e"). InnerVolumeSpecName "kube-api-access-6ln7v". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 07:14:00 crc kubenswrapper[4922]: I1128 07:14:00.367145 4922 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/ovsdbserver-nb-0" Nov 28 07:14:00 crc kubenswrapper[4922]: I1128 07:14:00.397143 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/89786935-51a3-4851-94e2-5b15218b914e-config" (OuterVolumeSpecName: "config") pod "89786935-51a3-4851-94e2-5b15218b914e" (UID: "89786935-51a3-4851-94e2-5b15218b914e"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 07:14:00 crc kubenswrapper[4922]: I1128 07:14:00.407846 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/89786935-51a3-4851-94e2-5b15218b914e-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "89786935-51a3-4851-94e2-5b15218b914e" (UID: "89786935-51a3-4851-94e2-5b15218b914e"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 07:14:00 crc kubenswrapper[4922]: I1128 07:14:00.408912 4922 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/ovsdbserver-nb-0" Nov 28 07:14:00 crc kubenswrapper[4922]: I1128 07:14:00.443459 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-58bd875f97-xdqh2"] Nov 28 07:14:00 crc kubenswrapper[4922]: W1128 07:14:00.445615 4922 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod9a769fc1_9d74_46f9_b3e3_256cd3a930ff.slice/crio-0e91539fc0907e9f1d6ab2d67c460fadd1808d08be875440d707256919dae39e WatchSource:0}: Error finding container 0e91539fc0907e9f1d6ab2d67c460fadd1808d08be875440d707256919dae39e: Status 404 returned error can't find the container with id 0e91539fc0907e9f1d6ab2d67c460fadd1808d08be875440d707256919dae39e Nov 28 07:14:00 crc kubenswrapper[4922]: I1128 07:14:00.463358 4922 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6ln7v\" (UniqueName: \"kubernetes.io/projected/89786935-51a3-4851-94e2-5b15218b914e-kube-api-access-6ln7v\") on node \"crc\" DevicePath \"\"" Nov 28 07:14:00 crc kubenswrapper[4922]: I1128 07:14:00.463386 4922 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/89786935-51a3-4851-94e2-5b15218b914e-config\") on node \"crc\" DevicePath \"\"" Nov 28 07:14:00 crc kubenswrapper[4922]: I1128 07:14:00.463396 4922 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/89786935-51a3-4851-94e2-5b15218b914e-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 28 07:14:00 crc kubenswrapper[4922]: I1128 07:14:00.761547 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"c39356f2-8f5d-45d3-8188-7d9428c4d8bf","Type":"ContainerStarted","Data":"52ecbf9b71e70de5c3daba2523d0579eaa9f475b46a7fa4424c1f79302e0602e"} Nov 28 07:14:00 crc kubenswrapper[4922]: I1128 07:14:00.778521 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-metrics-4jk5t" event={"ID":"2070fbd8-e847-4b99-ba55-4579804bbc57","Type":"ContainerStarted","Data":"7c57b6ea918e23a8d2aa6a00589247875694d1068bfad7021c5c76b0cae05bf4"} Nov 28 07:14:00 crc kubenswrapper[4922]: I1128 07:14:00.778578 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-metrics-4jk5t" event={"ID":"2070fbd8-e847-4b99-ba55-4579804bbc57","Type":"ContainerStarted","Data":"fd772f1a0597fd171709b6d336192b3f6450b377f74b33a65abea622abc9a97f"} Nov 28 07:14:00 crc kubenswrapper[4922]: I1128 07:14:00.796290 4922 generic.go:334] "Generic (PLEG): container finished" podID="5408d508-e0e4-46e7-a4a8-fd6982ad6cfe" containerID="e80a187490ec82864b3efacf3b51a62d769bcb518d1bc1c78a7c77f93fabb92e" exitCode=0 Nov 28 07:14:00 crc kubenswrapper[4922]: I1128 07:14:00.796381 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-fbf586c4f-9g6bk" 
event={"ID":"5408d508-e0e4-46e7-a4a8-fd6982ad6cfe","Type":"ContainerDied","Data":"e80a187490ec82864b3efacf3b51a62d769bcb518d1bc1c78a7c77f93fabb92e"} Nov 28 07:14:00 crc kubenswrapper[4922]: I1128 07:14:00.796406 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-fbf586c4f-9g6bk" event={"ID":"5408d508-e0e4-46e7-a4a8-fd6982ad6cfe","Type":"ContainerStarted","Data":"644781ac4a58aed9756401a1e2268a96d7b03e5d8d7beba51cf4d2f39e366583"} Nov 28 07:14:00 crc kubenswrapper[4922]: I1128 07:14:00.798343 4922 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/openstack-galera-0" podStartSLOduration=-9223371994.056446 podStartE2EDuration="42.798330163s" podCreationTimestamp="2025-11-28 07:13:18 +0000 UTC" firstStartedPulling="2025-11-28 07:13:20.302092311 +0000 UTC m=+1245.222487893" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 07:14:00.796188996 +0000 UTC m=+1285.716584588" watchObservedRunningTime="2025-11-28 07:14:00.798330163 +0000 UTC m=+1285.718725745" Nov 28 07:14:00 crc kubenswrapper[4922]: I1128 07:14:00.813582 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-58bd875f97-xdqh2" event={"ID":"9a769fc1-9d74-46f9-b3e3-256cd3a930ff","Type":"ContainerStarted","Data":"11cb8f831009f0eff7259bbd41fe73ea654c518756df7306b462fa63ebda3db2"} Nov 28 07:14:00 crc kubenswrapper[4922]: I1128 07:14:00.813623 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-58bd875f97-xdqh2" event={"ID":"9a769fc1-9d74-46f9-b3e3-256cd3a930ff","Type":"ContainerStarted","Data":"0e91539fc0907e9f1d6ab2d67c460fadd1808d08be875440d707256919dae39e"} Nov 28 07:14:00 crc kubenswrapper[4922]: I1128 07:14:00.828491 4922 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-controller-metrics-4jk5t" podStartSLOduration=1.828473023 podStartE2EDuration="1.828473023s" podCreationTimestamp="2025-11-28 07:13:59 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 07:14:00.82685329 +0000 UTC m=+1285.747248882" watchObservedRunningTime="2025-11-28 07:14:00.828473023 +0000 UTC m=+1285.748868605" Nov 28 07:14:00 crc kubenswrapper[4922]: I1128 07:14:00.835170 4922 generic.go:334] "Generic (PLEG): container finished" podID="89786935-51a3-4851-94e2-5b15218b914e" containerID="89435e8a57d6209fc4a7fcf2f0c4bc1d6c75176b7d5386737b24707a08af0d8b" exitCode=0 Nov 28 07:14:00 crc kubenswrapper[4922]: I1128 07:14:00.835340 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-8446fd7c75-29qqr" event={"ID":"89786935-51a3-4851-94e2-5b15218b914e","Type":"ContainerDied","Data":"89435e8a57d6209fc4a7fcf2f0c4bc1d6c75176b7d5386737b24707a08af0d8b"} Nov 28 07:14:00 crc kubenswrapper[4922]: I1128 07:14:00.835388 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-8446fd7c75-29qqr" event={"ID":"89786935-51a3-4851-94e2-5b15218b914e","Type":"ContainerDied","Data":"b893fef02113ba4bfa9bdc86aa4ef22a610e418602ee48956020137c9a37c5ec"} Nov 28 07:14:00 crc kubenswrapper[4922]: I1128 07:14:00.835405 4922 scope.go:117] "RemoveContainer" containerID="89435e8a57d6209fc4a7fcf2f0c4bc1d6c75176b7d5386737b24707a08af0d8b" Nov 28 07:14:00 crc kubenswrapper[4922]: I1128 07:14:00.835559 4922 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-8446fd7c75-29qqr" Nov 28 07:14:00 crc kubenswrapper[4922]: I1128 07:14:00.837268 4922 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovsdbserver-nb-0" Nov 28 07:14:00 crc kubenswrapper[4922]: I1128 07:14:00.911110 4922 scope.go:117] "RemoveContainer" containerID="e77ef4f2b656f6b7c9ae9f08c2e96d019b834e44090e661dcfc9c84a3cad0aef" Nov 28 07:14:00 crc kubenswrapper[4922]: I1128 07:14:00.912846 4922 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-8446fd7c75-29qqr"] Nov 28 07:14:00 crc kubenswrapper[4922]: I1128 07:14:00.922747 4922 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-8446fd7c75-29qqr"] Nov 28 07:14:00 crc kubenswrapper[4922]: I1128 07:14:00.939265 4922 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovsdbserver-nb-0" Nov 28 07:14:00 crc kubenswrapper[4922]: I1128 07:14:00.949580 4922 scope.go:117] "RemoveContainer" containerID="89435e8a57d6209fc4a7fcf2f0c4bc1d6c75176b7d5386737b24707a08af0d8b" Nov 28 07:14:00 crc kubenswrapper[4922]: E1128 07:14:00.950095 4922 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"89435e8a57d6209fc4a7fcf2f0c4bc1d6c75176b7d5386737b24707a08af0d8b\": container with ID starting with 89435e8a57d6209fc4a7fcf2f0c4bc1d6c75176b7d5386737b24707a08af0d8b not found: ID does not exist" containerID="89435e8a57d6209fc4a7fcf2f0c4bc1d6c75176b7d5386737b24707a08af0d8b" Nov 28 07:14:00 crc kubenswrapper[4922]: I1128 07:14:00.950122 4922 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"89435e8a57d6209fc4a7fcf2f0c4bc1d6c75176b7d5386737b24707a08af0d8b"} err="failed to get container status \"89435e8a57d6209fc4a7fcf2f0c4bc1d6c75176b7d5386737b24707a08af0d8b\": rpc error: code = NotFound desc = could not find container \"89435e8a57d6209fc4a7fcf2f0c4bc1d6c75176b7d5386737b24707a08af0d8b\": container with ID starting with 89435e8a57d6209fc4a7fcf2f0c4bc1d6c75176b7d5386737b24707a08af0d8b not found: ID does not exist" Nov 28 07:14:00 crc kubenswrapper[4922]: I1128 07:14:00.950142 4922 scope.go:117] "RemoveContainer" containerID="e77ef4f2b656f6b7c9ae9f08c2e96d019b834e44090e661dcfc9c84a3cad0aef" Nov 28 07:14:00 crc kubenswrapper[4922]: E1128 07:14:00.950395 4922 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e77ef4f2b656f6b7c9ae9f08c2e96d019b834e44090e661dcfc9c84a3cad0aef\": container with ID starting with e77ef4f2b656f6b7c9ae9f08c2e96d019b834e44090e661dcfc9c84a3cad0aef not found: ID does not exist" containerID="e77ef4f2b656f6b7c9ae9f08c2e96d019b834e44090e661dcfc9c84a3cad0aef" Nov 28 07:14:00 crc kubenswrapper[4922]: I1128 07:14:00.950416 4922 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e77ef4f2b656f6b7c9ae9f08c2e96d019b834e44090e661dcfc9c84a3cad0aef"} err="failed to get container status \"e77ef4f2b656f6b7c9ae9f08c2e96d019b834e44090e661dcfc9c84a3cad0aef\": rpc error: code = NotFound desc = could not find container \"e77ef4f2b656f6b7c9ae9f08c2e96d019b834e44090e661dcfc9c84a3cad0aef\": container with ID starting with e77ef4f2b656f6b7c9ae9f08c2e96d019b834e44090e661dcfc9c84a3cad0aef not found: ID does not exist" Nov 28 07:14:01 crc kubenswrapper[4922]: I1128 07:14:01.347407 4922 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" 
pod="openstack/openstack-cell1-galera-0" Nov 28 07:14:01 crc kubenswrapper[4922]: I1128 07:14:01.348538 4922 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/openstack-cell1-galera-0" Nov 28 07:14:01 crc kubenswrapper[4922]: I1128 07:14:01.392850 4922 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-northd-0"] Nov 28 07:14:01 crc kubenswrapper[4922]: E1128 07:14:01.393489 4922 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="89786935-51a3-4851-94e2-5b15218b914e" containerName="dnsmasq-dns" Nov 28 07:14:01 crc kubenswrapper[4922]: I1128 07:14:01.393576 4922 state_mem.go:107] "Deleted CPUSet assignment" podUID="89786935-51a3-4851-94e2-5b15218b914e" containerName="dnsmasq-dns" Nov 28 07:14:01 crc kubenswrapper[4922]: E1128 07:14:01.393647 4922 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="89786935-51a3-4851-94e2-5b15218b914e" containerName="init" Nov 28 07:14:01 crc kubenswrapper[4922]: I1128 07:14:01.393710 4922 state_mem.go:107] "Deleted CPUSet assignment" podUID="89786935-51a3-4851-94e2-5b15218b914e" containerName="init" Nov 28 07:14:01 crc kubenswrapper[4922]: I1128 07:14:01.393985 4922 memory_manager.go:354] "RemoveStaleState removing state" podUID="89786935-51a3-4851-94e2-5b15218b914e" containerName="dnsmasq-dns" Nov 28 07:14:01 crc kubenswrapper[4922]: I1128 07:14:01.395191 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-northd-0" Nov 28 07:14:01 crc kubenswrapper[4922]: I1128 07:14:01.400783 4922 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovnnorthd-scripts" Nov 28 07:14:01 crc kubenswrapper[4922]: I1128 07:14:01.400831 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovnnorthd-ovnnorthd-dockercfg-v9d9f" Nov 28 07:14:01 crc kubenswrapper[4922]: I1128 07:14:01.401028 4922 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovnnorthd-config" Nov 28 07:14:01 crc kubenswrapper[4922]: I1128 07:14:01.401167 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovnnorthd-ovndbs" Nov 28 07:14:01 crc kubenswrapper[4922]: I1128 07:14:01.421589 4922 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="89786935-51a3-4851-94e2-5b15218b914e" path="/var/lib/kubelet/pods/89786935-51a3-4851-94e2-5b15218b914e/volumes" Nov 28 07:14:01 crc kubenswrapper[4922]: I1128 07:14:01.422508 4922 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="aa2656f2-23d1-46c5-9cb9-eed7c3cddea2" path="/var/lib/kubelet/pods/aa2656f2-23d1-46c5-9cb9-eed7c3cddea2/volumes" Nov 28 07:14:01 crc kubenswrapper[4922]: I1128 07:14:01.422924 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-northd-0"] Nov 28 07:14:01 crc kubenswrapper[4922]: I1128 07:14:01.472120 4922 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/openstack-cell1-galera-0" Nov 28 07:14:01 crc kubenswrapper[4922]: I1128 07:14:01.482302 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e070595b-ded5-4ba1-8e5d-10dee3f64439-config\") pod \"ovn-northd-0\" (UID: \"e070595b-ded5-4ba1-8e5d-10dee3f64439\") " pod="openstack/ovn-northd-0" Nov 28 07:14:01 crc kubenswrapper[4922]: I1128 07:14:01.482595 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-rundir\" (UniqueName: 
\"kubernetes.io/empty-dir/e070595b-ded5-4ba1-8e5d-10dee3f64439-ovn-rundir\") pod \"ovn-northd-0\" (UID: \"e070595b-ded5-4ba1-8e5d-10dee3f64439\") " pod="openstack/ovn-northd-0" Nov 28 07:14:01 crc kubenswrapper[4922]: I1128 07:14:01.482708 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-htpqg\" (UniqueName: \"kubernetes.io/projected/e070595b-ded5-4ba1-8e5d-10dee3f64439-kube-api-access-htpqg\") pod \"ovn-northd-0\" (UID: \"e070595b-ded5-4ba1-8e5d-10dee3f64439\") " pod="openstack/ovn-northd-0" Nov 28 07:14:01 crc kubenswrapper[4922]: I1128 07:14:01.482796 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/e070595b-ded5-4ba1-8e5d-10dee3f64439-metrics-certs-tls-certs\") pod \"ovn-northd-0\" (UID: \"e070595b-ded5-4ba1-8e5d-10dee3f64439\") " pod="openstack/ovn-northd-0" Nov 28 07:14:01 crc kubenswrapper[4922]: I1128 07:14:01.482868 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/e070595b-ded5-4ba1-8e5d-10dee3f64439-scripts\") pod \"ovn-northd-0\" (UID: \"e070595b-ded5-4ba1-8e5d-10dee3f64439\") " pod="openstack/ovn-northd-0" Nov 28 07:14:01 crc kubenswrapper[4922]: I1128 07:14:01.482957 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e070595b-ded5-4ba1-8e5d-10dee3f64439-combined-ca-bundle\") pod \"ovn-northd-0\" (UID: \"e070595b-ded5-4ba1-8e5d-10dee3f64439\") " pod="openstack/ovn-northd-0" Nov 28 07:14:01 crc kubenswrapper[4922]: I1128 07:14:01.483089 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-northd-tls-certs\" (UniqueName: \"kubernetes.io/secret/e070595b-ded5-4ba1-8e5d-10dee3f64439-ovn-northd-tls-certs\") pod \"ovn-northd-0\" (UID: \"e070595b-ded5-4ba1-8e5d-10dee3f64439\") " pod="openstack/ovn-northd-0" Nov 28 07:14:01 crc kubenswrapper[4922]: I1128 07:14:01.584362 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/empty-dir/e070595b-ded5-4ba1-8e5d-10dee3f64439-ovn-rundir\") pod \"ovn-northd-0\" (UID: \"e070595b-ded5-4ba1-8e5d-10dee3f64439\") " pod="openstack/ovn-northd-0" Nov 28 07:14:01 crc kubenswrapper[4922]: I1128 07:14:01.584411 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-htpqg\" (UniqueName: \"kubernetes.io/projected/e070595b-ded5-4ba1-8e5d-10dee3f64439-kube-api-access-htpqg\") pod \"ovn-northd-0\" (UID: \"e070595b-ded5-4ba1-8e5d-10dee3f64439\") " pod="openstack/ovn-northd-0" Nov 28 07:14:01 crc kubenswrapper[4922]: I1128 07:14:01.584435 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/e070595b-ded5-4ba1-8e5d-10dee3f64439-metrics-certs-tls-certs\") pod \"ovn-northd-0\" (UID: \"e070595b-ded5-4ba1-8e5d-10dee3f64439\") " pod="openstack/ovn-northd-0" Nov 28 07:14:01 crc kubenswrapper[4922]: I1128 07:14:01.584451 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/e070595b-ded5-4ba1-8e5d-10dee3f64439-scripts\") pod \"ovn-northd-0\" (UID: \"e070595b-ded5-4ba1-8e5d-10dee3f64439\") " pod="openstack/ovn-northd-0" Nov 28 07:14:01 crc 
kubenswrapper[4922]: I1128 07:14:01.584480 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e070595b-ded5-4ba1-8e5d-10dee3f64439-combined-ca-bundle\") pod \"ovn-northd-0\" (UID: \"e070595b-ded5-4ba1-8e5d-10dee3f64439\") " pod="openstack/ovn-northd-0" Nov 28 07:14:01 crc kubenswrapper[4922]: I1128 07:14:01.584531 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-northd-tls-certs\" (UniqueName: \"kubernetes.io/secret/e070595b-ded5-4ba1-8e5d-10dee3f64439-ovn-northd-tls-certs\") pod \"ovn-northd-0\" (UID: \"e070595b-ded5-4ba1-8e5d-10dee3f64439\") " pod="openstack/ovn-northd-0" Nov 28 07:14:01 crc kubenswrapper[4922]: I1128 07:14:01.584556 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e070595b-ded5-4ba1-8e5d-10dee3f64439-config\") pod \"ovn-northd-0\" (UID: \"e070595b-ded5-4ba1-8e5d-10dee3f64439\") " pod="openstack/ovn-northd-0" Nov 28 07:14:01 crc kubenswrapper[4922]: I1128 07:14:01.584999 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/empty-dir/e070595b-ded5-4ba1-8e5d-10dee3f64439-ovn-rundir\") pod \"ovn-northd-0\" (UID: \"e070595b-ded5-4ba1-8e5d-10dee3f64439\") " pod="openstack/ovn-northd-0" Nov 28 07:14:01 crc kubenswrapper[4922]: I1128 07:14:01.585362 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e070595b-ded5-4ba1-8e5d-10dee3f64439-config\") pod \"ovn-northd-0\" (UID: \"e070595b-ded5-4ba1-8e5d-10dee3f64439\") " pod="openstack/ovn-northd-0" Nov 28 07:14:01 crc kubenswrapper[4922]: I1128 07:14:01.586124 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/e070595b-ded5-4ba1-8e5d-10dee3f64439-scripts\") pod \"ovn-northd-0\" (UID: \"e070595b-ded5-4ba1-8e5d-10dee3f64439\") " pod="openstack/ovn-northd-0" Nov 28 07:14:01 crc kubenswrapper[4922]: I1128 07:14:01.589422 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-northd-tls-certs\" (UniqueName: \"kubernetes.io/secret/e070595b-ded5-4ba1-8e5d-10dee3f64439-ovn-northd-tls-certs\") pod \"ovn-northd-0\" (UID: \"e070595b-ded5-4ba1-8e5d-10dee3f64439\") " pod="openstack/ovn-northd-0" Nov 28 07:14:01 crc kubenswrapper[4922]: I1128 07:14:01.589606 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e070595b-ded5-4ba1-8e5d-10dee3f64439-combined-ca-bundle\") pod \"ovn-northd-0\" (UID: \"e070595b-ded5-4ba1-8e5d-10dee3f64439\") " pod="openstack/ovn-northd-0" Nov 28 07:14:01 crc kubenswrapper[4922]: I1128 07:14:01.589715 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/e070595b-ded5-4ba1-8e5d-10dee3f64439-metrics-certs-tls-certs\") pod \"ovn-northd-0\" (UID: \"e070595b-ded5-4ba1-8e5d-10dee3f64439\") " pod="openstack/ovn-northd-0" Nov 28 07:14:01 crc kubenswrapper[4922]: I1128 07:14:01.601686 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-htpqg\" (UniqueName: \"kubernetes.io/projected/e070595b-ded5-4ba1-8e5d-10dee3f64439-kube-api-access-htpqg\") pod \"ovn-northd-0\" (UID: \"e070595b-ded5-4ba1-8e5d-10dee3f64439\") " pod="openstack/ovn-northd-0" Nov 28 07:14:01 crc kubenswrapper[4922]: I1128 
07:14:01.670342 4922 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/memcached-0" Nov 28 07:14:01 crc kubenswrapper[4922]: I1128 07:14:01.723891 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-northd-0" Nov 28 07:14:01 crc kubenswrapper[4922]: I1128 07:14:01.856811 4922 generic.go:334] "Generic (PLEG): container finished" podID="9a769fc1-9d74-46f9-b3e3-256cd3a930ff" containerID="11cb8f831009f0eff7259bbd41fe73ea654c518756df7306b462fa63ebda3db2" exitCode=0 Nov 28 07:14:01 crc kubenswrapper[4922]: I1128 07:14:01.856866 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-58bd875f97-xdqh2" event={"ID":"9a769fc1-9d74-46f9-b3e3-256cd3a930ff","Type":"ContainerDied","Data":"11cb8f831009f0eff7259bbd41fe73ea654c518756df7306b462fa63ebda3db2"} Nov 28 07:14:01 crc kubenswrapper[4922]: I1128 07:14:01.857211 4922 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-58bd875f97-xdqh2" Nov 28 07:14:01 crc kubenswrapper[4922]: I1128 07:14:01.857244 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-58bd875f97-xdqh2" event={"ID":"9a769fc1-9d74-46f9-b3e3-256cd3a930ff","Type":"ContainerStarted","Data":"4f8867321a6c4caf5bf9e21f0c86983fce57202426dab16de189b9a16c9e3ae7"} Nov 28 07:14:01 crc kubenswrapper[4922]: I1128 07:14:01.865611 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-fbf586c4f-9g6bk" event={"ID":"5408d508-e0e4-46e7-a4a8-fd6982ad6cfe","Type":"ContainerStarted","Data":"339298e0efb6ecacf0a683f5348d4f5c3fb972e3d52376870fad160f77cca5ec"} Nov 28 07:14:01 crc kubenswrapper[4922]: I1128 07:14:01.865645 4922 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-fbf586c4f-9g6bk" Nov 28 07:14:01 crc kubenswrapper[4922]: I1128 07:14:01.877285 4922 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-58bd875f97-xdqh2" podStartSLOduration=2.877263975 podStartE2EDuration="2.877263975s" podCreationTimestamp="2025-11-28 07:13:59 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 07:14:01.87178284 +0000 UTC m=+1286.792178422" watchObservedRunningTime="2025-11-28 07:14:01.877263975 +0000 UTC m=+1286.797659557" Nov 28 07:14:01 crc kubenswrapper[4922]: I1128 07:14:01.946546 4922 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/openstack-cell1-galera-0" Nov 28 07:14:01 crc kubenswrapper[4922]: I1128 07:14:01.980417 4922 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-fbf586c4f-9g6bk" podStartSLOduration=2.980394011 podStartE2EDuration="2.980394011s" podCreationTimestamp="2025-11-28 07:13:59 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 07:14:01.896345112 +0000 UTC m=+1286.816740704" watchObservedRunningTime="2025-11-28 07:14:01.980394011 +0000 UTC m=+1286.900789593" Nov 28 07:14:02 crc kubenswrapper[4922]: I1128 07:14:02.181571 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-northd-0"] Nov 28 07:14:02 crc kubenswrapper[4922]: I1128 07:14:02.872942 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-northd-0" 
event={"ID":"e070595b-ded5-4ba1-8e5d-10dee3f64439","Type":"ContainerStarted","Data":"820b9b4afab004f353730beeb209fca5f5cabbd4b28c95731bb81073037f35cb"} Nov 28 07:14:03 crc kubenswrapper[4922]: I1128 07:14:03.703604 4922 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/kube-state-metrics-0" Nov 28 07:14:03 crc kubenswrapper[4922]: I1128 07:14:03.804332 4922 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-58bd875f97-xdqh2"] Nov 28 07:14:03 crc kubenswrapper[4922]: I1128 07:14:03.863042 4922 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-7c8cb8df65-x8pfz"] Nov 28 07:14:03 crc kubenswrapper[4922]: I1128 07:14:03.864301 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-7c8cb8df65-x8pfz" Nov 28 07:14:03 crc kubenswrapper[4922]: I1128 07:14:03.895727 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-7c8cb8df65-x8pfz"] Nov 28 07:14:03 crc kubenswrapper[4922]: I1128 07:14:03.912625 4922 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-58bd875f97-xdqh2" podUID="9a769fc1-9d74-46f9-b3e3-256cd3a930ff" containerName="dnsmasq-dns" containerID="cri-o://4f8867321a6c4caf5bf9e21f0c86983fce57202426dab16de189b9a16c9e3ae7" gracePeriod=10 Nov 28 07:14:03 crc kubenswrapper[4922]: I1128 07:14:03.933299 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5524w\" (UniqueName: \"kubernetes.io/projected/ebf19eb8-c582-4e2e-9a1c-e661e24bcae1-kube-api-access-5524w\") pod \"dnsmasq-dns-7c8cb8df65-x8pfz\" (UID: \"ebf19eb8-c582-4e2e-9a1c-e661e24bcae1\") " pod="openstack/dnsmasq-dns-7c8cb8df65-x8pfz" Nov 28 07:14:03 crc kubenswrapper[4922]: I1128 07:14:03.933414 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ebf19eb8-c582-4e2e-9a1c-e661e24bcae1-config\") pod \"dnsmasq-dns-7c8cb8df65-x8pfz\" (UID: \"ebf19eb8-c582-4e2e-9a1c-e661e24bcae1\") " pod="openstack/dnsmasq-dns-7c8cb8df65-x8pfz" Nov 28 07:14:03 crc kubenswrapper[4922]: I1128 07:14:03.933438 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/ebf19eb8-c582-4e2e-9a1c-e661e24bcae1-ovsdbserver-nb\") pod \"dnsmasq-dns-7c8cb8df65-x8pfz\" (UID: \"ebf19eb8-c582-4e2e-9a1c-e661e24bcae1\") " pod="openstack/dnsmasq-dns-7c8cb8df65-x8pfz" Nov 28 07:14:03 crc kubenswrapper[4922]: I1128 07:14:03.933462 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/ebf19eb8-c582-4e2e-9a1c-e661e24bcae1-dns-svc\") pod \"dnsmasq-dns-7c8cb8df65-x8pfz\" (UID: \"ebf19eb8-c582-4e2e-9a1c-e661e24bcae1\") " pod="openstack/dnsmasq-dns-7c8cb8df65-x8pfz" Nov 28 07:14:03 crc kubenswrapper[4922]: I1128 07:14:03.933484 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/ebf19eb8-c582-4e2e-9a1c-e661e24bcae1-ovsdbserver-sb\") pod \"dnsmasq-dns-7c8cb8df65-x8pfz\" (UID: \"ebf19eb8-c582-4e2e-9a1c-e661e24bcae1\") " pod="openstack/dnsmasq-dns-7c8cb8df65-x8pfz" Nov 28 07:14:04 crc kubenswrapper[4922]: I1128 07:14:04.035443 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" 
(UniqueName: \"kubernetes.io/configmap/ebf19eb8-c582-4e2e-9a1c-e661e24bcae1-ovsdbserver-nb\") pod \"dnsmasq-dns-7c8cb8df65-x8pfz\" (UID: \"ebf19eb8-c582-4e2e-9a1c-e661e24bcae1\") " pod="openstack/dnsmasq-dns-7c8cb8df65-x8pfz" Nov 28 07:14:04 crc kubenswrapper[4922]: I1128 07:14:04.035564 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/ebf19eb8-c582-4e2e-9a1c-e661e24bcae1-dns-svc\") pod \"dnsmasq-dns-7c8cb8df65-x8pfz\" (UID: \"ebf19eb8-c582-4e2e-9a1c-e661e24bcae1\") " pod="openstack/dnsmasq-dns-7c8cb8df65-x8pfz" Nov 28 07:14:04 crc kubenswrapper[4922]: I1128 07:14:04.035606 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/ebf19eb8-c582-4e2e-9a1c-e661e24bcae1-ovsdbserver-sb\") pod \"dnsmasq-dns-7c8cb8df65-x8pfz\" (UID: \"ebf19eb8-c582-4e2e-9a1c-e661e24bcae1\") " pod="openstack/dnsmasq-dns-7c8cb8df65-x8pfz" Nov 28 07:14:04 crc kubenswrapper[4922]: I1128 07:14:04.035691 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5524w\" (UniqueName: \"kubernetes.io/projected/ebf19eb8-c582-4e2e-9a1c-e661e24bcae1-kube-api-access-5524w\") pod \"dnsmasq-dns-7c8cb8df65-x8pfz\" (UID: \"ebf19eb8-c582-4e2e-9a1c-e661e24bcae1\") " pod="openstack/dnsmasq-dns-7c8cb8df65-x8pfz" Nov 28 07:14:04 crc kubenswrapper[4922]: I1128 07:14:04.035925 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ebf19eb8-c582-4e2e-9a1c-e661e24bcae1-config\") pod \"dnsmasq-dns-7c8cb8df65-x8pfz\" (UID: \"ebf19eb8-c582-4e2e-9a1c-e661e24bcae1\") " pod="openstack/dnsmasq-dns-7c8cb8df65-x8pfz" Nov 28 07:14:04 crc kubenswrapper[4922]: I1128 07:14:04.037249 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ebf19eb8-c582-4e2e-9a1c-e661e24bcae1-config\") pod \"dnsmasq-dns-7c8cb8df65-x8pfz\" (UID: \"ebf19eb8-c582-4e2e-9a1c-e661e24bcae1\") " pod="openstack/dnsmasq-dns-7c8cb8df65-x8pfz" Nov 28 07:14:04 crc kubenswrapper[4922]: I1128 07:14:04.038885 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/ebf19eb8-c582-4e2e-9a1c-e661e24bcae1-ovsdbserver-sb\") pod \"dnsmasq-dns-7c8cb8df65-x8pfz\" (UID: \"ebf19eb8-c582-4e2e-9a1c-e661e24bcae1\") " pod="openstack/dnsmasq-dns-7c8cb8df65-x8pfz" Nov 28 07:14:04 crc kubenswrapper[4922]: I1128 07:14:04.039390 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/ebf19eb8-c582-4e2e-9a1c-e661e24bcae1-ovsdbserver-nb\") pod \"dnsmasq-dns-7c8cb8df65-x8pfz\" (UID: \"ebf19eb8-c582-4e2e-9a1c-e661e24bcae1\") " pod="openstack/dnsmasq-dns-7c8cb8df65-x8pfz" Nov 28 07:14:04 crc kubenswrapper[4922]: I1128 07:14:04.041207 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/ebf19eb8-c582-4e2e-9a1c-e661e24bcae1-dns-svc\") pod \"dnsmasq-dns-7c8cb8df65-x8pfz\" (UID: \"ebf19eb8-c582-4e2e-9a1c-e661e24bcae1\") " pod="openstack/dnsmasq-dns-7c8cb8df65-x8pfz" Nov 28 07:14:04 crc kubenswrapper[4922]: I1128 07:14:04.074718 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5524w\" (UniqueName: \"kubernetes.io/projected/ebf19eb8-c582-4e2e-9a1c-e661e24bcae1-kube-api-access-5524w\") pod 
\"dnsmasq-dns-7c8cb8df65-x8pfz\" (UID: \"ebf19eb8-c582-4e2e-9a1c-e661e24bcae1\") " pod="openstack/dnsmasq-dns-7c8cb8df65-x8pfz" Nov 28 07:14:04 crc kubenswrapper[4922]: I1128 07:14:04.190954 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-7c8cb8df65-x8pfz" Nov 28 07:14:04 crc kubenswrapper[4922]: I1128 07:14:04.679744 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-7c8cb8df65-x8pfz"] Nov 28 07:14:04 crc kubenswrapper[4922]: I1128 07:14:04.888746 4922 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/swift-storage-0"] Nov 28 07:14:04 crc kubenswrapper[4922]: I1128 07:14:04.893987 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/swift-storage-0" Nov 28 07:14:04 crc kubenswrapper[4922]: I1128 07:14:04.895651 4922 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"swift-ring-files" Nov 28 07:14:04 crc kubenswrapper[4922]: I1128 07:14:04.896011 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"swift-swift-dockercfg-ncrq7" Nov 28 07:14:04 crc kubenswrapper[4922]: I1128 07:14:04.896767 4922 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"swift-storage-config-data" Nov 28 07:14:04 crc kubenswrapper[4922]: I1128 07:14:04.896939 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"swift-conf" Nov 28 07:14:04 crc kubenswrapper[4922]: I1128 07:14:04.918688 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-storage-0"] Nov 28 07:14:04 crc kubenswrapper[4922]: I1128 07:14:04.940116 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7c8cb8df65-x8pfz" event={"ID":"ebf19eb8-c582-4e2e-9a1c-e661e24bcae1","Type":"ContainerStarted","Data":"d7e825265081179f3512c1ec8f2eda2ccfaff80fc5afdc872bde7bef7294bc1d"} Nov 28 07:14:04 crc kubenswrapper[4922]: I1128 07:14:04.950059 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"lock\" (UniqueName: \"kubernetes.io/empty-dir/46c3d0a8-d9ed-419a-baf3-57aaaf0c56fe-lock\") pod \"swift-storage-0\" (UID: \"46c3d0a8-d9ed-419a-baf3-57aaaf0c56fe\") " pod="openstack/swift-storage-0" Nov 28 07:14:04 crc kubenswrapper[4922]: I1128 07:14:04.950137 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cache\" (UniqueName: \"kubernetes.io/empty-dir/46c3d0a8-d9ed-419a-baf3-57aaaf0c56fe-cache\") pod \"swift-storage-0\" (UID: \"46c3d0a8-d9ed-419a-baf3-57aaaf0c56fe\") " pod="openstack/swift-storage-0" Nov 28 07:14:04 crc kubenswrapper[4922]: I1128 07:14:04.950166 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/46c3d0a8-d9ed-419a-baf3-57aaaf0c56fe-etc-swift\") pod \"swift-storage-0\" (UID: \"46c3d0a8-d9ed-419a-baf3-57aaaf0c56fe\") " pod="openstack/swift-storage-0" Nov 28 07:14:04 crc kubenswrapper[4922]: I1128 07:14:04.950198 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"swift-storage-0\" (UID: \"46c3d0a8-d9ed-419a-baf3-57aaaf0c56fe\") " pod="openstack/swift-storage-0" Nov 28 07:14:04 crc kubenswrapper[4922]: I1128 07:14:04.950262 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"kube-api-access-4jcg5\" (UniqueName: \"kubernetes.io/projected/46c3d0a8-d9ed-419a-baf3-57aaaf0c56fe-kube-api-access-4jcg5\") pod \"swift-storage-0\" (UID: \"46c3d0a8-d9ed-419a-baf3-57aaaf0c56fe\") " pod="openstack/swift-storage-0" Nov 28 07:14:05 crc kubenswrapper[4922]: I1128 07:14:05.052083 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cache\" (UniqueName: \"kubernetes.io/empty-dir/46c3d0a8-d9ed-419a-baf3-57aaaf0c56fe-cache\") pod \"swift-storage-0\" (UID: \"46c3d0a8-d9ed-419a-baf3-57aaaf0c56fe\") " pod="openstack/swift-storage-0" Nov 28 07:14:05 crc kubenswrapper[4922]: I1128 07:14:05.052159 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/46c3d0a8-d9ed-419a-baf3-57aaaf0c56fe-etc-swift\") pod \"swift-storage-0\" (UID: \"46c3d0a8-d9ed-419a-baf3-57aaaf0c56fe\") " pod="openstack/swift-storage-0" Nov 28 07:14:05 crc kubenswrapper[4922]: I1128 07:14:05.052248 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"swift-storage-0\" (UID: \"46c3d0a8-d9ed-419a-baf3-57aaaf0c56fe\") " pod="openstack/swift-storage-0" Nov 28 07:14:05 crc kubenswrapper[4922]: E1128 07:14:05.052324 4922 projected.go:288] Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found Nov 28 07:14:05 crc kubenswrapper[4922]: E1128 07:14:05.052352 4922 projected.go:194] Error preparing data for projected volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not found Nov 28 07:14:05 crc kubenswrapper[4922]: I1128 07:14:05.052329 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4jcg5\" (UniqueName: \"kubernetes.io/projected/46c3d0a8-d9ed-419a-baf3-57aaaf0c56fe-kube-api-access-4jcg5\") pod \"swift-storage-0\" (UID: \"46c3d0a8-d9ed-419a-baf3-57aaaf0c56fe\") " pod="openstack/swift-storage-0" Nov 28 07:14:05 crc kubenswrapper[4922]: E1128 07:14:05.052412 4922 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/46c3d0a8-d9ed-419a-baf3-57aaaf0c56fe-etc-swift podName:46c3d0a8-d9ed-419a-baf3-57aaaf0c56fe nodeName:}" failed. No retries permitted until 2025-11-28 07:14:05.552386266 +0000 UTC m=+1290.472781848 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/46c3d0a8-d9ed-419a-baf3-57aaaf0c56fe-etc-swift") pod "swift-storage-0" (UID: "46c3d0a8-d9ed-419a-baf3-57aaaf0c56fe") : configmap "swift-ring-files" not found Nov 28 07:14:05 crc kubenswrapper[4922]: I1128 07:14:05.052514 4922 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"swift-storage-0\" (UID: \"46c3d0a8-d9ed-419a-baf3-57aaaf0c56fe\") device mount path \"/mnt/openstack/pv02\"" pod="openstack/swift-storage-0" Nov 28 07:14:05 crc kubenswrapper[4922]: I1128 07:14:05.052542 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"lock\" (UniqueName: \"kubernetes.io/empty-dir/46c3d0a8-d9ed-419a-baf3-57aaaf0c56fe-lock\") pod \"swift-storage-0\" (UID: \"46c3d0a8-d9ed-419a-baf3-57aaaf0c56fe\") " pod="openstack/swift-storage-0" Nov 28 07:14:05 crc kubenswrapper[4922]: I1128 07:14:05.052607 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cache\" (UniqueName: \"kubernetes.io/empty-dir/46c3d0a8-d9ed-419a-baf3-57aaaf0c56fe-cache\") pod \"swift-storage-0\" (UID: \"46c3d0a8-d9ed-419a-baf3-57aaaf0c56fe\") " pod="openstack/swift-storage-0" Nov 28 07:14:05 crc kubenswrapper[4922]: I1128 07:14:05.052968 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"lock\" (UniqueName: \"kubernetes.io/empty-dir/46c3d0a8-d9ed-419a-baf3-57aaaf0c56fe-lock\") pod \"swift-storage-0\" (UID: \"46c3d0a8-d9ed-419a-baf3-57aaaf0c56fe\") " pod="openstack/swift-storage-0" Nov 28 07:14:05 crc kubenswrapper[4922]: I1128 07:14:05.068557 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4jcg5\" (UniqueName: \"kubernetes.io/projected/46c3d0a8-d9ed-419a-baf3-57aaaf0c56fe-kube-api-access-4jcg5\") pod \"swift-storage-0\" (UID: \"46c3d0a8-d9ed-419a-baf3-57aaaf0c56fe\") " pod="openstack/swift-storage-0" Nov 28 07:14:05 crc kubenswrapper[4922]: I1128 07:14:05.070729 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"swift-storage-0\" (UID: \"46c3d0a8-d9ed-419a-baf3-57aaaf0c56fe\") " pod="openstack/swift-storage-0" Nov 28 07:14:05 crc kubenswrapper[4922]: I1128 07:14:05.436688 4922 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/swift-ring-rebalance-6kzln"] Nov 28 07:14:05 crc kubenswrapper[4922]: I1128 07:14:05.437909 4922 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/swift-ring-rebalance-6kzln" Nov 28 07:14:05 crc kubenswrapper[4922]: I1128 07:14:05.440882 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"swift-proxy-config-data" Nov 28 07:14:05 crc kubenswrapper[4922]: I1128 07:14:05.441453 4922 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"swift-ring-config-data" Nov 28 07:14:05 crc kubenswrapper[4922]: I1128 07:14:05.442058 4922 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"swift-ring-scripts" Nov 28 07:14:05 crc kubenswrapper[4922]: I1128 07:14:05.457385 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-ring-rebalance-6kzln"] Nov 28 07:14:05 crc kubenswrapper[4922]: I1128 07:14:05.466175 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/b78e6a70-a315-4c6d-8731-a21335e18766-dispersionconf\") pod \"swift-ring-rebalance-6kzln\" (UID: \"b78e6a70-a315-4c6d-8731-a21335e18766\") " pod="openstack/swift-ring-rebalance-6kzln" Nov 28 07:14:05 crc kubenswrapper[4922]: I1128 07:14:05.466239 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/b78e6a70-a315-4c6d-8731-a21335e18766-swiftconf\") pod \"swift-ring-rebalance-6kzln\" (UID: \"b78e6a70-a315-4c6d-8731-a21335e18766\") " pod="openstack/swift-ring-rebalance-6kzln" Nov 28 07:14:05 crc kubenswrapper[4922]: I1128 07:14:05.466279 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b78e6a70-a315-4c6d-8731-a21335e18766-combined-ca-bundle\") pod \"swift-ring-rebalance-6kzln\" (UID: \"b78e6a70-a315-4c6d-8731-a21335e18766\") " pod="openstack/swift-ring-rebalance-6kzln" Nov 28 07:14:05 crc kubenswrapper[4922]: I1128 07:14:05.466300 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-m7xtv\" (UniqueName: \"kubernetes.io/projected/b78e6a70-a315-4c6d-8731-a21335e18766-kube-api-access-m7xtv\") pod \"swift-ring-rebalance-6kzln\" (UID: \"b78e6a70-a315-4c6d-8731-a21335e18766\") " pod="openstack/swift-ring-rebalance-6kzln" Nov 28 07:14:05 crc kubenswrapper[4922]: I1128 07:14:05.466332 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/b78e6a70-a315-4c6d-8731-a21335e18766-scripts\") pod \"swift-ring-rebalance-6kzln\" (UID: \"b78e6a70-a315-4c6d-8731-a21335e18766\") " pod="openstack/swift-ring-rebalance-6kzln" Nov 28 07:14:05 crc kubenswrapper[4922]: I1128 07:14:05.466366 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/b78e6a70-a315-4c6d-8731-a21335e18766-ring-data-devices\") pod \"swift-ring-rebalance-6kzln\" (UID: \"b78e6a70-a315-4c6d-8731-a21335e18766\") " pod="openstack/swift-ring-rebalance-6kzln" Nov 28 07:14:05 crc kubenswrapper[4922]: I1128 07:14:05.466387 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/b78e6a70-a315-4c6d-8731-a21335e18766-etc-swift\") pod \"swift-ring-rebalance-6kzln\" (UID: \"b78e6a70-a315-4c6d-8731-a21335e18766\") " pod="openstack/swift-ring-rebalance-6kzln" Nov 28 
07:14:05 crc kubenswrapper[4922]: I1128 07:14:05.568149 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b78e6a70-a315-4c6d-8731-a21335e18766-combined-ca-bundle\") pod \"swift-ring-rebalance-6kzln\" (UID: \"b78e6a70-a315-4c6d-8731-a21335e18766\") " pod="openstack/swift-ring-rebalance-6kzln" Nov 28 07:14:05 crc kubenswrapper[4922]: I1128 07:14:05.568410 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/46c3d0a8-d9ed-419a-baf3-57aaaf0c56fe-etc-swift\") pod \"swift-storage-0\" (UID: \"46c3d0a8-d9ed-419a-baf3-57aaaf0c56fe\") " pod="openstack/swift-storage-0" Nov 28 07:14:05 crc kubenswrapper[4922]: I1128 07:14:05.568430 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-m7xtv\" (UniqueName: \"kubernetes.io/projected/b78e6a70-a315-4c6d-8731-a21335e18766-kube-api-access-m7xtv\") pod \"swift-ring-rebalance-6kzln\" (UID: \"b78e6a70-a315-4c6d-8731-a21335e18766\") " pod="openstack/swift-ring-rebalance-6kzln" Nov 28 07:14:05 crc kubenswrapper[4922]: I1128 07:14:05.568451 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/b78e6a70-a315-4c6d-8731-a21335e18766-scripts\") pod \"swift-ring-rebalance-6kzln\" (UID: \"b78e6a70-a315-4c6d-8731-a21335e18766\") " pod="openstack/swift-ring-rebalance-6kzln" Nov 28 07:14:05 crc kubenswrapper[4922]: I1128 07:14:05.568478 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/b78e6a70-a315-4c6d-8731-a21335e18766-ring-data-devices\") pod \"swift-ring-rebalance-6kzln\" (UID: \"b78e6a70-a315-4c6d-8731-a21335e18766\") " pod="openstack/swift-ring-rebalance-6kzln" Nov 28 07:14:05 crc kubenswrapper[4922]: I1128 07:14:05.568498 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/b78e6a70-a315-4c6d-8731-a21335e18766-etc-swift\") pod \"swift-ring-rebalance-6kzln\" (UID: \"b78e6a70-a315-4c6d-8731-a21335e18766\") " pod="openstack/swift-ring-rebalance-6kzln" Nov 28 07:14:05 crc kubenswrapper[4922]: I1128 07:14:05.568562 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/b78e6a70-a315-4c6d-8731-a21335e18766-dispersionconf\") pod \"swift-ring-rebalance-6kzln\" (UID: \"b78e6a70-a315-4c6d-8731-a21335e18766\") " pod="openstack/swift-ring-rebalance-6kzln" Nov 28 07:14:05 crc kubenswrapper[4922]: I1128 07:14:05.568590 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/b78e6a70-a315-4c6d-8731-a21335e18766-swiftconf\") pod \"swift-ring-rebalance-6kzln\" (UID: \"b78e6a70-a315-4c6d-8731-a21335e18766\") " pod="openstack/swift-ring-rebalance-6kzln" Nov 28 07:14:05 crc kubenswrapper[4922]: E1128 07:14:05.568597 4922 projected.go:288] Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found Nov 28 07:14:05 crc kubenswrapper[4922]: E1128 07:14:05.568613 4922 projected.go:194] Error preparing data for projected volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not found Nov 28 07:14:05 crc kubenswrapper[4922]: E1128 07:14:05.568655 4922 nestedpendingoperations.go:348] Operation for 
"{volumeName:kubernetes.io/projected/46c3d0a8-d9ed-419a-baf3-57aaaf0c56fe-etc-swift podName:46c3d0a8-d9ed-419a-baf3-57aaaf0c56fe nodeName:}" failed. No retries permitted until 2025-11-28 07:14:06.568641072 +0000 UTC m=+1291.489036654 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/46c3d0a8-d9ed-419a-baf3-57aaaf0c56fe-etc-swift") pod "swift-storage-0" (UID: "46c3d0a8-d9ed-419a-baf3-57aaaf0c56fe") : configmap "swift-ring-files" not found Nov 28 07:14:05 crc kubenswrapper[4922]: I1128 07:14:05.569236 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/b78e6a70-a315-4c6d-8731-a21335e18766-etc-swift\") pod \"swift-ring-rebalance-6kzln\" (UID: \"b78e6a70-a315-4c6d-8731-a21335e18766\") " pod="openstack/swift-ring-rebalance-6kzln" Nov 28 07:14:05 crc kubenswrapper[4922]: I1128 07:14:05.569449 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/b78e6a70-a315-4c6d-8731-a21335e18766-ring-data-devices\") pod \"swift-ring-rebalance-6kzln\" (UID: \"b78e6a70-a315-4c6d-8731-a21335e18766\") " pod="openstack/swift-ring-rebalance-6kzln" Nov 28 07:14:05 crc kubenswrapper[4922]: I1128 07:14:05.569710 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/b78e6a70-a315-4c6d-8731-a21335e18766-scripts\") pod \"swift-ring-rebalance-6kzln\" (UID: \"b78e6a70-a315-4c6d-8731-a21335e18766\") " pod="openstack/swift-ring-rebalance-6kzln" Nov 28 07:14:05 crc kubenswrapper[4922]: I1128 07:14:05.572979 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/b78e6a70-a315-4c6d-8731-a21335e18766-swiftconf\") pod \"swift-ring-rebalance-6kzln\" (UID: \"b78e6a70-a315-4c6d-8731-a21335e18766\") " pod="openstack/swift-ring-rebalance-6kzln" Nov 28 07:14:05 crc kubenswrapper[4922]: I1128 07:14:05.573204 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b78e6a70-a315-4c6d-8731-a21335e18766-combined-ca-bundle\") pod \"swift-ring-rebalance-6kzln\" (UID: \"b78e6a70-a315-4c6d-8731-a21335e18766\") " pod="openstack/swift-ring-rebalance-6kzln" Nov 28 07:14:05 crc kubenswrapper[4922]: I1128 07:14:05.573768 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/b78e6a70-a315-4c6d-8731-a21335e18766-dispersionconf\") pod \"swift-ring-rebalance-6kzln\" (UID: \"b78e6a70-a315-4c6d-8731-a21335e18766\") " pod="openstack/swift-ring-rebalance-6kzln" Nov 28 07:14:05 crc kubenswrapper[4922]: I1128 07:14:05.591443 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-m7xtv\" (UniqueName: \"kubernetes.io/projected/b78e6a70-a315-4c6d-8731-a21335e18766-kube-api-access-m7xtv\") pod \"swift-ring-rebalance-6kzln\" (UID: \"b78e6a70-a315-4c6d-8731-a21335e18766\") " pod="openstack/swift-ring-rebalance-6kzln" Nov 28 07:14:05 crc kubenswrapper[4922]: I1128 07:14:05.762546 4922 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/swift-ring-rebalance-6kzln" Nov 28 07:14:05 crc kubenswrapper[4922]: I1128 07:14:05.952585 4922 generic.go:334] "Generic (PLEG): container finished" podID="9a769fc1-9d74-46f9-b3e3-256cd3a930ff" containerID="4f8867321a6c4caf5bf9e21f0c86983fce57202426dab16de189b9a16c9e3ae7" exitCode=0 Nov 28 07:14:05 crc kubenswrapper[4922]: I1128 07:14:05.952859 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-58bd875f97-xdqh2" event={"ID":"9a769fc1-9d74-46f9-b3e3-256cd3a930ff","Type":"ContainerDied","Data":"4f8867321a6c4caf5bf9e21f0c86983fce57202426dab16de189b9a16c9e3ae7"} Nov 28 07:14:06 crc kubenswrapper[4922]: I1128 07:14:06.233364 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-ring-rebalance-6kzln"] Nov 28 07:14:06 crc kubenswrapper[4922]: I1128 07:14:06.599698 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/46c3d0a8-d9ed-419a-baf3-57aaaf0c56fe-etc-swift\") pod \"swift-storage-0\" (UID: \"46c3d0a8-d9ed-419a-baf3-57aaaf0c56fe\") " pod="openstack/swift-storage-0" Nov 28 07:14:06 crc kubenswrapper[4922]: E1128 07:14:06.599883 4922 projected.go:288] Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found Nov 28 07:14:06 crc kubenswrapper[4922]: E1128 07:14:06.599895 4922 projected.go:194] Error preparing data for projected volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not found Nov 28 07:14:06 crc kubenswrapper[4922]: E1128 07:14:06.599934 4922 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/46c3d0a8-d9ed-419a-baf3-57aaaf0c56fe-etc-swift podName:46c3d0a8-d9ed-419a-baf3-57aaaf0c56fe nodeName:}" failed. No retries permitted until 2025-11-28 07:14:08.599921529 +0000 UTC m=+1293.520317111 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/46c3d0a8-d9ed-419a-baf3-57aaaf0c56fe-etc-swift") pod "swift-storage-0" (UID: "46c3d0a8-d9ed-419a-baf3-57aaaf0c56fe") : configmap "swift-ring-files" not found Nov 28 07:14:06 crc kubenswrapper[4922]: I1128 07:14:06.879655 4922 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-58bd875f97-xdqh2" Nov 28 07:14:06 crc kubenswrapper[4922]: I1128 07:14:06.964508 4922 generic.go:334] "Generic (PLEG): container finished" podID="ebf19eb8-c582-4e2e-9a1c-e661e24bcae1" containerID="eb98327ae0a3ebf903f1cd3e265b3034a52c0419eb42fa5c9717c59a88af4259" exitCode=0 Nov 28 07:14:06 crc kubenswrapper[4922]: I1128 07:14:06.964580 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7c8cb8df65-x8pfz" event={"ID":"ebf19eb8-c582-4e2e-9a1c-e661e24bcae1","Type":"ContainerDied","Data":"eb98327ae0a3ebf903f1cd3e265b3034a52c0419eb42fa5c9717c59a88af4259"} Nov 28 07:14:06 crc kubenswrapper[4922]: I1128 07:14:06.967258 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-ring-rebalance-6kzln" event={"ID":"b78e6a70-a315-4c6d-8731-a21335e18766","Type":"ContainerStarted","Data":"a4de9e9de8651a15c92dfa668c8e0cbc8fe1952ae86a80310797d8a5d6721112"} Nov 28 07:14:06 crc kubenswrapper[4922]: I1128 07:14:06.970892 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-58bd875f97-xdqh2" event={"ID":"9a769fc1-9d74-46f9-b3e3-256cd3a930ff","Type":"ContainerDied","Data":"0e91539fc0907e9f1d6ab2d67c460fadd1808d08be875440d707256919dae39e"} Nov 28 07:14:06 crc kubenswrapper[4922]: I1128 07:14:06.970937 4922 scope.go:117] "RemoveContainer" containerID="4f8867321a6c4caf5bf9e21f0c86983fce57202426dab16de189b9a16c9e3ae7" Nov 28 07:14:06 crc kubenswrapper[4922]: I1128 07:14:06.971067 4922 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-58bd875f97-xdqh2" Nov 28 07:14:07 crc kubenswrapper[4922]: I1128 07:14:07.009645 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/9a769fc1-9d74-46f9-b3e3-256cd3a930ff-ovsdbserver-sb\") pod \"9a769fc1-9d74-46f9-b3e3-256cd3a930ff\" (UID: \"9a769fc1-9d74-46f9-b3e3-256cd3a930ff\") " Nov 28 07:14:07 crc kubenswrapper[4922]: I1128 07:14:07.009722 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hs9kx\" (UniqueName: \"kubernetes.io/projected/9a769fc1-9d74-46f9-b3e3-256cd3a930ff-kube-api-access-hs9kx\") pod \"9a769fc1-9d74-46f9-b3e3-256cd3a930ff\" (UID: \"9a769fc1-9d74-46f9-b3e3-256cd3a930ff\") " Nov 28 07:14:07 crc kubenswrapper[4922]: I1128 07:14:07.009887 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/9a769fc1-9d74-46f9-b3e3-256cd3a930ff-dns-svc\") pod \"9a769fc1-9d74-46f9-b3e3-256cd3a930ff\" (UID: \"9a769fc1-9d74-46f9-b3e3-256cd3a930ff\") " Nov 28 07:14:07 crc kubenswrapper[4922]: I1128 07:14:07.009968 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/9a769fc1-9d74-46f9-b3e3-256cd3a930ff-ovsdbserver-nb\") pod \"9a769fc1-9d74-46f9-b3e3-256cd3a930ff\" (UID: \"9a769fc1-9d74-46f9-b3e3-256cd3a930ff\") " Nov 28 07:14:07 crc kubenswrapper[4922]: I1128 07:14:07.009995 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9a769fc1-9d74-46f9-b3e3-256cd3a930ff-config\") pod \"9a769fc1-9d74-46f9-b3e3-256cd3a930ff\" (UID: \"9a769fc1-9d74-46f9-b3e3-256cd3a930ff\") " Nov 28 07:14:07 crc kubenswrapper[4922]: I1128 07:14:07.028504 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume 
"kubernetes.io/projected/9a769fc1-9d74-46f9-b3e3-256cd3a930ff-kube-api-access-hs9kx" (OuterVolumeSpecName: "kube-api-access-hs9kx") pod "9a769fc1-9d74-46f9-b3e3-256cd3a930ff" (UID: "9a769fc1-9d74-46f9-b3e3-256cd3a930ff"). InnerVolumeSpecName "kube-api-access-hs9kx". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 07:14:07 crc kubenswrapper[4922]: I1128 07:14:07.055524 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9a769fc1-9d74-46f9-b3e3-256cd3a930ff-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "9a769fc1-9d74-46f9-b3e3-256cd3a930ff" (UID: "9a769fc1-9d74-46f9-b3e3-256cd3a930ff"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 07:14:07 crc kubenswrapper[4922]: I1128 07:14:07.070092 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9a769fc1-9d74-46f9-b3e3-256cd3a930ff-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "9a769fc1-9d74-46f9-b3e3-256cd3a930ff" (UID: "9a769fc1-9d74-46f9-b3e3-256cd3a930ff"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 07:14:07 crc kubenswrapper[4922]: I1128 07:14:07.070837 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9a769fc1-9d74-46f9-b3e3-256cd3a930ff-config" (OuterVolumeSpecName: "config") pod "9a769fc1-9d74-46f9-b3e3-256cd3a930ff" (UID: "9a769fc1-9d74-46f9-b3e3-256cd3a930ff"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 07:14:07 crc kubenswrapper[4922]: I1128 07:14:07.087300 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9a769fc1-9d74-46f9-b3e3-256cd3a930ff-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "9a769fc1-9d74-46f9-b3e3-256cd3a930ff" (UID: "9a769fc1-9d74-46f9-b3e3-256cd3a930ff"). InnerVolumeSpecName "ovsdbserver-nb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 07:14:07 crc kubenswrapper[4922]: I1128 07:14:07.112065 4922 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/9a769fc1-9d74-46f9-b3e3-256cd3a930ff-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 28 07:14:07 crc kubenswrapper[4922]: I1128 07:14:07.112099 4922 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hs9kx\" (UniqueName: \"kubernetes.io/projected/9a769fc1-9d74-46f9-b3e3-256cd3a930ff-kube-api-access-hs9kx\") on node \"crc\" DevicePath \"\"" Nov 28 07:14:07 crc kubenswrapper[4922]: I1128 07:14:07.112110 4922 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/9a769fc1-9d74-46f9-b3e3-256cd3a930ff-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 28 07:14:07 crc kubenswrapper[4922]: I1128 07:14:07.112119 4922 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/9a769fc1-9d74-46f9-b3e3-256cd3a930ff-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 28 07:14:07 crc kubenswrapper[4922]: I1128 07:14:07.112127 4922 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9a769fc1-9d74-46f9-b3e3-256cd3a930ff-config\") on node \"crc\" DevicePath \"\"" Nov 28 07:14:07 crc kubenswrapper[4922]: I1128 07:14:07.248801 4922 scope.go:117] "RemoveContainer" containerID="11cb8f831009f0eff7259bbd41fe73ea654c518756df7306b462fa63ebda3db2" Nov 28 07:14:07 crc kubenswrapper[4922]: I1128 07:14:07.363959 4922 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-58bd875f97-xdqh2"] Nov 28 07:14:07 crc kubenswrapper[4922]: I1128 07:14:07.374949 4922 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-58bd875f97-xdqh2"] Nov 28 07:14:07 crc kubenswrapper[4922]: I1128 07:14:07.413590 4922 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9a769fc1-9d74-46f9-b3e3-256cd3a930ff" path="/var/lib/kubelet/pods/9a769fc1-9d74-46f9-b3e3-256cd3a930ff/volumes" Nov 28 07:14:07 crc kubenswrapper[4922]: I1128 07:14:07.984075 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7c8cb8df65-x8pfz" event={"ID":"ebf19eb8-c582-4e2e-9a1c-e661e24bcae1","Type":"ContainerStarted","Data":"9912e5cfa24f6de7d2138276e2fb9effacb013246f74b63fe9501ed863a988d1"} Nov 28 07:14:07 crc kubenswrapper[4922]: I1128 07:14:07.984444 4922 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-7c8cb8df65-x8pfz" Nov 28 07:14:07 crc kubenswrapper[4922]: I1128 07:14:07.986209 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-northd-0" event={"ID":"e070595b-ded5-4ba1-8e5d-10dee3f64439","Type":"ContainerStarted","Data":"080f55e8e51ff6a214b0fe9fe62cc38adee207ae9bc0a4e40e78d515f29e447e"} Nov 28 07:14:07 crc kubenswrapper[4922]: I1128 07:14:07.986258 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-northd-0" event={"ID":"e070595b-ded5-4ba1-8e5d-10dee3f64439","Type":"ContainerStarted","Data":"ab03f8552f326c9c76a50463baa6a28a8cfaa27ea7ce5e6c3db040730b019068"} Nov 28 07:14:07 crc kubenswrapper[4922]: I1128 07:14:07.986422 4922 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovn-northd-0" Nov 28 07:14:08 crc kubenswrapper[4922]: I1128 07:14:08.045672 4922 pod_startup_latency_tracker.go:104] "Observed pod startup duration" 
pod="openstack/dnsmasq-dns-7c8cb8df65-x8pfz" podStartSLOduration=5.045654972 podStartE2EDuration="5.045654972s" podCreationTimestamp="2025-11-28 07:14:03 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 07:14:08.016976442 +0000 UTC m=+1292.937372034" watchObservedRunningTime="2025-11-28 07:14:08.045654972 +0000 UTC m=+1292.966050554" Nov 28 07:14:08 crc kubenswrapper[4922]: I1128 07:14:08.643324 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/46c3d0a8-d9ed-419a-baf3-57aaaf0c56fe-etc-swift\") pod \"swift-storage-0\" (UID: \"46c3d0a8-d9ed-419a-baf3-57aaaf0c56fe\") " pod="openstack/swift-storage-0" Nov 28 07:14:08 crc kubenswrapper[4922]: E1128 07:14:08.643594 4922 projected.go:288] Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found Nov 28 07:14:08 crc kubenswrapper[4922]: E1128 07:14:08.643798 4922 projected.go:194] Error preparing data for projected volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not found Nov 28 07:14:08 crc kubenswrapper[4922]: E1128 07:14:08.643849 4922 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/46c3d0a8-d9ed-419a-baf3-57aaaf0c56fe-etc-swift podName:46c3d0a8-d9ed-419a-baf3-57aaaf0c56fe nodeName:}" failed. No retries permitted until 2025-11-28 07:14:12.64383281 +0000 UTC m=+1297.564228392 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/46c3d0a8-d9ed-419a-baf3-57aaaf0c56fe-etc-swift") pod "swift-storage-0" (UID: "46c3d0a8-d9ed-419a-baf3-57aaaf0c56fe") : configmap "swift-ring-files" not found Nov 28 07:14:09 crc kubenswrapper[4922]: I1128 07:14:09.452437 4922 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-fbf586c4f-9g6bk" Nov 28 07:14:09 crc kubenswrapper[4922]: I1128 07:14:09.482436 4922 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-northd-0" podStartSLOduration=3.387756924 podStartE2EDuration="8.482420757s" podCreationTimestamp="2025-11-28 07:14:01 +0000 UTC" firstStartedPulling="2025-11-28 07:14:02.182993826 +0000 UTC m=+1287.103389408" lastFinishedPulling="2025-11-28 07:14:07.277657659 +0000 UTC m=+1292.198053241" observedRunningTime="2025-11-28 07:14:08.045612771 +0000 UTC m=+1292.966008373" watchObservedRunningTime="2025-11-28 07:14:09.482420757 +0000 UTC m=+1294.402816339" Nov 28 07:14:09 crc kubenswrapper[4922]: I1128 07:14:09.902432 4922 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/openstack-galera-0" Nov 28 07:14:09 crc kubenswrapper[4922]: I1128 07:14:09.902495 4922 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/openstack-galera-0" Nov 28 07:14:09 crc kubenswrapper[4922]: I1128 07:14:09.982544 4922 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/openstack-galera-0" Nov 28 07:14:10 crc kubenswrapper[4922]: I1128 07:14:10.063701 4922 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/openstack-galera-0" Nov 28 07:14:11 crc kubenswrapper[4922]: I1128 07:14:11.299667 4922 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-f279-account-create-update-jh2bs"] Nov 28 07:14:11 crc kubenswrapper[4922]: E1128 07:14:11.301051 4922 cpu_manager.go:410] 
"RemoveStaleState: removing container" podUID="9a769fc1-9d74-46f9-b3e3-256cd3a930ff" containerName="dnsmasq-dns" Nov 28 07:14:11 crc kubenswrapper[4922]: I1128 07:14:11.301077 4922 state_mem.go:107] "Deleted CPUSet assignment" podUID="9a769fc1-9d74-46f9-b3e3-256cd3a930ff" containerName="dnsmasq-dns" Nov 28 07:14:11 crc kubenswrapper[4922]: E1128 07:14:11.301148 4922 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9a769fc1-9d74-46f9-b3e3-256cd3a930ff" containerName="init" Nov 28 07:14:11 crc kubenswrapper[4922]: I1128 07:14:11.301164 4922 state_mem.go:107] "Deleted CPUSet assignment" podUID="9a769fc1-9d74-46f9-b3e3-256cd3a930ff" containerName="init" Nov 28 07:14:11 crc kubenswrapper[4922]: I1128 07:14:11.301790 4922 memory_manager.go:354] "RemoveStaleState removing state" podUID="9a769fc1-9d74-46f9-b3e3-256cd3a930ff" containerName="dnsmasq-dns" Nov 28 07:14:11 crc kubenswrapper[4922]: I1128 07:14:11.302962 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-f279-account-create-update-jh2bs" Nov 28 07:14:11 crc kubenswrapper[4922]: I1128 07:14:11.307182 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-db-secret" Nov 28 07:14:11 crc kubenswrapper[4922]: I1128 07:14:11.321407 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-f279-account-create-update-jh2bs"] Nov 28 07:14:11 crc kubenswrapper[4922]: I1128 07:14:11.327912 4922 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-db-create-c9zx7"] Nov 28 07:14:11 crc kubenswrapper[4922]: I1128 07:14:11.329369 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-create-c9zx7" Nov 28 07:14:11 crc kubenswrapper[4922]: I1128 07:14:11.345643 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-db-create-c9zx7"] Nov 28 07:14:11 crc kubenswrapper[4922]: I1128 07:14:11.407852 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/9b8dd495-8375-4779-9d6f-db1c25affa16-operator-scripts\") pod \"keystone-db-create-c9zx7\" (UID: \"9b8dd495-8375-4779-9d6f-db1c25affa16\") " pod="openstack/keystone-db-create-c9zx7" Nov 28 07:14:11 crc kubenswrapper[4922]: I1128 07:14:11.407906 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9br5n\" (UniqueName: \"kubernetes.io/projected/9b8dd495-8375-4779-9d6f-db1c25affa16-kube-api-access-9br5n\") pod \"keystone-db-create-c9zx7\" (UID: \"9b8dd495-8375-4779-9d6f-db1c25affa16\") " pod="openstack/keystone-db-create-c9zx7" Nov 28 07:14:11 crc kubenswrapper[4922]: I1128 07:14:11.408031 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mj56d\" (UniqueName: \"kubernetes.io/projected/7a229aad-3dbd-40d3-85ef-38fe07deaf5b-kube-api-access-mj56d\") pod \"keystone-f279-account-create-update-jh2bs\" (UID: \"7a229aad-3dbd-40d3-85ef-38fe07deaf5b\") " pod="openstack/keystone-f279-account-create-update-jh2bs" Nov 28 07:14:11 crc kubenswrapper[4922]: I1128 07:14:11.408115 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/7a229aad-3dbd-40d3-85ef-38fe07deaf5b-operator-scripts\") pod \"keystone-f279-account-create-update-jh2bs\" (UID: \"7a229aad-3dbd-40d3-85ef-38fe07deaf5b\") " 
pod="openstack/keystone-f279-account-create-update-jh2bs" Nov 28 07:14:11 crc kubenswrapper[4922]: I1128 07:14:11.505602 4922 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/placement-db-create-h9s89"] Nov 28 07:14:11 crc kubenswrapper[4922]: I1128 07:14:11.506864 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-create-h9s89" Nov 28 07:14:11 crc kubenswrapper[4922]: I1128 07:14:11.509759 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mj56d\" (UniqueName: \"kubernetes.io/projected/7a229aad-3dbd-40d3-85ef-38fe07deaf5b-kube-api-access-mj56d\") pod \"keystone-f279-account-create-update-jh2bs\" (UID: \"7a229aad-3dbd-40d3-85ef-38fe07deaf5b\") " pod="openstack/keystone-f279-account-create-update-jh2bs" Nov 28 07:14:11 crc kubenswrapper[4922]: I1128 07:14:11.509823 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/7a229aad-3dbd-40d3-85ef-38fe07deaf5b-operator-scripts\") pod \"keystone-f279-account-create-update-jh2bs\" (UID: \"7a229aad-3dbd-40d3-85ef-38fe07deaf5b\") " pod="openstack/keystone-f279-account-create-update-jh2bs" Nov 28 07:14:11 crc kubenswrapper[4922]: I1128 07:14:11.509913 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/9b8dd495-8375-4779-9d6f-db1c25affa16-operator-scripts\") pod \"keystone-db-create-c9zx7\" (UID: \"9b8dd495-8375-4779-9d6f-db1c25affa16\") " pod="openstack/keystone-db-create-c9zx7" Nov 28 07:14:11 crc kubenswrapper[4922]: I1128 07:14:11.509959 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9br5n\" (UniqueName: \"kubernetes.io/projected/9b8dd495-8375-4779-9d6f-db1c25affa16-kube-api-access-9br5n\") pod \"keystone-db-create-c9zx7\" (UID: \"9b8dd495-8375-4779-9d6f-db1c25affa16\") " pod="openstack/keystone-db-create-c9zx7" Nov 28 07:14:11 crc kubenswrapper[4922]: I1128 07:14:11.510782 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/9b8dd495-8375-4779-9d6f-db1c25affa16-operator-scripts\") pod \"keystone-db-create-c9zx7\" (UID: \"9b8dd495-8375-4779-9d6f-db1c25affa16\") " pod="openstack/keystone-db-create-c9zx7" Nov 28 07:14:11 crc kubenswrapper[4922]: I1128 07:14:11.510830 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/7a229aad-3dbd-40d3-85ef-38fe07deaf5b-operator-scripts\") pod \"keystone-f279-account-create-update-jh2bs\" (UID: \"7a229aad-3dbd-40d3-85ef-38fe07deaf5b\") " pod="openstack/keystone-f279-account-create-update-jh2bs" Nov 28 07:14:11 crc kubenswrapper[4922]: I1128 07:14:11.514111 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-db-create-h9s89"] Nov 28 07:14:11 crc kubenswrapper[4922]: I1128 07:14:11.538171 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mj56d\" (UniqueName: \"kubernetes.io/projected/7a229aad-3dbd-40d3-85ef-38fe07deaf5b-kube-api-access-mj56d\") pod \"keystone-f279-account-create-update-jh2bs\" (UID: \"7a229aad-3dbd-40d3-85ef-38fe07deaf5b\") " pod="openstack/keystone-f279-account-create-update-jh2bs" Nov 28 07:14:11 crc kubenswrapper[4922]: I1128 07:14:11.540580 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"kube-api-access-9br5n\" (UniqueName: \"kubernetes.io/projected/9b8dd495-8375-4779-9d6f-db1c25affa16-kube-api-access-9br5n\") pod \"keystone-db-create-c9zx7\" (UID: \"9b8dd495-8375-4779-9d6f-db1c25affa16\") " pod="openstack/keystone-db-create-c9zx7" Nov 28 07:14:11 crc kubenswrapper[4922]: I1128 07:14:11.583946 4922 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/placement-dbc9-account-create-update-rhbx2"] Nov 28 07:14:11 crc kubenswrapper[4922]: I1128 07:14:11.584921 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-dbc9-account-create-update-rhbx2" Nov 28 07:14:11 crc kubenswrapper[4922]: I1128 07:14:11.587179 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-db-secret" Nov 28 07:14:11 crc kubenswrapper[4922]: I1128 07:14:11.597985 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-dbc9-account-create-update-rhbx2"] Nov 28 07:14:11 crc kubenswrapper[4922]: I1128 07:14:11.614278 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/2d0a1633-2540-4354-93e9-8e963e8245f0-operator-scripts\") pod \"placement-db-create-h9s89\" (UID: \"2d0a1633-2540-4354-93e9-8e963e8245f0\") " pod="openstack/placement-db-create-h9s89" Nov 28 07:14:11 crc kubenswrapper[4922]: I1128 07:14:11.614391 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8p48p\" (UniqueName: \"kubernetes.io/projected/2d0a1633-2540-4354-93e9-8e963e8245f0-kube-api-access-8p48p\") pod \"placement-db-create-h9s89\" (UID: \"2d0a1633-2540-4354-93e9-8e963e8245f0\") " pod="openstack/placement-db-create-h9s89" Nov 28 07:14:11 crc kubenswrapper[4922]: I1128 07:14:11.645472 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-f279-account-create-update-jh2bs" Nov 28 07:14:11 crc kubenswrapper[4922]: I1128 07:14:11.655184 4922 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-db-create-c9zx7" Nov 28 07:14:11 crc kubenswrapper[4922]: I1128 07:14:11.716414 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ce9e0923-85c6-48cc-bfa7-9b51637a188d-operator-scripts\") pod \"placement-dbc9-account-create-update-rhbx2\" (UID: \"ce9e0923-85c6-48cc-bfa7-9b51637a188d\") " pod="openstack/placement-dbc9-account-create-update-rhbx2" Nov 28 07:14:11 crc kubenswrapper[4922]: I1128 07:14:11.716486 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8p48p\" (UniqueName: \"kubernetes.io/projected/2d0a1633-2540-4354-93e9-8e963e8245f0-kube-api-access-8p48p\") pod \"placement-db-create-h9s89\" (UID: \"2d0a1633-2540-4354-93e9-8e963e8245f0\") " pod="openstack/placement-db-create-h9s89" Nov 28 07:14:11 crc kubenswrapper[4922]: I1128 07:14:11.716553 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5zrlv\" (UniqueName: \"kubernetes.io/projected/ce9e0923-85c6-48cc-bfa7-9b51637a188d-kube-api-access-5zrlv\") pod \"placement-dbc9-account-create-update-rhbx2\" (UID: \"ce9e0923-85c6-48cc-bfa7-9b51637a188d\") " pod="openstack/placement-dbc9-account-create-update-rhbx2" Nov 28 07:14:11 crc kubenswrapper[4922]: I1128 07:14:11.716655 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/2d0a1633-2540-4354-93e9-8e963e8245f0-operator-scripts\") pod \"placement-db-create-h9s89\" (UID: \"2d0a1633-2540-4354-93e9-8e963e8245f0\") " pod="openstack/placement-db-create-h9s89" Nov 28 07:14:11 crc kubenswrapper[4922]: I1128 07:14:11.717491 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/2d0a1633-2540-4354-93e9-8e963e8245f0-operator-scripts\") pod \"placement-db-create-h9s89\" (UID: \"2d0a1633-2540-4354-93e9-8e963e8245f0\") " pod="openstack/placement-db-create-h9s89" Nov 28 07:14:11 crc kubenswrapper[4922]: I1128 07:14:11.738886 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8p48p\" (UniqueName: \"kubernetes.io/projected/2d0a1633-2540-4354-93e9-8e963e8245f0-kube-api-access-8p48p\") pod \"placement-db-create-h9s89\" (UID: \"2d0a1633-2540-4354-93e9-8e963e8245f0\") " pod="openstack/placement-db-create-h9s89" Nov 28 07:14:11 crc kubenswrapper[4922]: I1128 07:14:11.807645 4922 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-db-create-n2fp7"] Nov 28 07:14:11 crc kubenswrapper[4922]: I1128 07:14:11.809087 4922 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-db-create-n2fp7" Nov 28 07:14:11 crc kubenswrapper[4922]: I1128 07:14:11.816715 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-db-create-n2fp7"] Nov 28 07:14:11 crc kubenswrapper[4922]: I1128 07:14:11.817781 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ce9e0923-85c6-48cc-bfa7-9b51637a188d-operator-scripts\") pod \"placement-dbc9-account-create-update-rhbx2\" (UID: \"ce9e0923-85c6-48cc-bfa7-9b51637a188d\") " pod="openstack/placement-dbc9-account-create-update-rhbx2" Nov 28 07:14:11 crc kubenswrapper[4922]: I1128 07:14:11.817869 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5zrlv\" (UniqueName: \"kubernetes.io/projected/ce9e0923-85c6-48cc-bfa7-9b51637a188d-kube-api-access-5zrlv\") pod \"placement-dbc9-account-create-update-rhbx2\" (UID: \"ce9e0923-85c6-48cc-bfa7-9b51637a188d\") " pod="openstack/placement-dbc9-account-create-update-rhbx2" Nov 28 07:14:11 crc kubenswrapper[4922]: I1128 07:14:11.818635 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ce9e0923-85c6-48cc-bfa7-9b51637a188d-operator-scripts\") pod \"placement-dbc9-account-create-update-rhbx2\" (UID: \"ce9e0923-85c6-48cc-bfa7-9b51637a188d\") " pod="openstack/placement-dbc9-account-create-update-rhbx2" Nov 28 07:14:11 crc kubenswrapper[4922]: I1128 07:14:11.825705 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-create-h9s89" Nov 28 07:14:11 crc kubenswrapper[4922]: I1128 07:14:11.880694 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5zrlv\" (UniqueName: \"kubernetes.io/projected/ce9e0923-85c6-48cc-bfa7-9b51637a188d-kube-api-access-5zrlv\") pod \"placement-dbc9-account-create-update-rhbx2\" (UID: \"ce9e0923-85c6-48cc-bfa7-9b51637a188d\") " pod="openstack/placement-dbc9-account-create-update-rhbx2" Nov 28 07:14:11 crc kubenswrapper[4922]: I1128 07:14:11.919257 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qtjmc\" (UniqueName: \"kubernetes.io/projected/36015a39-56bc-437e-9cdb-93f8ad059c45-kube-api-access-qtjmc\") pod \"glance-db-create-n2fp7\" (UID: \"36015a39-56bc-437e-9cdb-93f8ad059c45\") " pod="openstack/glance-db-create-n2fp7" Nov 28 07:14:11 crc kubenswrapper[4922]: I1128 07:14:11.919759 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/36015a39-56bc-437e-9cdb-93f8ad059c45-operator-scripts\") pod \"glance-db-create-n2fp7\" (UID: \"36015a39-56bc-437e-9cdb-93f8ad059c45\") " pod="openstack/glance-db-create-n2fp7" Nov 28 07:14:11 crc kubenswrapper[4922]: I1128 07:14:11.931543 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-dbc9-account-create-update-rhbx2" Nov 28 07:14:11 crc kubenswrapper[4922]: I1128 07:14:11.949361 4922 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-75df-account-create-update-mxv52"] Nov 28 07:14:11 crc kubenswrapper[4922]: I1128 07:14:11.950422 4922 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-75df-account-create-update-mxv52" Nov 28 07:14:11 crc kubenswrapper[4922]: I1128 07:14:11.952237 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-db-secret" Nov 28 07:14:11 crc kubenswrapper[4922]: I1128 07:14:11.971356 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-75df-account-create-update-mxv52"] Nov 28 07:14:12 crc kubenswrapper[4922]: I1128 07:14:12.021688 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qtjmc\" (UniqueName: \"kubernetes.io/projected/36015a39-56bc-437e-9cdb-93f8ad059c45-kube-api-access-qtjmc\") pod \"glance-db-create-n2fp7\" (UID: \"36015a39-56bc-437e-9cdb-93f8ad059c45\") " pod="openstack/glance-db-create-n2fp7" Nov 28 07:14:12 crc kubenswrapper[4922]: I1128 07:14:12.021812 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-l85pb\" (UniqueName: \"kubernetes.io/projected/b2273c58-3ea7-4979-a39d-6e9ba47b15aa-kube-api-access-l85pb\") pod \"glance-75df-account-create-update-mxv52\" (UID: \"b2273c58-3ea7-4979-a39d-6e9ba47b15aa\") " pod="openstack/glance-75df-account-create-update-mxv52" Nov 28 07:14:12 crc kubenswrapper[4922]: I1128 07:14:12.021845 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/36015a39-56bc-437e-9cdb-93f8ad059c45-operator-scripts\") pod \"glance-db-create-n2fp7\" (UID: \"36015a39-56bc-437e-9cdb-93f8ad059c45\") " pod="openstack/glance-db-create-n2fp7" Nov 28 07:14:12 crc kubenswrapper[4922]: I1128 07:14:12.021879 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b2273c58-3ea7-4979-a39d-6e9ba47b15aa-operator-scripts\") pod \"glance-75df-account-create-update-mxv52\" (UID: \"b2273c58-3ea7-4979-a39d-6e9ba47b15aa\") " pod="openstack/glance-75df-account-create-update-mxv52" Nov 28 07:14:12 crc kubenswrapper[4922]: I1128 07:14:12.022596 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/36015a39-56bc-437e-9cdb-93f8ad059c45-operator-scripts\") pod \"glance-db-create-n2fp7\" (UID: \"36015a39-56bc-437e-9cdb-93f8ad059c45\") " pod="openstack/glance-db-create-n2fp7" Nov 28 07:14:12 crc kubenswrapper[4922]: I1128 07:14:12.046962 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qtjmc\" (UniqueName: \"kubernetes.io/projected/36015a39-56bc-437e-9cdb-93f8ad059c45-kube-api-access-qtjmc\") pod \"glance-db-create-n2fp7\" (UID: \"36015a39-56bc-437e-9cdb-93f8ad059c45\") " pod="openstack/glance-db-create-n2fp7" Nov 28 07:14:12 crc kubenswrapper[4922]: I1128 07:14:12.123887 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-l85pb\" (UniqueName: \"kubernetes.io/projected/b2273c58-3ea7-4979-a39d-6e9ba47b15aa-kube-api-access-l85pb\") pod \"glance-75df-account-create-update-mxv52\" (UID: \"b2273c58-3ea7-4979-a39d-6e9ba47b15aa\") " pod="openstack/glance-75df-account-create-update-mxv52" Nov 28 07:14:12 crc kubenswrapper[4922]: I1128 07:14:12.123986 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b2273c58-3ea7-4979-a39d-6e9ba47b15aa-operator-scripts\") pod 
\"glance-75df-account-create-update-mxv52\" (UID: \"b2273c58-3ea7-4979-a39d-6e9ba47b15aa\") " pod="openstack/glance-75df-account-create-update-mxv52" Nov 28 07:14:12 crc kubenswrapper[4922]: I1128 07:14:12.124662 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-create-n2fp7" Nov 28 07:14:12 crc kubenswrapper[4922]: I1128 07:14:12.124785 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b2273c58-3ea7-4979-a39d-6e9ba47b15aa-operator-scripts\") pod \"glance-75df-account-create-update-mxv52\" (UID: \"b2273c58-3ea7-4979-a39d-6e9ba47b15aa\") " pod="openstack/glance-75df-account-create-update-mxv52" Nov 28 07:14:12 crc kubenswrapper[4922]: I1128 07:14:12.138900 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-l85pb\" (UniqueName: \"kubernetes.io/projected/b2273c58-3ea7-4979-a39d-6e9ba47b15aa-kube-api-access-l85pb\") pod \"glance-75df-account-create-update-mxv52\" (UID: \"b2273c58-3ea7-4979-a39d-6e9ba47b15aa\") " pod="openstack/glance-75df-account-create-update-mxv52" Nov 28 07:14:12 crc kubenswrapper[4922]: I1128 07:14:12.291805 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-75df-account-create-update-mxv52" Nov 28 07:14:12 crc kubenswrapper[4922]: I1128 07:14:12.732858 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/46c3d0a8-d9ed-419a-baf3-57aaaf0c56fe-etc-swift\") pod \"swift-storage-0\" (UID: \"46c3d0a8-d9ed-419a-baf3-57aaaf0c56fe\") " pod="openstack/swift-storage-0" Nov 28 07:14:12 crc kubenswrapper[4922]: E1128 07:14:12.733023 4922 projected.go:288] Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found Nov 28 07:14:12 crc kubenswrapper[4922]: E1128 07:14:12.733036 4922 projected.go:194] Error preparing data for projected volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not found Nov 28 07:14:12 crc kubenswrapper[4922]: E1128 07:14:12.733081 4922 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/46c3d0a8-d9ed-419a-baf3-57aaaf0c56fe-etc-swift podName:46c3d0a8-d9ed-419a-baf3-57aaaf0c56fe nodeName:}" failed. No retries permitted until 2025-11-28 07:14:20.733067621 +0000 UTC m=+1305.653463203 (durationBeforeRetry 8s). 
Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/46c3d0a8-d9ed-419a-baf3-57aaaf0c56fe-etc-swift") pod "swift-storage-0" (UID: "46c3d0a8-d9ed-419a-baf3-57aaaf0c56fe") : configmap "swift-ring-files" not found Nov 28 07:14:14 crc kubenswrapper[4922]: I1128 07:14:14.193286 4922 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-7c8cb8df65-x8pfz" Nov 28 07:14:14 crc kubenswrapper[4922]: I1128 07:14:14.254944 4922 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-fbf586c4f-9g6bk"] Nov 28 07:14:14 crc kubenswrapper[4922]: I1128 07:14:14.255178 4922 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-fbf586c4f-9g6bk" podUID="5408d508-e0e4-46e7-a4a8-fd6982ad6cfe" containerName="dnsmasq-dns" containerID="cri-o://339298e0efb6ecacf0a683f5348d4f5c3fb972e3d52376870fad160f77cca5ec" gracePeriod=10 Nov 28 07:14:14 crc kubenswrapper[4922]: I1128 07:14:14.451648 4922 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-fbf586c4f-9g6bk" podUID="5408d508-e0e4-46e7-a4a8-fd6982ad6cfe" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.111:5353: connect: connection refused" Nov 28 07:14:16 crc kubenswrapper[4922]: I1128 07:14:16.058096 4922 generic.go:334] "Generic (PLEG): container finished" podID="5408d508-e0e4-46e7-a4a8-fd6982ad6cfe" containerID="339298e0efb6ecacf0a683f5348d4f5c3fb972e3d52376870fad160f77cca5ec" exitCode=0 Nov 28 07:14:16 crc kubenswrapper[4922]: I1128 07:14:16.058267 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-fbf586c4f-9g6bk" event={"ID":"5408d508-e0e4-46e7-a4a8-fd6982ad6cfe","Type":"ContainerDied","Data":"339298e0efb6ecacf0a683f5348d4f5c3fb972e3d52376870fad160f77cca5ec"} Nov 28 07:14:17 crc kubenswrapper[4922]: I1128 07:14:17.172891 4922 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-fbf586c4f-9g6bk" Nov 28 07:14:17 crc kubenswrapper[4922]: I1128 07:14:17.234593 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5408d508-e0e4-46e7-a4a8-fd6982ad6cfe-config\") pod \"5408d508-e0e4-46e7-a4a8-fd6982ad6cfe\" (UID: \"5408d508-e0e4-46e7-a4a8-fd6982ad6cfe\") " Nov 28 07:14:17 crc kubenswrapper[4922]: I1128 07:14:17.234738 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wggp4\" (UniqueName: \"kubernetes.io/projected/5408d508-e0e4-46e7-a4a8-fd6982ad6cfe-kube-api-access-wggp4\") pod \"5408d508-e0e4-46e7-a4a8-fd6982ad6cfe\" (UID: \"5408d508-e0e4-46e7-a4a8-fd6982ad6cfe\") " Nov 28 07:14:17 crc kubenswrapper[4922]: I1128 07:14:17.234778 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/5408d508-e0e4-46e7-a4a8-fd6982ad6cfe-ovsdbserver-sb\") pod \"5408d508-e0e4-46e7-a4a8-fd6982ad6cfe\" (UID: \"5408d508-e0e4-46e7-a4a8-fd6982ad6cfe\") " Nov 28 07:14:17 crc kubenswrapper[4922]: I1128 07:14:17.234870 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/5408d508-e0e4-46e7-a4a8-fd6982ad6cfe-dns-svc\") pod \"5408d508-e0e4-46e7-a4a8-fd6982ad6cfe\" (UID: \"5408d508-e0e4-46e7-a4a8-fd6982ad6cfe\") " Nov 28 07:14:17 crc kubenswrapper[4922]: I1128 07:14:17.239605 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5408d508-e0e4-46e7-a4a8-fd6982ad6cfe-kube-api-access-wggp4" (OuterVolumeSpecName: "kube-api-access-wggp4") pod "5408d508-e0e4-46e7-a4a8-fd6982ad6cfe" (UID: "5408d508-e0e4-46e7-a4a8-fd6982ad6cfe"). InnerVolumeSpecName "kube-api-access-wggp4". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 07:14:17 crc kubenswrapper[4922]: I1128 07:14:17.336496 4922 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wggp4\" (UniqueName: \"kubernetes.io/projected/5408d508-e0e4-46e7-a4a8-fd6982ad6cfe-kube-api-access-wggp4\") on node \"crc\" DevicePath \"\"" Nov 28 07:14:17 crc kubenswrapper[4922]: I1128 07:14:17.344350 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5408d508-e0e4-46e7-a4a8-fd6982ad6cfe-config" (OuterVolumeSpecName: "config") pod "5408d508-e0e4-46e7-a4a8-fd6982ad6cfe" (UID: "5408d508-e0e4-46e7-a4a8-fd6982ad6cfe"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 07:14:17 crc kubenswrapper[4922]: I1128 07:14:17.362459 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5408d508-e0e4-46e7-a4a8-fd6982ad6cfe-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "5408d508-e0e4-46e7-a4a8-fd6982ad6cfe" (UID: "5408d508-e0e4-46e7-a4a8-fd6982ad6cfe"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 07:14:17 crc kubenswrapper[4922]: I1128 07:14:17.367719 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5408d508-e0e4-46e7-a4a8-fd6982ad6cfe-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "5408d508-e0e4-46e7-a4a8-fd6982ad6cfe" (UID: "5408d508-e0e4-46e7-a4a8-fd6982ad6cfe"). InnerVolumeSpecName "dns-svc". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 07:14:17 crc kubenswrapper[4922]: I1128 07:14:17.435652 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-75df-account-create-update-mxv52"] Nov 28 07:14:17 crc kubenswrapper[4922]: I1128 07:14:17.438131 4922 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/5408d508-e0e4-46e7-a4a8-fd6982ad6cfe-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 28 07:14:17 crc kubenswrapper[4922]: I1128 07:14:17.438162 4922 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/5408d508-e0e4-46e7-a4a8-fd6982ad6cfe-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 28 07:14:17 crc kubenswrapper[4922]: I1128 07:14:17.438171 4922 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5408d508-e0e4-46e7-a4a8-fd6982ad6cfe-config\") on node \"crc\" DevicePath \"\"" Nov 28 07:14:17 crc kubenswrapper[4922]: I1128 07:14:17.619214 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-dbc9-account-create-update-rhbx2"] Nov 28 07:14:17 crc kubenswrapper[4922]: I1128 07:14:17.634357 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-f279-account-create-update-jh2bs"] Nov 28 07:14:17 crc kubenswrapper[4922]: W1128 07:14:17.639724 4922 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod7a229aad_3dbd_40d3_85ef_38fe07deaf5b.slice/crio-add549adf2d1116c77c1775078c44c1b02ec39e652f777643f6e3947cb0cbca3 WatchSource:0}: Error finding container add549adf2d1116c77c1775078c44c1b02ec39e652f777643f6e3947cb0cbca3: Status 404 returned error can't find the container with id add549adf2d1116c77c1775078c44c1b02ec39e652f777643f6e3947cb0cbca3 Nov 28 07:14:17 crc kubenswrapper[4922]: I1128 07:14:17.650643 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-db-create-n2fp7"] Nov 28 07:14:17 crc kubenswrapper[4922]: W1128 07:14:17.650978 4922 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod9b8dd495_8375_4779_9d6f_db1c25affa16.slice/crio-ce19099eb9b7fa2fc69a35e243f1b1e93b713106a4638fc7b33f13d167faec47 WatchSource:0}: Error finding container ce19099eb9b7fa2fc69a35e243f1b1e93b713106a4638fc7b33f13d167faec47: Status 404 returned error can't find the container with id ce19099eb9b7fa2fc69a35e243f1b1e93b713106a4638fc7b33f13d167faec47 Nov 28 07:14:17 crc kubenswrapper[4922]: I1128 07:14:17.662426 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-db-create-h9s89"] Nov 28 07:14:17 crc kubenswrapper[4922]: I1128 07:14:17.669144 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-db-create-c9zx7"] Nov 28 07:14:18 crc kubenswrapper[4922]: I1128 07:14:18.077434 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-f279-account-create-update-jh2bs" event={"ID":"7a229aad-3dbd-40d3-85ef-38fe07deaf5b","Type":"ContainerStarted","Data":"d63f5032ec7eec07123d8a155a004097f6b3f389d18cf7e99c5e839856a73538"} Nov 28 07:14:18 crc kubenswrapper[4922]: I1128 07:14:18.077795 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-f279-account-create-update-jh2bs" 
event={"ID":"7a229aad-3dbd-40d3-85ef-38fe07deaf5b","Type":"ContainerStarted","Data":"add549adf2d1116c77c1775078c44c1b02ec39e652f777643f6e3947cb0cbca3"} Nov 28 07:14:18 crc kubenswrapper[4922]: I1128 07:14:18.079527 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-ring-rebalance-6kzln" event={"ID":"b78e6a70-a315-4c6d-8731-a21335e18766","Type":"ContainerStarted","Data":"60b74196fbe2ff1313cee7aed6bbe3eed676d5ed07bfc2f788f42483321a3e19"} Nov 28 07:14:18 crc kubenswrapper[4922]: I1128 07:14:18.082372 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-create-n2fp7" event={"ID":"36015a39-56bc-437e-9cdb-93f8ad059c45","Type":"ContainerStarted","Data":"7f03b582aec1162263159ea6da47f876369f0dc7f8a0f4cc453baf73b8acf408"} Nov 28 07:14:18 crc kubenswrapper[4922]: I1128 07:14:18.082442 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-create-n2fp7" event={"ID":"36015a39-56bc-437e-9cdb-93f8ad059c45","Type":"ContainerStarted","Data":"d3ba3f9cedfcb9ba42222a4aa16225f5b2102c950cff1620114f67959f8fa0ca"} Nov 28 07:14:18 crc kubenswrapper[4922]: I1128 07:14:18.084068 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-create-h9s89" event={"ID":"2d0a1633-2540-4354-93e9-8e963e8245f0","Type":"ContainerStarted","Data":"dc917e5ac2177fcf0349991256f89d7daab4e05658a7642e65f2e84ec52e5091"} Nov 28 07:14:18 crc kubenswrapper[4922]: I1128 07:14:18.084107 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-create-h9s89" event={"ID":"2d0a1633-2540-4354-93e9-8e963e8245f0","Type":"ContainerStarted","Data":"fc06b482c057752602eff51989f9422bda4c2f2e8cb3c22b5847e5668febb710"} Nov 28 07:14:18 crc kubenswrapper[4922]: I1128 07:14:18.089958 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-fbf586c4f-9g6bk" event={"ID":"5408d508-e0e4-46e7-a4a8-fd6982ad6cfe","Type":"ContainerDied","Data":"644781ac4a58aed9756401a1e2268a96d7b03e5d8d7beba51cf4d2f39e366583"} Nov 28 07:14:18 crc kubenswrapper[4922]: I1128 07:14:18.089994 4922 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-fbf586c4f-9g6bk" Nov 28 07:14:18 crc kubenswrapper[4922]: I1128 07:14:18.090028 4922 scope.go:117] "RemoveContainer" containerID="339298e0efb6ecacf0a683f5348d4f5c3fb972e3d52376870fad160f77cca5ec" Nov 28 07:14:18 crc kubenswrapper[4922]: I1128 07:14:18.093962 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-create-c9zx7" event={"ID":"9b8dd495-8375-4779-9d6f-db1c25affa16","Type":"ContainerStarted","Data":"bb427b6ae33fbc331bbaba903c4b8ab50fb72e61639236cfc209beabc0998499"} Nov 28 07:14:18 crc kubenswrapper[4922]: I1128 07:14:18.094023 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-create-c9zx7" event={"ID":"9b8dd495-8375-4779-9d6f-db1c25affa16","Type":"ContainerStarted","Data":"ce19099eb9b7fa2fc69a35e243f1b1e93b713106a4638fc7b33f13d167faec47"} Nov 28 07:14:18 crc kubenswrapper[4922]: I1128 07:14:18.094297 4922 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-f279-account-create-update-jh2bs" podStartSLOduration=7.094275183 podStartE2EDuration="7.094275183s" podCreationTimestamp="2025-11-28 07:14:11 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 07:14:18.091793507 +0000 UTC m=+1303.012189089" watchObservedRunningTime="2025-11-28 07:14:18.094275183 +0000 UTC m=+1303.014670785" Nov 28 07:14:18 crc kubenswrapper[4922]: I1128 07:14:18.100040 4922 generic.go:334] "Generic (PLEG): container finished" podID="ce9e0923-85c6-48cc-bfa7-9b51637a188d" containerID="6be1d97db640d5474aaa7b9a9a30f8a39026dbc0c5f30d3287779f0276df5d19" exitCode=0 Nov 28 07:14:18 crc kubenswrapper[4922]: I1128 07:14:18.100136 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-dbc9-account-create-update-rhbx2" event={"ID":"ce9e0923-85c6-48cc-bfa7-9b51637a188d","Type":"ContainerDied","Data":"6be1d97db640d5474aaa7b9a9a30f8a39026dbc0c5f30d3287779f0276df5d19"} Nov 28 07:14:18 crc kubenswrapper[4922]: I1128 07:14:18.100163 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-dbc9-account-create-update-rhbx2" event={"ID":"ce9e0923-85c6-48cc-bfa7-9b51637a188d","Type":"ContainerStarted","Data":"7e4e6429b961b30a28e2a5d70f2aef9de5165c43a32cc679e37ad06539fb6cca"} Nov 28 07:14:18 crc kubenswrapper[4922]: I1128 07:14:18.103802 4922 generic.go:334] "Generic (PLEG): container finished" podID="b2273c58-3ea7-4979-a39d-6e9ba47b15aa" containerID="015faea8c54059d2c396bd52bbb248295f40419e62f3e4d0f792f4a2770227e6" exitCode=0 Nov 28 07:14:18 crc kubenswrapper[4922]: I1128 07:14:18.103859 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-75df-account-create-update-mxv52" event={"ID":"b2273c58-3ea7-4979-a39d-6e9ba47b15aa","Type":"ContainerDied","Data":"015faea8c54059d2c396bd52bbb248295f40419e62f3e4d0f792f4a2770227e6"} Nov 28 07:14:18 crc kubenswrapper[4922]: I1128 07:14:18.103886 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-75df-account-create-update-mxv52" event={"ID":"b2273c58-3ea7-4979-a39d-6e9ba47b15aa","Type":"ContainerStarted","Data":"61323818777a864c1e73a2bed2f65b025cf966990d18642a0bd3605fb2081c92"} Nov 28 07:14:18 crc kubenswrapper[4922]: I1128 07:14:18.117109 4922 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-db-create-n2fp7" podStartSLOduration=7.117089489 podStartE2EDuration="7.117089489s" podCreationTimestamp="2025-11-28 07:14:11 
+0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 07:14:18.114386457 +0000 UTC m=+1303.034782039" watchObservedRunningTime="2025-11-28 07:14:18.117089489 +0000 UTC m=+1303.037485091" Nov 28 07:14:18 crc kubenswrapper[4922]: I1128 07:14:18.131548 4922 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/swift-ring-rebalance-6kzln" podStartSLOduration=2.374969938 podStartE2EDuration="13.131528221s" podCreationTimestamp="2025-11-28 07:14:05 +0000 UTC" firstStartedPulling="2025-11-28 07:14:06.244962883 +0000 UTC m=+1291.165358465" lastFinishedPulling="2025-11-28 07:14:17.001521156 +0000 UTC m=+1301.921916748" observedRunningTime="2025-11-28 07:14:18.128107761 +0000 UTC m=+1303.048503343" watchObservedRunningTime="2025-11-28 07:14:18.131528221 +0000 UTC m=+1303.051923803" Nov 28 07:14:18 crc kubenswrapper[4922]: I1128 07:14:18.176589 4922 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-db-create-c9zx7" podStartSLOduration=7.176571426 podStartE2EDuration="7.176571426s" podCreationTimestamp="2025-11-28 07:14:11 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 07:14:18.175641972 +0000 UTC m=+1303.096037584" watchObservedRunningTime="2025-11-28 07:14:18.176571426 +0000 UTC m=+1303.096967038" Nov 28 07:14:18 crc kubenswrapper[4922]: I1128 07:14:18.249778 4922 scope.go:117] "RemoveContainer" containerID="e80a187490ec82864b3efacf3b51a62d769bcb518d1bc1c78a7c77f93fabb92e" Nov 28 07:14:18 crc kubenswrapper[4922]: I1128 07:14:18.259869 4922 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-fbf586c4f-9g6bk"] Nov 28 07:14:18 crc kubenswrapper[4922]: I1128 07:14:18.282601 4922 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-fbf586c4f-9g6bk"] Nov 28 07:14:19 crc kubenswrapper[4922]: I1128 07:14:19.119062 4922 generic.go:334] "Generic (PLEG): container finished" podID="9b8dd495-8375-4779-9d6f-db1c25affa16" containerID="bb427b6ae33fbc331bbaba903c4b8ab50fb72e61639236cfc209beabc0998499" exitCode=0 Nov 28 07:14:19 crc kubenswrapper[4922]: I1128 07:14:19.119110 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-create-c9zx7" event={"ID":"9b8dd495-8375-4779-9d6f-db1c25affa16","Type":"ContainerDied","Data":"bb427b6ae33fbc331bbaba903c4b8ab50fb72e61639236cfc209beabc0998499"} Nov 28 07:14:19 crc kubenswrapper[4922]: I1128 07:14:19.121352 4922 generic.go:334] "Generic (PLEG): container finished" podID="4cf25acc-0d60-4b0a-a9c9-adc7ddce7458" containerID="33fac45176641182615e4dbf61bb82f263874c2935f70fa33ef270e4398e93a2" exitCode=0 Nov 28 07:14:19 crc kubenswrapper[4922]: I1128 07:14:19.121406 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"4cf25acc-0d60-4b0a-a9c9-adc7ddce7458","Type":"ContainerDied","Data":"33fac45176641182615e4dbf61bb82f263874c2935f70fa33ef270e4398e93a2"} Nov 28 07:14:19 crc kubenswrapper[4922]: I1128 07:14:19.123333 4922 generic.go:334] "Generic (PLEG): container finished" podID="7a229aad-3dbd-40d3-85ef-38fe07deaf5b" containerID="d63f5032ec7eec07123d8a155a004097f6b3f389d18cf7e99c5e839856a73538" exitCode=0 Nov 28 07:14:19 crc kubenswrapper[4922]: I1128 07:14:19.123478 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-f279-account-create-update-jh2bs" 
event={"ID":"7a229aad-3dbd-40d3-85ef-38fe07deaf5b","Type":"ContainerDied","Data":"d63f5032ec7eec07123d8a155a004097f6b3f389d18cf7e99c5e839856a73538"} Nov 28 07:14:19 crc kubenswrapper[4922]: I1128 07:14:19.124795 4922 generic.go:334] "Generic (PLEG): container finished" podID="36015a39-56bc-437e-9cdb-93f8ad059c45" containerID="7f03b582aec1162263159ea6da47f876369f0dc7f8a0f4cc453baf73b8acf408" exitCode=0 Nov 28 07:14:19 crc kubenswrapper[4922]: I1128 07:14:19.124872 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-create-n2fp7" event={"ID":"36015a39-56bc-437e-9cdb-93f8ad059c45","Type":"ContainerDied","Data":"7f03b582aec1162263159ea6da47f876369f0dc7f8a0f4cc453baf73b8acf408"} Nov 28 07:14:19 crc kubenswrapper[4922]: I1128 07:14:19.126259 4922 generic.go:334] "Generic (PLEG): container finished" podID="2d0a1633-2540-4354-93e9-8e963e8245f0" containerID="dc917e5ac2177fcf0349991256f89d7daab4e05658a7642e65f2e84ec52e5091" exitCode=0 Nov 28 07:14:19 crc kubenswrapper[4922]: I1128 07:14:19.127276 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-create-h9s89" event={"ID":"2d0a1633-2540-4354-93e9-8e963e8245f0","Type":"ContainerDied","Data":"dc917e5ac2177fcf0349991256f89d7daab4e05658a7642e65f2e84ec52e5091"} Nov 28 07:14:19 crc kubenswrapper[4922]: I1128 07:14:19.422602 4922 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5408d508-e0e4-46e7-a4a8-fd6982ad6cfe" path="/var/lib/kubelet/pods/5408d508-e0e4-46e7-a4a8-fd6982ad6cfe/volumes" Nov 28 07:14:19 crc kubenswrapper[4922]: I1128 07:14:19.532052 4922 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-dbc9-account-create-update-rhbx2" Nov 28 07:14:19 crc kubenswrapper[4922]: I1128 07:14:19.577722 4922 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-75df-account-create-update-mxv52" Nov 28 07:14:19 crc kubenswrapper[4922]: I1128 07:14:19.584720 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ce9e0923-85c6-48cc-bfa7-9b51637a188d-operator-scripts\") pod \"ce9e0923-85c6-48cc-bfa7-9b51637a188d\" (UID: \"ce9e0923-85c6-48cc-bfa7-9b51637a188d\") " Nov 28 07:14:19 crc kubenswrapper[4922]: I1128 07:14:19.584904 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5zrlv\" (UniqueName: \"kubernetes.io/projected/ce9e0923-85c6-48cc-bfa7-9b51637a188d-kube-api-access-5zrlv\") pod \"ce9e0923-85c6-48cc-bfa7-9b51637a188d\" (UID: \"ce9e0923-85c6-48cc-bfa7-9b51637a188d\") " Nov 28 07:14:19 crc kubenswrapper[4922]: I1128 07:14:19.585950 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ce9e0923-85c6-48cc-bfa7-9b51637a188d-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "ce9e0923-85c6-48cc-bfa7-9b51637a188d" (UID: "ce9e0923-85c6-48cc-bfa7-9b51637a188d"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 07:14:19 crc kubenswrapper[4922]: I1128 07:14:19.593461 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ce9e0923-85c6-48cc-bfa7-9b51637a188d-kube-api-access-5zrlv" (OuterVolumeSpecName: "kube-api-access-5zrlv") pod "ce9e0923-85c6-48cc-bfa7-9b51637a188d" (UID: "ce9e0923-85c6-48cc-bfa7-9b51637a188d"). InnerVolumeSpecName "kube-api-access-5zrlv". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 07:14:19 crc kubenswrapper[4922]: I1128 07:14:19.648252 4922 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-create-h9s89" Nov 28 07:14:19 crc kubenswrapper[4922]: I1128 07:14:19.686190 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8p48p\" (UniqueName: \"kubernetes.io/projected/2d0a1633-2540-4354-93e9-8e963e8245f0-kube-api-access-8p48p\") pod \"2d0a1633-2540-4354-93e9-8e963e8245f0\" (UID: \"2d0a1633-2540-4354-93e9-8e963e8245f0\") " Nov 28 07:14:19 crc kubenswrapper[4922]: I1128 07:14:19.686367 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-l85pb\" (UniqueName: \"kubernetes.io/projected/b2273c58-3ea7-4979-a39d-6e9ba47b15aa-kube-api-access-l85pb\") pod \"b2273c58-3ea7-4979-a39d-6e9ba47b15aa\" (UID: \"b2273c58-3ea7-4979-a39d-6e9ba47b15aa\") " Nov 28 07:14:19 crc kubenswrapper[4922]: I1128 07:14:19.686415 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b2273c58-3ea7-4979-a39d-6e9ba47b15aa-operator-scripts\") pod \"b2273c58-3ea7-4979-a39d-6e9ba47b15aa\" (UID: \"b2273c58-3ea7-4979-a39d-6e9ba47b15aa\") " Nov 28 07:14:19 crc kubenswrapper[4922]: I1128 07:14:19.686511 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/2d0a1633-2540-4354-93e9-8e963e8245f0-operator-scripts\") pod \"2d0a1633-2540-4354-93e9-8e963e8245f0\" (UID: \"2d0a1633-2540-4354-93e9-8e963e8245f0\") " Nov 28 07:14:19 crc kubenswrapper[4922]: I1128 07:14:19.687019 4922 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5zrlv\" (UniqueName: \"kubernetes.io/projected/ce9e0923-85c6-48cc-bfa7-9b51637a188d-kube-api-access-5zrlv\") on node \"crc\" DevicePath \"\"" Nov 28 07:14:19 crc kubenswrapper[4922]: I1128 07:14:19.687067 4922 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ce9e0923-85c6-48cc-bfa7-9b51637a188d-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 07:14:19 crc kubenswrapper[4922]: I1128 07:14:19.687180 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b2273c58-3ea7-4979-a39d-6e9ba47b15aa-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "b2273c58-3ea7-4979-a39d-6e9ba47b15aa" (UID: "b2273c58-3ea7-4979-a39d-6e9ba47b15aa"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 07:14:19 crc kubenswrapper[4922]: I1128 07:14:19.687185 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2d0a1633-2540-4354-93e9-8e963e8245f0-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "2d0a1633-2540-4354-93e9-8e963e8245f0" (UID: "2d0a1633-2540-4354-93e9-8e963e8245f0"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 07:14:19 crc kubenswrapper[4922]: I1128 07:14:19.689683 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2d0a1633-2540-4354-93e9-8e963e8245f0-kube-api-access-8p48p" (OuterVolumeSpecName: "kube-api-access-8p48p") pod "2d0a1633-2540-4354-93e9-8e963e8245f0" (UID: "2d0a1633-2540-4354-93e9-8e963e8245f0"). InnerVolumeSpecName "kube-api-access-8p48p". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 07:14:19 crc kubenswrapper[4922]: I1128 07:14:19.689876 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b2273c58-3ea7-4979-a39d-6e9ba47b15aa-kube-api-access-l85pb" (OuterVolumeSpecName: "kube-api-access-l85pb") pod "b2273c58-3ea7-4979-a39d-6e9ba47b15aa" (UID: "b2273c58-3ea7-4979-a39d-6e9ba47b15aa"). InnerVolumeSpecName "kube-api-access-l85pb". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 07:14:19 crc kubenswrapper[4922]: I1128 07:14:19.787935 4922 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/2d0a1633-2540-4354-93e9-8e963e8245f0-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 07:14:19 crc kubenswrapper[4922]: I1128 07:14:19.787989 4922 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8p48p\" (UniqueName: \"kubernetes.io/projected/2d0a1633-2540-4354-93e9-8e963e8245f0-kube-api-access-8p48p\") on node \"crc\" DevicePath \"\"" Nov 28 07:14:19 crc kubenswrapper[4922]: I1128 07:14:19.788011 4922 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-l85pb\" (UniqueName: \"kubernetes.io/projected/b2273c58-3ea7-4979-a39d-6e9ba47b15aa-kube-api-access-l85pb\") on node \"crc\" DevicePath \"\"" Nov 28 07:14:19 crc kubenswrapper[4922]: I1128 07:14:19.788030 4922 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b2273c58-3ea7-4979-a39d-6e9ba47b15aa-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 07:14:20 crc kubenswrapper[4922]: I1128 07:14:20.136930 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-dbc9-account-create-update-rhbx2" event={"ID":"ce9e0923-85c6-48cc-bfa7-9b51637a188d","Type":"ContainerDied","Data":"7e4e6429b961b30a28e2a5d70f2aef9de5165c43a32cc679e37ad06539fb6cca"} Nov 28 07:14:20 crc kubenswrapper[4922]: I1128 07:14:20.137183 4922 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="7e4e6429b961b30a28e2a5d70f2aef9de5165c43a32cc679e37ad06539fb6cca" Nov 28 07:14:20 crc kubenswrapper[4922]: I1128 07:14:20.137256 4922 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-dbc9-account-create-update-rhbx2" Nov 28 07:14:20 crc kubenswrapper[4922]: I1128 07:14:20.140059 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-75df-account-create-update-mxv52" event={"ID":"b2273c58-3ea7-4979-a39d-6e9ba47b15aa","Type":"ContainerDied","Data":"61323818777a864c1e73a2bed2f65b025cf966990d18642a0bd3605fb2081c92"} Nov 28 07:14:20 crc kubenswrapper[4922]: I1128 07:14:20.140078 4922 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-75df-account-create-update-mxv52" Nov 28 07:14:20 crc kubenswrapper[4922]: I1128 07:14:20.140081 4922 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="61323818777a864c1e73a2bed2f65b025cf966990d18642a0bd3605fb2081c92" Nov 28 07:14:20 crc kubenswrapper[4922]: I1128 07:14:20.142031 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"4cf25acc-0d60-4b0a-a9c9-adc7ddce7458","Type":"ContainerStarted","Data":"0b65d7751c631796afddb9d5cb6be8b33791f093200379b257e1458a02ef94be"} Nov 28 07:14:20 crc kubenswrapper[4922]: I1128 07:14:20.142301 4922 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/rabbitmq-server-0" Nov 28 07:14:20 crc kubenswrapper[4922]: I1128 07:14:20.143459 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-create-h9s89" event={"ID":"2d0a1633-2540-4354-93e9-8e963e8245f0","Type":"ContainerDied","Data":"fc06b482c057752602eff51989f9422bda4c2f2e8cb3c22b5847e5668febb710"} Nov 28 07:14:20 crc kubenswrapper[4922]: I1128 07:14:20.143481 4922 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="fc06b482c057752602eff51989f9422bda4c2f2e8cb3c22b5847e5668febb710" Nov 28 07:14:20 crc kubenswrapper[4922]: I1128 07:14:20.143653 4922 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-create-h9s89" Nov 28 07:14:20 crc kubenswrapper[4922]: I1128 07:14:20.524509 4922 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-f279-account-create-update-jh2bs" Nov 28 07:14:20 crc kubenswrapper[4922]: I1128 07:14:20.552728 4922 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/rabbitmq-server-0" podStartSLOduration=36.889494556 podStartE2EDuration="1m3.552708682s" podCreationTimestamp="2025-11-28 07:13:17 +0000 UTC" firstStartedPulling="2025-11-28 07:13:19.14477296 +0000 UTC m=+1244.065168542" lastFinishedPulling="2025-11-28 07:13:45.807987076 +0000 UTC m=+1270.728382668" observedRunningTime="2025-11-28 07:14:20.196663206 +0000 UTC m=+1305.117058808" watchObservedRunningTime="2025-11-28 07:14:20.552708682 +0000 UTC m=+1305.473104264" Nov 28 07:14:20 crc kubenswrapper[4922]: I1128 07:14:20.676312 4922 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-create-c9zx7" Nov 28 07:14:20 crc kubenswrapper[4922]: I1128 07:14:20.681967 4922 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-db-create-n2fp7" Nov 28 07:14:20 crc kubenswrapper[4922]: I1128 07:14:20.718268 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mj56d\" (UniqueName: \"kubernetes.io/projected/7a229aad-3dbd-40d3-85ef-38fe07deaf5b-kube-api-access-mj56d\") pod \"7a229aad-3dbd-40d3-85ef-38fe07deaf5b\" (UID: \"7a229aad-3dbd-40d3-85ef-38fe07deaf5b\") " Nov 28 07:14:20 crc kubenswrapper[4922]: I1128 07:14:20.718397 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/7a229aad-3dbd-40d3-85ef-38fe07deaf5b-operator-scripts\") pod \"7a229aad-3dbd-40d3-85ef-38fe07deaf5b\" (UID: \"7a229aad-3dbd-40d3-85ef-38fe07deaf5b\") " Nov 28 07:14:20 crc kubenswrapper[4922]: I1128 07:14:20.719853 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7a229aad-3dbd-40d3-85ef-38fe07deaf5b-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "7a229aad-3dbd-40d3-85ef-38fe07deaf5b" (UID: "7a229aad-3dbd-40d3-85ef-38fe07deaf5b"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 07:14:20 crc kubenswrapper[4922]: I1128 07:14:20.742920 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7a229aad-3dbd-40d3-85ef-38fe07deaf5b-kube-api-access-mj56d" (OuterVolumeSpecName: "kube-api-access-mj56d") pod "7a229aad-3dbd-40d3-85ef-38fe07deaf5b" (UID: "7a229aad-3dbd-40d3-85ef-38fe07deaf5b"). InnerVolumeSpecName "kube-api-access-mj56d". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 07:14:20 crc kubenswrapper[4922]: I1128 07:14:20.820805 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9br5n\" (UniqueName: \"kubernetes.io/projected/9b8dd495-8375-4779-9d6f-db1c25affa16-kube-api-access-9br5n\") pod \"9b8dd495-8375-4779-9d6f-db1c25affa16\" (UID: \"9b8dd495-8375-4779-9d6f-db1c25affa16\") " Nov 28 07:14:20 crc kubenswrapper[4922]: I1128 07:14:20.820900 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qtjmc\" (UniqueName: \"kubernetes.io/projected/36015a39-56bc-437e-9cdb-93f8ad059c45-kube-api-access-qtjmc\") pod \"36015a39-56bc-437e-9cdb-93f8ad059c45\" (UID: \"36015a39-56bc-437e-9cdb-93f8ad059c45\") " Nov 28 07:14:20 crc kubenswrapper[4922]: I1128 07:14:20.820964 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/36015a39-56bc-437e-9cdb-93f8ad059c45-operator-scripts\") pod \"36015a39-56bc-437e-9cdb-93f8ad059c45\" (UID: \"36015a39-56bc-437e-9cdb-93f8ad059c45\") " Nov 28 07:14:20 crc kubenswrapper[4922]: I1128 07:14:20.821064 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/9b8dd495-8375-4779-9d6f-db1c25affa16-operator-scripts\") pod \"9b8dd495-8375-4779-9d6f-db1c25affa16\" (UID: \"9b8dd495-8375-4779-9d6f-db1c25affa16\") " Nov 28 07:14:20 crc kubenswrapper[4922]: I1128 07:14:20.821311 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/46c3d0a8-d9ed-419a-baf3-57aaaf0c56fe-etc-swift\") pod \"swift-storage-0\" (UID: \"46c3d0a8-d9ed-419a-baf3-57aaaf0c56fe\") " pod="openstack/swift-storage-0" Nov 28 07:14:20 crc kubenswrapper[4922]: I1128 
07:14:20.821465 4922 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/7a229aad-3dbd-40d3-85ef-38fe07deaf5b-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 07:14:20 crc kubenswrapper[4922]: I1128 07:14:20.821481 4922 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mj56d\" (UniqueName: \"kubernetes.io/projected/7a229aad-3dbd-40d3-85ef-38fe07deaf5b-kube-api-access-mj56d\") on node \"crc\" DevicePath \"\"" Nov 28 07:14:20 crc kubenswrapper[4922]: E1128 07:14:20.821577 4922 projected.go:288] Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found Nov 28 07:14:20 crc kubenswrapper[4922]: E1128 07:14:20.821592 4922 projected.go:194] Error preparing data for projected volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not found Nov 28 07:14:20 crc kubenswrapper[4922]: E1128 07:14:20.821634 4922 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/46c3d0a8-d9ed-419a-baf3-57aaaf0c56fe-etc-swift podName:46c3d0a8-d9ed-419a-baf3-57aaaf0c56fe nodeName:}" failed. No retries permitted until 2025-11-28 07:14:36.821620506 +0000 UTC m=+1321.742016088 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/46c3d0a8-d9ed-419a-baf3-57aaaf0c56fe-etc-swift") pod "swift-storage-0" (UID: "46c3d0a8-d9ed-419a-baf3-57aaaf0c56fe") : configmap "swift-ring-files" not found Nov 28 07:14:20 crc kubenswrapper[4922]: I1128 07:14:20.821684 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/36015a39-56bc-437e-9cdb-93f8ad059c45-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "36015a39-56bc-437e-9cdb-93f8ad059c45" (UID: "36015a39-56bc-437e-9cdb-93f8ad059c45"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 07:14:20 crc kubenswrapper[4922]: I1128 07:14:20.822098 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9b8dd495-8375-4779-9d6f-db1c25affa16-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "9b8dd495-8375-4779-9d6f-db1c25affa16" (UID: "9b8dd495-8375-4779-9d6f-db1c25affa16"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 07:14:20 crc kubenswrapper[4922]: I1128 07:14:20.825585 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/36015a39-56bc-437e-9cdb-93f8ad059c45-kube-api-access-qtjmc" (OuterVolumeSpecName: "kube-api-access-qtjmc") pod "36015a39-56bc-437e-9cdb-93f8ad059c45" (UID: "36015a39-56bc-437e-9cdb-93f8ad059c45"). InnerVolumeSpecName "kube-api-access-qtjmc". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 07:14:20 crc kubenswrapper[4922]: I1128 07:14:20.826416 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9b8dd495-8375-4779-9d6f-db1c25affa16-kube-api-access-9br5n" (OuterVolumeSpecName: "kube-api-access-9br5n") pod "9b8dd495-8375-4779-9d6f-db1c25affa16" (UID: "9b8dd495-8375-4779-9d6f-db1c25affa16"). InnerVolumeSpecName "kube-api-access-9br5n". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 07:14:20 crc kubenswrapper[4922]: I1128 07:14:20.923114 4922 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/9b8dd495-8375-4779-9d6f-db1c25affa16-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 07:14:20 crc kubenswrapper[4922]: I1128 07:14:20.923427 4922 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9br5n\" (UniqueName: \"kubernetes.io/projected/9b8dd495-8375-4779-9d6f-db1c25affa16-kube-api-access-9br5n\") on node \"crc\" DevicePath \"\"" Nov 28 07:14:20 crc kubenswrapper[4922]: I1128 07:14:20.923438 4922 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qtjmc\" (UniqueName: \"kubernetes.io/projected/36015a39-56bc-437e-9cdb-93f8ad059c45-kube-api-access-qtjmc\") on node \"crc\" DevicePath \"\"" Nov 28 07:14:20 crc kubenswrapper[4922]: I1128 07:14:20.923446 4922 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/36015a39-56bc-437e-9cdb-93f8ad059c45-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 07:14:21 crc kubenswrapper[4922]: I1128 07:14:21.154156 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-f279-account-create-update-jh2bs" event={"ID":"7a229aad-3dbd-40d3-85ef-38fe07deaf5b","Type":"ContainerDied","Data":"add549adf2d1116c77c1775078c44c1b02ec39e652f777643f6e3947cb0cbca3"} Nov 28 07:14:21 crc kubenswrapper[4922]: I1128 07:14:21.154211 4922 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="add549adf2d1116c77c1775078c44c1b02ec39e652f777643f6e3947cb0cbca3" Nov 28 07:14:21 crc kubenswrapper[4922]: I1128 07:14:21.154259 4922 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-f279-account-create-update-jh2bs" Nov 28 07:14:21 crc kubenswrapper[4922]: I1128 07:14:21.163080 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-create-n2fp7" event={"ID":"36015a39-56bc-437e-9cdb-93f8ad059c45","Type":"ContainerDied","Data":"d3ba3f9cedfcb9ba42222a4aa16225f5b2102c950cff1620114f67959f8fa0ca"} Nov 28 07:14:21 crc kubenswrapper[4922]: I1128 07:14:21.163129 4922 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="d3ba3f9cedfcb9ba42222a4aa16225f5b2102c950cff1620114f67959f8fa0ca" Nov 28 07:14:21 crc kubenswrapper[4922]: I1128 07:14:21.163204 4922 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-create-n2fp7" Nov 28 07:14:21 crc kubenswrapper[4922]: I1128 07:14:21.164397 4922 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-db-create-c9zx7" Nov 28 07:14:21 crc kubenswrapper[4922]: I1128 07:14:21.164393 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-create-c9zx7" event={"ID":"9b8dd495-8375-4779-9d6f-db1c25affa16","Type":"ContainerDied","Data":"ce19099eb9b7fa2fc69a35e243f1b1e93b713106a4638fc7b33f13d167faec47"} Nov 28 07:14:21 crc kubenswrapper[4922]: I1128 07:14:21.164488 4922 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="ce19099eb9b7fa2fc69a35e243f1b1e93b713106a4638fc7b33f13d167faec47" Nov 28 07:14:21 crc kubenswrapper[4922]: I1128 07:14:21.790506 4922 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovn-northd-0" Nov 28 07:14:22 crc kubenswrapper[4922]: I1128 07:14:22.260551 4922 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-db-sync-wk4t8"] Nov 28 07:14:22 crc kubenswrapper[4922]: E1128 07:14:22.261114 4922 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b2273c58-3ea7-4979-a39d-6e9ba47b15aa" containerName="mariadb-account-create-update" Nov 28 07:14:22 crc kubenswrapper[4922]: I1128 07:14:22.261133 4922 state_mem.go:107] "Deleted CPUSet assignment" podUID="b2273c58-3ea7-4979-a39d-6e9ba47b15aa" containerName="mariadb-account-create-update" Nov 28 07:14:22 crc kubenswrapper[4922]: E1128 07:14:22.261148 4922 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5408d508-e0e4-46e7-a4a8-fd6982ad6cfe" containerName="dnsmasq-dns" Nov 28 07:14:22 crc kubenswrapper[4922]: I1128 07:14:22.261155 4922 state_mem.go:107] "Deleted CPUSet assignment" podUID="5408d508-e0e4-46e7-a4a8-fd6982ad6cfe" containerName="dnsmasq-dns" Nov 28 07:14:22 crc kubenswrapper[4922]: E1128 07:14:22.261164 4922 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5408d508-e0e4-46e7-a4a8-fd6982ad6cfe" containerName="init" Nov 28 07:14:22 crc kubenswrapper[4922]: I1128 07:14:22.261170 4922 state_mem.go:107] "Deleted CPUSet assignment" podUID="5408d508-e0e4-46e7-a4a8-fd6982ad6cfe" containerName="init" Nov 28 07:14:22 crc kubenswrapper[4922]: E1128 07:14:22.261186 4922 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7a229aad-3dbd-40d3-85ef-38fe07deaf5b" containerName="mariadb-account-create-update" Nov 28 07:14:22 crc kubenswrapper[4922]: I1128 07:14:22.261192 4922 state_mem.go:107] "Deleted CPUSet assignment" podUID="7a229aad-3dbd-40d3-85ef-38fe07deaf5b" containerName="mariadb-account-create-update" Nov 28 07:14:22 crc kubenswrapper[4922]: E1128 07:14:22.261207 4922 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2d0a1633-2540-4354-93e9-8e963e8245f0" containerName="mariadb-database-create" Nov 28 07:14:22 crc kubenswrapper[4922]: I1128 07:14:22.261233 4922 state_mem.go:107] "Deleted CPUSet assignment" podUID="2d0a1633-2540-4354-93e9-8e963e8245f0" containerName="mariadb-database-create" Nov 28 07:14:22 crc kubenswrapper[4922]: E1128 07:14:22.261242 4922 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9b8dd495-8375-4779-9d6f-db1c25affa16" containerName="mariadb-database-create" Nov 28 07:14:22 crc kubenswrapper[4922]: I1128 07:14:22.261248 4922 state_mem.go:107] "Deleted CPUSet assignment" podUID="9b8dd495-8375-4779-9d6f-db1c25affa16" containerName="mariadb-database-create" Nov 28 07:14:22 crc kubenswrapper[4922]: E1128 07:14:22.261258 4922 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ce9e0923-85c6-48cc-bfa7-9b51637a188d" 
containerName="mariadb-account-create-update" Nov 28 07:14:22 crc kubenswrapper[4922]: I1128 07:14:22.261265 4922 state_mem.go:107] "Deleted CPUSet assignment" podUID="ce9e0923-85c6-48cc-bfa7-9b51637a188d" containerName="mariadb-account-create-update" Nov 28 07:14:22 crc kubenswrapper[4922]: E1128 07:14:22.261280 4922 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="36015a39-56bc-437e-9cdb-93f8ad059c45" containerName="mariadb-database-create" Nov 28 07:14:22 crc kubenswrapper[4922]: I1128 07:14:22.261285 4922 state_mem.go:107] "Deleted CPUSet assignment" podUID="36015a39-56bc-437e-9cdb-93f8ad059c45" containerName="mariadb-database-create" Nov 28 07:14:22 crc kubenswrapper[4922]: I1128 07:14:22.261420 4922 memory_manager.go:354] "RemoveStaleState removing state" podUID="b2273c58-3ea7-4979-a39d-6e9ba47b15aa" containerName="mariadb-account-create-update" Nov 28 07:14:22 crc kubenswrapper[4922]: I1128 07:14:22.261453 4922 memory_manager.go:354] "RemoveStaleState removing state" podUID="5408d508-e0e4-46e7-a4a8-fd6982ad6cfe" containerName="dnsmasq-dns" Nov 28 07:14:22 crc kubenswrapper[4922]: I1128 07:14:22.261468 4922 memory_manager.go:354] "RemoveStaleState removing state" podUID="7a229aad-3dbd-40d3-85ef-38fe07deaf5b" containerName="mariadb-account-create-update" Nov 28 07:14:22 crc kubenswrapper[4922]: I1128 07:14:22.261484 4922 memory_manager.go:354] "RemoveStaleState removing state" podUID="2d0a1633-2540-4354-93e9-8e963e8245f0" containerName="mariadb-database-create" Nov 28 07:14:22 crc kubenswrapper[4922]: I1128 07:14:22.261501 4922 memory_manager.go:354] "RemoveStaleState removing state" podUID="9b8dd495-8375-4779-9d6f-db1c25affa16" containerName="mariadb-database-create" Nov 28 07:14:22 crc kubenswrapper[4922]: I1128 07:14:22.261524 4922 memory_manager.go:354] "RemoveStaleState removing state" podUID="ce9e0923-85c6-48cc-bfa7-9b51637a188d" containerName="mariadb-account-create-update" Nov 28 07:14:22 crc kubenswrapper[4922]: I1128 07:14:22.261534 4922 memory_manager.go:354] "RemoveStaleState removing state" podUID="36015a39-56bc-437e-9cdb-93f8ad059c45" containerName="mariadb-database-create" Nov 28 07:14:22 crc kubenswrapper[4922]: I1128 07:14:22.262089 4922 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-db-sync-wk4t8" Nov 28 07:14:22 crc kubenswrapper[4922]: I1128 07:14:22.265034 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-config-data" Nov 28 07:14:22 crc kubenswrapper[4922]: I1128 07:14:22.265302 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-glance-dockercfg-fnqzv" Nov 28 07:14:22 crc kubenswrapper[4922]: I1128 07:14:22.272355 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-db-sync-wk4t8"] Nov 28 07:14:22 crc kubenswrapper[4922]: I1128 07:14:22.346763 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/2e87eedc-5f8a-46e2-bce1-c0361074a7f0-db-sync-config-data\") pod \"glance-db-sync-wk4t8\" (UID: \"2e87eedc-5f8a-46e2-bce1-c0361074a7f0\") " pod="openstack/glance-db-sync-wk4t8" Nov 28 07:14:22 crc kubenswrapper[4922]: I1128 07:14:22.346895 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2e87eedc-5f8a-46e2-bce1-c0361074a7f0-combined-ca-bundle\") pod \"glance-db-sync-wk4t8\" (UID: \"2e87eedc-5f8a-46e2-bce1-c0361074a7f0\") " pod="openstack/glance-db-sync-wk4t8" Nov 28 07:14:22 crc kubenswrapper[4922]: I1128 07:14:22.346937 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4f6qb\" (UniqueName: \"kubernetes.io/projected/2e87eedc-5f8a-46e2-bce1-c0361074a7f0-kube-api-access-4f6qb\") pod \"glance-db-sync-wk4t8\" (UID: \"2e87eedc-5f8a-46e2-bce1-c0361074a7f0\") " pod="openstack/glance-db-sync-wk4t8" Nov 28 07:14:22 crc kubenswrapper[4922]: I1128 07:14:22.346959 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2e87eedc-5f8a-46e2-bce1-c0361074a7f0-config-data\") pod \"glance-db-sync-wk4t8\" (UID: \"2e87eedc-5f8a-46e2-bce1-c0361074a7f0\") " pod="openstack/glance-db-sync-wk4t8" Nov 28 07:14:22 crc kubenswrapper[4922]: I1128 07:14:22.448564 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4f6qb\" (UniqueName: \"kubernetes.io/projected/2e87eedc-5f8a-46e2-bce1-c0361074a7f0-kube-api-access-4f6qb\") pod \"glance-db-sync-wk4t8\" (UID: \"2e87eedc-5f8a-46e2-bce1-c0361074a7f0\") " pod="openstack/glance-db-sync-wk4t8" Nov 28 07:14:22 crc kubenswrapper[4922]: I1128 07:14:22.448618 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2e87eedc-5f8a-46e2-bce1-c0361074a7f0-config-data\") pod \"glance-db-sync-wk4t8\" (UID: \"2e87eedc-5f8a-46e2-bce1-c0361074a7f0\") " pod="openstack/glance-db-sync-wk4t8" Nov 28 07:14:22 crc kubenswrapper[4922]: I1128 07:14:22.448694 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/2e87eedc-5f8a-46e2-bce1-c0361074a7f0-db-sync-config-data\") pod \"glance-db-sync-wk4t8\" (UID: \"2e87eedc-5f8a-46e2-bce1-c0361074a7f0\") " pod="openstack/glance-db-sync-wk4t8" Nov 28 07:14:22 crc kubenswrapper[4922]: I1128 07:14:22.448753 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2e87eedc-5f8a-46e2-bce1-c0361074a7f0-combined-ca-bundle\") pod 
\"glance-db-sync-wk4t8\" (UID: \"2e87eedc-5f8a-46e2-bce1-c0361074a7f0\") " pod="openstack/glance-db-sync-wk4t8" Nov 28 07:14:22 crc kubenswrapper[4922]: I1128 07:14:22.458101 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2e87eedc-5f8a-46e2-bce1-c0361074a7f0-config-data\") pod \"glance-db-sync-wk4t8\" (UID: \"2e87eedc-5f8a-46e2-bce1-c0361074a7f0\") " pod="openstack/glance-db-sync-wk4t8" Nov 28 07:14:22 crc kubenswrapper[4922]: I1128 07:14:22.462108 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/2e87eedc-5f8a-46e2-bce1-c0361074a7f0-db-sync-config-data\") pod \"glance-db-sync-wk4t8\" (UID: \"2e87eedc-5f8a-46e2-bce1-c0361074a7f0\") " pod="openstack/glance-db-sync-wk4t8" Nov 28 07:14:22 crc kubenswrapper[4922]: I1128 07:14:22.476390 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4f6qb\" (UniqueName: \"kubernetes.io/projected/2e87eedc-5f8a-46e2-bce1-c0361074a7f0-kube-api-access-4f6qb\") pod \"glance-db-sync-wk4t8\" (UID: \"2e87eedc-5f8a-46e2-bce1-c0361074a7f0\") " pod="openstack/glance-db-sync-wk4t8" Nov 28 07:14:22 crc kubenswrapper[4922]: I1128 07:14:22.479463 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2e87eedc-5f8a-46e2-bce1-c0361074a7f0-combined-ca-bundle\") pod \"glance-db-sync-wk4t8\" (UID: \"2e87eedc-5f8a-46e2-bce1-c0361074a7f0\") " pod="openstack/glance-db-sync-wk4t8" Nov 28 07:14:22 crc kubenswrapper[4922]: I1128 07:14:22.583215 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-sync-wk4t8" Nov 28 07:14:23 crc kubenswrapper[4922]: I1128 07:14:23.164279 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-db-sync-wk4t8"] Nov 28 07:14:24 crc kubenswrapper[4922]: I1128 07:14:24.188842 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-sync-wk4t8" event={"ID":"2e87eedc-5f8a-46e2-bce1-c0361074a7f0","Type":"ContainerStarted","Data":"7664d9cdefc78a93b2e51824bb9f221c2353941984db22ec91f05d94b3a7a35a"} Nov 28 07:14:25 crc kubenswrapper[4922]: I1128 07:14:25.200882 4922 generic.go:334] "Generic (PLEG): container finished" podID="b78e6a70-a315-4c6d-8731-a21335e18766" containerID="60b74196fbe2ff1313cee7aed6bbe3eed676d5ed07bfc2f788f42483321a3e19" exitCode=0 Nov 28 07:14:25 crc kubenswrapper[4922]: I1128 07:14:25.200894 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-ring-rebalance-6kzln" event={"ID":"b78e6a70-a315-4c6d-8731-a21335e18766","Type":"ContainerDied","Data":"60b74196fbe2ff1313cee7aed6bbe3eed676d5ed07bfc2f788f42483321a3e19"} Nov 28 07:14:26 crc kubenswrapper[4922]: I1128 07:14:26.213657 4922 generic.go:334] "Generic (PLEG): container finished" podID="99708a5d-57d5-4479-8e09-94428bb13fa3" containerID="9e43566d759ea7d848615e1c8beb2d9a8c5b517a0be3388bc208d070214e406b" exitCode=0 Nov 28 07:14:26 crc kubenswrapper[4922]: I1128 07:14:26.213831 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"99708a5d-57d5-4479-8e09-94428bb13fa3","Type":"ContainerDied","Data":"9e43566d759ea7d848615e1c8beb2d9a8c5b517a0be3388bc208d070214e406b"} Nov 28 07:14:26 crc kubenswrapper[4922]: I1128 07:14:26.578947 4922 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/swift-ring-rebalance-6kzln" Nov 28 07:14:26 crc kubenswrapper[4922]: I1128 07:14:26.726318 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/b78e6a70-a315-4c6d-8731-a21335e18766-dispersionconf\") pod \"b78e6a70-a315-4c6d-8731-a21335e18766\" (UID: \"b78e6a70-a315-4c6d-8731-a21335e18766\") " Nov 28 07:14:26 crc kubenswrapper[4922]: I1128 07:14:26.726447 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/b78e6a70-a315-4c6d-8731-a21335e18766-etc-swift\") pod \"b78e6a70-a315-4c6d-8731-a21335e18766\" (UID: \"b78e6a70-a315-4c6d-8731-a21335e18766\") " Nov 28 07:14:26 crc kubenswrapper[4922]: I1128 07:14:26.726481 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/b78e6a70-a315-4c6d-8731-a21335e18766-scripts\") pod \"b78e6a70-a315-4c6d-8731-a21335e18766\" (UID: \"b78e6a70-a315-4c6d-8731-a21335e18766\") " Nov 28 07:14:26 crc kubenswrapper[4922]: I1128 07:14:26.726535 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/b78e6a70-a315-4c6d-8731-a21335e18766-ring-data-devices\") pod \"b78e6a70-a315-4c6d-8731-a21335e18766\" (UID: \"b78e6a70-a315-4c6d-8731-a21335e18766\") " Nov 28 07:14:26 crc kubenswrapper[4922]: I1128 07:14:26.726608 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b78e6a70-a315-4c6d-8731-a21335e18766-combined-ca-bundle\") pod \"b78e6a70-a315-4c6d-8731-a21335e18766\" (UID: \"b78e6a70-a315-4c6d-8731-a21335e18766\") " Nov 28 07:14:26 crc kubenswrapper[4922]: I1128 07:14:26.726670 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/b78e6a70-a315-4c6d-8731-a21335e18766-swiftconf\") pod \"b78e6a70-a315-4c6d-8731-a21335e18766\" (UID: \"b78e6a70-a315-4c6d-8731-a21335e18766\") " Nov 28 07:14:26 crc kubenswrapper[4922]: I1128 07:14:26.726690 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-m7xtv\" (UniqueName: \"kubernetes.io/projected/b78e6a70-a315-4c6d-8731-a21335e18766-kube-api-access-m7xtv\") pod \"b78e6a70-a315-4c6d-8731-a21335e18766\" (UID: \"b78e6a70-a315-4c6d-8731-a21335e18766\") " Nov 28 07:14:26 crc kubenswrapper[4922]: I1128 07:14:26.739188 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b78e6a70-a315-4c6d-8731-a21335e18766-ring-data-devices" (OuterVolumeSpecName: "ring-data-devices") pod "b78e6a70-a315-4c6d-8731-a21335e18766" (UID: "b78e6a70-a315-4c6d-8731-a21335e18766"). InnerVolumeSpecName "ring-data-devices". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 07:14:26 crc kubenswrapper[4922]: I1128 07:14:26.739853 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b78e6a70-a315-4c6d-8731-a21335e18766-etc-swift" (OuterVolumeSpecName: "etc-swift") pod "b78e6a70-a315-4c6d-8731-a21335e18766" (UID: "b78e6a70-a315-4c6d-8731-a21335e18766"). InnerVolumeSpecName "etc-swift". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 07:14:26 crc kubenswrapper[4922]: I1128 07:14:26.746966 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b78e6a70-a315-4c6d-8731-a21335e18766-kube-api-access-m7xtv" (OuterVolumeSpecName: "kube-api-access-m7xtv") pod "b78e6a70-a315-4c6d-8731-a21335e18766" (UID: "b78e6a70-a315-4c6d-8731-a21335e18766"). InnerVolumeSpecName "kube-api-access-m7xtv". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 07:14:26 crc kubenswrapper[4922]: I1128 07:14:26.751570 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b78e6a70-a315-4c6d-8731-a21335e18766-dispersionconf" (OuterVolumeSpecName: "dispersionconf") pod "b78e6a70-a315-4c6d-8731-a21335e18766" (UID: "b78e6a70-a315-4c6d-8731-a21335e18766"). InnerVolumeSpecName "dispersionconf". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 07:14:26 crc kubenswrapper[4922]: I1128 07:14:26.753419 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b78e6a70-a315-4c6d-8731-a21335e18766-scripts" (OuterVolumeSpecName: "scripts") pod "b78e6a70-a315-4c6d-8731-a21335e18766" (UID: "b78e6a70-a315-4c6d-8731-a21335e18766"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 07:14:26 crc kubenswrapper[4922]: I1128 07:14:26.756439 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b78e6a70-a315-4c6d-8731-a21335e18766-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "b78e6a70-a315-4c6d-8731-a21335e18766" (UID: "b78e6a70-a315-4c6d-8731-a21335e18766"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 07:14:26 crc kubenswrapper[4922]: I1128 07:14:26.777485 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b78e6a70-a315-4c6d-8731-a21335e18766-swiftconf" (OuterVolumeSpecName: "swiftconf") pod "b78e6a70-a315-4c6d-8731-a21335e18766" (UID: "b78e6a70-a315-4c6d-8731-a21335e18766"). InnerVolumeSpecName "swiftconf". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 07:14:26 crc kubenswrapper[4922]: I1128 07:14:26.832100 4922 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b78e6a70-a315-4c6d-8731-a21335e18766-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 07:14:26 crc kubenswrapper[4922]: I1128 07:14:26.832359 4922 reconciler_common.go:293] "Volume detached for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/b78e6a70-a315-4c6d-8731-a21335e18766-swiftconf\") on node \"crc\" DevicePath \"\"" Nov 28 07:14:26 crc kubenswrapper[4922]: I1128 07:14:26.832372 4922 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-m7xtv\" (UniqueName: \"kubernetes.io/projected/b78e6a70-a315-4c6d-8731-a21335e18766-kube-api-access-m7xtv\") on node \"crc\" DevicePath \"\"" Nov 28 07:14:26 crc kubenswrapper[4922]: I1128 07:14:26.832384 4922 reconciler_common.go:293] "Volume detached for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/b78e6a70-a315-4c6d-8731-a21335e18766-dispersionconf\") on node \"crc\" DevicePath \"\"" Nov 28 07:14:26 crc kubenswrapper[4922]: I1128 07:14:26.832393 4922 reconciler_common.go:293] "Volume detached for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/b78e6a70-a315-4c6d-8731-a21335e18766-etc-swift\") on node \"crc\" DevicePath \"\"" Nov 28 07:14:26 crc kubenswrapper[4922]: I1128 07:14:26.832402 4922 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/b78e6a70-a315-4c6d-8731-a21335e18766-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 07:14:26 crc kubenswrapper[4922]: I1128 07:14:26.832411 4922 reconciler_common.go:293] "Volume detached for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/b78e6a70-a315-4c6d-8731-a21335e18766-ring-data-devices\") on node \"crc\" DevicePath \"\"" Nov 28 07:14:26 crc kubenswrapper[4922]: I1128 07:14:26.904423 4922 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/ovn-controller-xqzrg" podUID="e5e01f31-28bd-46a2-b5cc-695c485deaf6" containerName="ovn-controller" probeResult="failure" output=< Nov 28 07:14:26 crc kubenswrapper[4922]: ERROR - ovn-controller connection status is 'not connected', expecting 'connected' status Nov 28 07:14:26 crc kubenswrapper[4922]: > Nov 28 07:14:27 crc kubenswrapper[4922]: I1128 07:14:26.988910 4922 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovn-controller-ovs-m9xpz" Nov 28 07:14:27 crc kubenswrapper[4922]: I1128 07:14:27.055618 4922 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovn-controller-ovs-m9xpz" Nov 28 07:14:27 crc kubenswrapper[4922]: I1128 07:14:27.222540 4922 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/swift-ring-rebalance-6kzln" Nov 28 07:14:27 crc kubenswrapper[4922]: I1128 07:14:27.225376 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-ring-rebalance-6kzln" event={"ID":"b78e6a70-a315-4c6d-8731-a21335e18766","Type":"ContainerDied","Data":"a4de9e9de8651a15c92dfa668c8e0cbc8fe1952ae86a80310797d8a5d6721112"} Nov 28 07:14:27 crc kubenswrapper[4922]: I1128 07:14:27.225426 4922 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="a4de9e9de8651a15c92dfa668c8e0cbc8fe1952ae86a80310797d8a5d6721112" Nov 28 07:14:27 crc kubenswrapper[4922]: I1128 07:14:27.227912 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"99708a5d-57d5-4479-8e09-94428bb13fa3","Type":"ContainerStarted","Data":"58bc4c962a13bd392e1f3d48869c89613df686531a50bf08290a8747044899ed"} Nov 28 07:14:27 crc kubenswrapper[4922]: I1128 07:14:27.228627 4922 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/rabbitmq-cell1-server-0" Nov 28 07:14:27 crc kubenswrapper[4922]: I1128 07:14:27.267720 4922 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/rabbitmq-cell1-server-0" podStartSLOduration=-9223371966.58708 podStartE2EDuration="1m10.267695759s" podCreationTimestamp="2025-11-28 07:13:17 +0000 UTC" firstStartedPulling="2025-11-28 07:13:19.556388669 +0000 UTC m=+1244.476784251" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 07:14:27.263001724 +0000 UTC m=+1312.183397316" watchObservedRunningTime="2025-11-28 07:14:27.267695759 +0000 UTC m=+1312.188091351" Nov 28 07:14:27 crc kubenswrapper[4922]: I1128 07:14:27.292069 4922 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-xqzrg-config-ghjm2"] Nov 28 07:14:27 crc kubenswrapper[4922]: E1128 07:14:27.292480 4922 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b78e6a70-a315-4c6d-8731-a21335e18766" containerName="swift-ring-rebalance" Nov 28 07:14:27 crc kubenswrapper[4922]: I1128 07:14:27.292499 4922 state_mem.go:107] "Deleted CPUSet assignment" podUID="b78e6a70-a315-4c6d-8731-a21335e18766" containerName="swift-ring-rebalance" Nov 28 07:14:27 crc kubenswrapper[4922]: I1128 07:14:27.292710 4922 memory_manager.go:354] "RemoveStaleState removing state" podUID="b78e6a70-a315-4c6d-8731-a21335e18766" containerName="swift-ring-rebalance" Nov 28 07:14:27 crc kubenswrapper[4922]: I1128 07:14:27.293385 4922 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-controller-xqzrg-config-ghjm2" Nov 28 07:14:27 crc kubenswrapper[4922]: I1128 07:14:27.295879 4922 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovncontroller-extra-scripts" Nov 28 07:14:27 crc kubenswrapper[4922]: I1128 07:14:27.303156 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-xqzrg-config-ghjm2"] Nov 28 07:14:27 crc kubenswrapper[4922]: I1128 07:14:27.440281 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/e2cc2ab6-eb73-47e0-831e-681a4a088152-var-run-ovn\") pod \"ovn-controller-xqzrg-config-ghjm2\" (UID: \"e2cc2ab6-eb73-47e0-831e-681a4a088152\") " pod="openstack/ovn-controller-xqzrg-config-ghjm2" Nov 28 07:14:27 crc kubenswrapper[4922]: I1128 07:14:27.440332 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/e2cc2ab6-eb73-47e0-831e-681a4a088152-var-run\") pod \"ovn-controller-xqzrg-config-ghjm2\" (UID: \"e2cc2ab6-eb73-47e0-831e-681a4a088152\") " pod="openstack/ovn-controller-xqzrg-config-ghjm2" Nov 28 07:14:27 crc kubenswrapper[4922]: I1128 07:14:27.440375 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lxl66\" (UniqueName: \"kubernetes.io/projected/e2cc2ab6-eb73-47e0-831e-681a4a088152-kube-api-access-lxl66\") pod \"ovn-controller-xqzrg-config-ghjm2\" (UID: \"e2cc2ab6-eb73-47e0-831e-681a4a088152\") " pod="openstack/ovn-controller-xqzrg-config-ghjm2" Nov 28 07:14:27 crc kubenswrapper[4922]: I1128 07:14:27.440440 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/e2cc2ab6-eb73-47e0-831e-681a4a088152-var-log-ovn\") pod \"ovn-controller-xqzrg-config-ghjm2\" (UID: \"e2cc2ab6-eb73-47e0-831e-681a4a088152\") " pod="openstack/ovn-controller-xqzrg-config-ghjm2" Nov 28 07:14:27 crc kubenswrapper[4922]: I1128 07:14:27.440531 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/e2cc2ab6-eb73-47e0-831e-681a4a088152-additional-scripts\") pod \"ovn-controller-xqzrg-config-ghjm2\" (UID: \"e2cc2ab6-eb73-47e0-831e-681a4a088152\") " pod="openstack/ovn-controller-xqzrg-config-ghjm2" Nov 28 07:14:27 crc kubenswrapper[4922]: I1128 07:14:27.440551 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/e2cc2ab6-eb73-47e0-831e-681a4a088152-scripts\") pod \"ovn-controller-xqzrg-config-ghjm2\" (UID: \"e2cc2ab6-eb73-47e0-831e-681a4a088152\") " pod="openstack/ovn-controller-xqzrg-config-ghjm2" Nov 28 07:14:27 crc kubenswrapper[4922]: I1128 07:14:27.541758 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lxl66\" (UniqueName: \"kubernetes.io/projected/e2cc2ab6-eb73-47e0-831e-681a4a088152-kube-api-access-lxl66\") pod \"ovn-controller-xqzrg-config-ghjm2\" (UID: \"e2cc2ab6-eb73-47e0-831e-681a4a088152\") " pod="openstack/ovn-controller-xqzrg-config-ghjm2" Nov 28 07:14:27 crc kubenswrapper[4922]: I1128 07:14:27.541822 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-log-ovn\" (UniqueName: 
\"kubernetes.io/host-path/e2cc2ab6-eb73-47e0-831e-681a4a088152-var-log-ovn\") pod \"ovn-controller-xqzrg-config-ghjm2\" (UID: \"e2cc2ab6-eb73-47e0-831e-681a4a088152\") " pod="openstack/ovn-controller-xqzrg-config-ghjm2" Nov 28 07:14:27 crc kubenswrapper[4922]: I1128 07:14:27.541909 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/e2cc2ab6-eb73-47e0-831e-681a4a088152-additional-scripts\") pod \"ovn-controller-xqzrg-config-ghjm2\" (UID: \"e2cc2ab6-eb73-47e0-831e-681a4a088152\") " pod="openstack/ovn-controller-xqzrg-config-ghjm2" Nov 28 07:14:27 crc kubenswrapper[4922]: I1128 07:14:27.541931 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/e2cc2ab6-eb73-47e0-831e-681a4a088152-scripts\") pod \"ovn-controller-xqzrg-config-ghjm2\" (UID: \"e2cc2ab6-eb73-47e0-831e-681a4a088152\") " pod="openstack/ovn-controller-xqzrg-config-ghjm2" Nov 28 07:14:27 crc kubenswrapper[4922]: I1128 07:14:27.541968 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/e2cc2ab6-eb73-47e0-831e-681a4a088152-var-run-ovn\") pod \"ovn-controller-xqzrg-config-ghjm2\" (UID: \"e2cc2ab6-eb73-47e0-831e-681a4a088152\") " pod="openstack/ovn-controller-xqzrg-config-ghjm2" Nov 28 07:14:27 crc kubenswrapper[4922]: I1128 07:14:27.541988 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/e2cc2ab6-eb73-47e0-831e-681a4a088152-var-run\") pod \"ovn-controller-xqzrg-config-ghjm2\" (UID: \"e2cc2ab6-eb73-47e0-831e-681a4a088152\") " pod="openstack/ovn-controller-xqzrg-config-ghjm2" Nov 28 07:14:27 crc kubenswrapper[4922]: I1128 07:14:27.542210 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/e2cc2ab6-eb73-47e0-831e-681a4a088152-var-log-ovn\") pod \"ovn-controller-xqzrg-config-ghjm2\" (UID: \"e2cc2ab6-eb73-47e0-831e-681a4a088152\") " pod="openstack/ovn-controller-xqzrg-config-ghjm2" Nov 28 07:14:27 crc kubenswrapper[4922]: I1128 07:14:27.542231 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/e2cc2ab6-eb73-47e0-831e-681a4a088152-var-run\") pod \"ovn-controller-xqzrg-config-ghjm2\" (UID: \"e2cc2ab6-eb73-47e0-831e-681a4a088152\") " pod="openstack/ovn-controller-xqzrg-config-ghjm2" Nov 28 07:14:27 crc kubenswrapper[4922]: I1128 07:14:27.542301 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/e2cc2ab6-eb73-47e0-831e-681a4a088152-var-run-ovn\") pod \"ovn-controller-xqzrg-config-ghjm2\" (UID: \"e2cc2ab6-eb73-47e0-831e-681a4a088152\") " pod="openstack/ovn-controller-xqzrg-config-ghjm2" Nov 28 07:14:27 crc kubenswrapper[4922]: I1128 07:14:27.542815 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/e2cc2ab6-eb73-47e0-831e-681a4a088152-additional-scripts\") pod \"ovn-controller-xqzrg-config-ghjm2\" (UID: \"e2cc2ab6-eb73-47e0-831e-681a4a088152\") " pod="openstack/ovn-controller-xqzrg-config-ghjm2" Nov 28 07:14:27 crc kubenswrapper[4922]: I1128 07:14:27.544202 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: 
\"kubernetes.io/configmap/e2cc2ab6-eb73-47e0-831e-681a4a088152-scripts\") pod \"ovn-controller-xqzrg-config-ghjm2\" (UID: \"e2cc2ab6-eb73-47e0-831e-681a4a088152\") " pod="openstack/ovn-controller-xqzrg-config-ghjm2" Nov 28 07:14:27 crc kubenswrapper[4922]: I1128 07:14:27.568234 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lxl66\" (UniqueName: \"kubernetes.io/projected/e2cc2ab6-eb73-47e0-831e-681a4a088152-kube-api-access-lxl66\") pod \"ovn-controller-xqzrg-config-ghjm2\" (UID: \"e2cc2ab6-eb73-47e0-831e-681a4a088152\") " pod="openstack/ovn-controller-xqzrg-config-ghjm2" Nov 28 07:14:27 crc kubenswrapper[4922]: I1128 07:14:27.609798 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-xqzrg-config-ghjm2" Nov 28 07:14:27 crc kubenswrapper[4922]: I1128 07:14:27.897889 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-xqzrg-config-ghjm2"] Nov 28 07:14:28 crc kubenswrapper[4922]: I1128 07:14:28.241514 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-xqzrg-config-ghjm2" event={"ID":"e2cc2ab6-eb73-47e0-831e-681a4a088152","Type":"ContainerStarted","Data":"b5c892ee462f9ec3a9c55c4c3d7323e2ed6a1178ab68cba33291863c08860d59"} Nov 28 07:14:29 crc kubenswrapper[4922]: I1128 07:14:29.255537 4922 generic.go:334] "Generic (PLEG): container finished" podID="e2cc2ab6-eb73-47e0-831e-681a4a088152" containerID="1bf50c3bf71cc2b87dc27ccb1b161efb4927ad6c2b31e4d00ce05f6d04927aa0" exitCode=0 Nov 28 07:14:29 crc kubenswrapper[4922]: I1128 07:14:29.255591 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-xqzrg-config-ghjm2" event={"ID":"e2cc2ab6-eb73-47e0-831e-681a4a088152","Type":"ContainerDied","Data":"1bf50c3bf71cc2b87dc27ccb1b161efb4927ad6c2b31e4d00ce05f6d04927aa0"} Nov 28 07:14:31 crc kubenswrapper[4922]: I1128 07:14:31.923819 4922 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovn-controller-xqzrg" Nov 28 07:14:36 crc kubenswrapper[4922]: I1128 07:14:36.850892 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/46c3d0a8-d9ed-419a-baf3-57aaaf0c56fe-etc-swift\") pod \"swift-storage-0\" (UID: \"46c3d0a8-d9ed-419a-baf3-57aaaf0c56fe\") " pod="openstack/swift-storage-0" Nov 28 07:14:36 crc kubenswrapper[4922]: I1128 07:14:36.879449 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/46c3d0a8-d9ed-419a-baf3-57aaaf0c56fe-etc-swift\") pod \"swift-storage-0\" (UID: \"46c3d0a8-d9ed-419a-baf3-57aaaf0c56fe\") " pod="openstack/swift-storage-0" Nov 28 07:14:37 crc kubenswrapper[4922]: I1128 07:14:37.016537 4922 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/swift-storage-0" Nov 28 07:14:38 crc kubenswrapper[4922]: I1128 07:14:38.607705 4922 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/rabbitmq-server-0" Nov 28 07:14:38 crc kubenswrapper[4922]: I1128 07:14:38.843451 4922 prober.go:107] "Probe failed" probeType="Liveness" pod="metallb-system/frr-k8s-qgjm8" podUID="72bbf902-847b-45f2-9bb3-57de7a0a88ce" containerName="frr" probeResult="failure" output="Get \"http://127.0.0.1:7573/livez\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 28 07:14:38 crc kubenswrapper[4922]: I1128 07:14:38.921468 4922 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-db-create-lx69b"] Nov 28 07:14:38 crc kubenswrapper[4922]: I1128 07:14:38.921453 4922 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/rabbitmq-cell1-server-0" podUID="99708a5d-57d5-4479-8e09-94428bb13fa3" containerName="rabbitmq" probeResult="failure" output="dial tcp 10.217.0.102:5671: connect: connection refused" Nov 28 07:14:38 crc kubenswrapper[4922]: I1128 07:14:38.922934 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-create-lx69b" Nov 28 07:14:38 crc kubenswrapper[4922]: I1128 07:14:38.931121 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-db-create-lx69b"] Nov 28 07:14:39 crc kubenswrapper[4922]: I1128 07:14:39.024151 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9gc85\" (UniqueName: \"kubernetes.io/projected/711e933a-0d19-4c60-90fe-c1c3ed0174ed-kube-api-access-9gc85\") pod \"barbican-db-create-lx69b\" (UID: \"711e933a-0d19-4c60-90fe-c1c3ed0174ed\") " pod="openstack/barbican-db-create-lx69b" Nov 28 07:14:39 crc kubenswrapper[4922]: I1128 07:14:39.024249 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/711e933a-0d19-4c60-90fe-c1c3ed0174ed-operator-scripts\") pod \"barbican-db-create-lx69b\" (UID: \"711e933a-0d19-4c60-90fe-c1c3ed0174ed\") " pod="openstack/barbican-db-create-lx69b" Nov 28 07:14:39 crc kubenswrapper[4922]: I1128 07:14:39.038912 4922 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-5229-account-create-update-2k5qm"] Nov 28 07:14:39 crc kubenswrapper[4922]: I1128 07:14:39.041841 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-5229-account-create-update-2k5qm" Nov 28 07:14:39 crc kubenswrapper[4922]: I1128 07:14:39.044076 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-db-secret" Nov 28 07:14:39 crc kubenswrapper[4922]: I1128 07:14:39.054021 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-5229-account-create-update-2k5qm"] Nov 28 07:14:39 crc kubenswrapper[4922]: I1128 07:14:39.120231 4922 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-db-create-jtvts"] Nov 28 07:14:39 crc kubenswrapper[4922]: I1128 07:14:39.121899 4922 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-db-create-jtvts" Nov 28 07:14:39 crc kubenswrapper[4922]: I1128 07:14:39.126510 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-k9jzg\" (UniqueName: \"kubernetes.io/projected/813e18d9-b3c1-48eb-aff7-197d425910ef-kube-api-access-k9jzg\") pod \"barbican-5229-account-create-update-2k5qm\" (UID: \"813e18d9-b3c1-48eb-aff7-197d425910ef\") " pod="openstack/barbican-5229-account-create-update-2k5qm" Nov 28 07:14:39 crc kubenswrapper[4922]: I1128 07:14:39.126591 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9gc85\" (UniqueName: \"kubernetes.io/projected/711e933a-0d19-4c60-90fe-c1c3ed0174ed-kube-api-access-9gc85\") pod \"barbican-db-create-lx69b\" (UID: \"711e933a-0d19-4c60-90fe-c1c3ed0174ed\") " pod="openstack/barbican-db-create-lx69b" Nov 28 07:14:39 crc kubenswrapper[4922]: I1128 07:14:39.126619 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/813e18d9-b3c1-48eb-aff7-197d425910ef-operator-scripts\") pod \"barbican-5229-account-create-update-2k5qm\" (UID: \"813e18d9-b3c1-48eb-aff7-197d425910ef\") " pod="openstack/barbican-5229-account-create-update-2k5qm" Nov 28 07:14:39 crc kubenswrapper[4922]: I1128 07:14:39.126731 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/711e933a-0d19-4c60-90fe-c1c3ed0174ed-operator-scripts\") pod \"barbican-db-create-lx69b\" (UID: \"711e933a-0d19-4c60-90fe-c1c3ed0174ed\") " pod="openstack/barbican-db-create-lx69b" Nov 28 07:14:39 crc kubenswrapper[4922]: I1128 07:14:39.127668 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/711e933a-0d19-4c60-90fe-c1c3ed0174ed-operator-scripts\") pod \"barbican-db-create-lx69b\" (UID: \"711e933a-0d19-4c60-90fe-c1c3ed0174ed\") " pod="openstack/barbican-db-create-lx69b" Nov 28 07:14:39 crc kubenswrapper[4922]: I1128 07:14:39.138354 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-db-create-jtvts"] Nov 28 07:14:39 crc kubenswrapper[4922]: I1128 07:14:39.162087 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9gc85\" (UniqueName: \"kubernetes.io/projected/711e933a-0d19-4c60-90fe-c1c3ed0174ed-kube-api-access-9gc85\") pod \"barbican-db-create-lx69b\" (UID: \"711e933a-0d19-4c60-90fe-c1c3ed0174ed\") " pod="openstack/barbican-db-create-lx69b" Nov 28 07:14:39 crc kubenswrapper[4922]: I1128 07:14:39.228174 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/813e18d9-b3c1-48eb-aff7-197d425910ef-operator-scripts\") pod \"barbican-5229-account-create-update-2k5qm\" (UID: \"813e18d9-b3c1-48eb-aff7-197d425910ef\") " pod="openstack/barbican-5229-account-create-update-2k5qm" Nov 28 07:14:39 crc kubenswrapper[4922]: I1128 07:14:39.228315 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wctmq\" (UniqueName: \"kubernetes.io/projected/6a3388c9-ec6c-400b-bfcd-59374499fd42-kube-api-access-wctmq\") pod \"cinder-db-create-jtvts\" (UID: \"6a3388c9-ec6c-400b-bfcd-59374499fd42\") " pod="openstack/cinder-db-create-jtvts" Nov 28 07:14:39 crc kubenswrapper[4922]: I1128 07:14:39.228353 4922 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/6a3388c9-ec6c-400b-bfcd-59374499fd42-operator-scripts\") pod \"cinder-db-create-jtvts\" (UID: \"6a3388c9-ec6c-400b-bfcd-59374499fd42\") " pod="openstack/cinder-db-create-jtvts" Nov 28 07:14:39 crc kubenswrapper[4922]: I1128 07:14:39.228506 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-k9jzg\" (UniqueName: \"kubernetes.io/projected/813e18d9-b3c1-48eb-aff7-197d425910ef-kube-api-access-k9jzg\") pod \"barbican-5229-account-create-update-2k5qm\" (UID: \"813e18d9-b3c1-48eb-aff7-197d425910ef\") " pod="openstack/barbican-5229-account-create-update-2k5qm" Nov 28 07:14:39 crc kubenswrapper[4922]: I1128 07:14:39.229542 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/813e18d9-b3c1-48eb-aff7-197d425910ef-operator-scripts\") pod \"barbican-5229-account-create-update-2k5qm\" (UID: \"813e18d9-b3c1-48eb-aff7-197d425910ef\") " pod="openstack/barbican-5229-account-create-update-2k5qm" Nov 28 07:14:39 crc kubenswrapper[4922]: I1128 07:14:39.242946 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-k9jzg\" (UniqueName: \"kubernetes.io/projected/813e18d9-b3c1-48eb-aff7-197d425910ef-kube-api-access-k9jzg\") pod \"barbican-5229-account-create-update-2k5qm\" (UID: \"813e18d9-b3c1-48eb-aff7-197d425910ef\") " pod="openstack/barbican-5229-account-create-update-2k5qm" Nov 28 07:14:39 crc kubenswrapper[4922]: I1128 07:14:39.247743 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-create-lx69b" Nov 28 07:14:39 crc kubenswrapper[4922]: I1128 07:14:39.300165 4922 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-db-sync-7l6kx"] Nov 28 07:14:39 crc kubenswrapper[4922]: I1128 07:14:39.301974 4922 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-db-sync-7l6kx" Nov 28 07:14:39 crc kubenswrapper[4922]: I1128 07:14:39.304243 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-keystone-dockercfg-vx657" Nov 28 07:14:39 crc kubenswrapper[4922]: I1128 07:14:39.304477 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone" Nov 28 07:14:39 crc kubenswrapper[4922]: I1128 07:14:39.304899 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-config-data" Nov 28 07:14:39 crc kubenswrapper[4922]: I1128 07:14:39.308505 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-scripts" Nov 28 07:14:39 crc kubenswrapper[4922]: I1128 07:14:39.329626 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wctmq\" (UniqueName: \"kubernetes.io/projected/6a3388c9-ec6c-400b-bfcd-59374499fd42-kube-api-access-wctmq\") pod \"cinder-db-create-jtvts\" (UID: \"6a3388c9-ec6c-400b-bfcd-59374499fd42\") " pod="openstack/cinder-db-create-jtvts" Nov 28 07:14:39 crc kubenswrapper[4922]: I1128 07:14:39.329685 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/6a3388c9-ec6c-400b-bfcd-59374499fd42-operator-scripts\") pod \"cinder-db-create-jtvts\" (UID: \"6a3388c9-ec6c-400b-bfcd-59374499fd42\") " pod="openstack/cinder-db-create-jtvts" Nov 28 07:14:39 crc kubenswrapper[4922]: I1128 07:14:39.330573 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/6a3388c9-ec6c-400b-bfcd-59374499fd42-operator-scripts\") pod \"cinder-db-create-jtvts\" (UID: \"6a3388c9-ec6c-400b-bfcd-59374499fd42\") " pod="openstack/cinder-db-create-jtvts" Nov 28 07:14:39 crc kubenswrapper[4922]: I1128 07:14:39.356050 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-5229-account-create-update-2k5qm" Nov 28 07:14:39 crc kubenswrapper[4922]: I1128 07:14:39.365939 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wctmq\" (UniqueName: \"kubernetes.io/projected/6a3388c9-ec6c-400b-bfcd-59374499fd42-kube-api-access-wctmq\") pod \"cinder-db-create-jtvts\" (UID: \"6a3388c9-ec6c-400b-bfcd-59374499fd42\") " pod="openstack/cinder-db-create-jtvts" Nov 28 07:14:39 crc kubenswrapper[4922]: I1128 07:14:39.375276 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-db-sync-7l6kx"] Nov 28 07:14:39 crc kubenswrapper[4922]: I1128 07:14:39.384310 4922 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-db-create-th8tp"] Nov 28 07:14:39 crc kubenswrapper[4922]: I1128 07:14:39.385614 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-create-th8tp" Nov 28 07:14:39 crc kubenswrapper[4922]: I1128 07:14:39.394631 4922 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-e247-account-create-update-m48qt"] Nov 28 07:14:39 crc kubenswrapper[4922]: I1128 07:14:39.395737 4922 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-e247-account-create-update-m48qt" Nov 28 07:14:39 crc kubenswrapper[4922]: I1128 07:14:39.400508 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-db-secret" Nov 28 07:14:39 crc kubenswrapper[4922]: I1128 07:14:39.431153 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/33e395bd-1a5e-4c48-829d-e7b3408e9b8e-config-data\") pod \"keystone-db-sync-7l6kx\" (UID: \"33e395bd-1a5e-4c48-829d-e7b3408e9b8e\") " pod="openstack/keystone-db-sync-7l6kx" Nov 28 07:14:39 crc kubenswrapper[4922]: I1128 07:14:39.431232 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/33e395bd-1a5e-4c48-829d-e7b3408e9b8e-combined-ca-bundle\") pod \"keystone-db-sync-7l6kx\" (UID: \"33e395bd-1a5e-4c48-829d-e7b3408e9b8e\") " pod="openstack/keystone-db-sync-7l6kx" Nov 28 07:14:39 crc kubenswrapper[4922]: I1128 07:14:39.431271 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6k4g9\" (UniqueName: \"kubernetes.io/projected/33e395bd-1a5e-4c48-829d-e7b3408e9b8e-kube-api-access-6k4g9\") pod \"keystone-db-sync-7l6kx\" (UID: \"33e395bd-1a5e-4c48-829d-e7b3408e9b8e\") " pod="openstack/keystone-db-sync-7l6kx" Nov 28 07:14:39 crc kubenswrapper[4922]: I1128 07:14:39.433969 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-db-create-th8tp"] Nov 28 07:14:39 crc kubenswrapper[4922]: I1128 07:14:39.434004 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-e247-account-create-update-m48qt"] Nov 28 07:14:39 crc kubenswrapper[4922]: I1128 07:14:39.452419 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-create-jtvts" Nov 28 07:14:39 crc kubenswrapper[4922]: I1128 07:14:39.468299 4922 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-5206-account-create-update-97znp"] Nov 28 07:14:39 crc kubenswrapper[4922]: I1128 07:14:39.470963 4922 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-5206-account-create-update-97znp" Nov 28 07:14:39 crc kubenswrapper[4922]: I1128 07:14:39.473498 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-5206-account-create-update-97znp"] Nov 28 07:14:39 crc kubenswrapper[4922]: I1128 07:14:39.473884 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-db-secret" Nov 28 07:14:39 crc kubenswrapper[4922]: I1128 07:14:39.532671 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/674d4d02-4734-4cb5-a316-b12a9dcb974c-operator-scripts\") pod \"cinder-e247-account-create-update-m48qt\" (UID: \"674d4d02-4734-4cb5-a316-b12a9dcb974c\") " pod="openstack/cinder-e247-account-create-update-m48qt" Nov 28 07:14:39 crc kubenswrapper[4922]: I1128 07:14:39.532725 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mmff7\" (UniqueName: \"kubernetes.io/projected/2f1294e8-adc5-47fa-a186-4ef79c093a3a-kube-api-access-mmff7\") pod \"neutron-db-create-th8tp\" (UID: \"2f1294e8-adc5-47fa-a186-4ef79c093a3a\") " pod="openstack/neutron-db-create-th8tp" Nov 28 07:14:39 crc kubenswrapper[4922]: I1128 07:14:39.532904 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/33e395bd-1a5e-4c48-829d-e7b3408e9b8e-config-data\") pod \"keystone-db-sync-7l6kx\" (UID: \"33e395bd-1a5e-4c48-829d-e7b3408e9b8e\") " pod="openstack/keystone-db-sync-7l6kx" Nov 28 07:14:39 crc kubenswrapper[4922]: I1128 07:14:39.533049 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/2f1294e8-adc5-47fa-a186-4ef79c093a3a-operator-scripts\") pod \"neutron-db-create-th8tp\" (UID: \"2f1294e8-adc5-47fa-a186-4ef79c093a3a\") " pod="openstack/neutron-db-create-th8tp" Nov 28 07:14:39 crc kubenswrapper[4922]: I1128 07:14:39.533108 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/33e395bd-1a5e-4c48-829d-e7b3408e9b8e-combined-ca-bundle\") pod \"keystone-db-sync-7l6kx\" (UID: \"33e395bd-1a5e-4c48-829d-e7b3408e9b8e\") " pod="openstack/keystone-db-sync-7l6kx" Nov 28 07:14:39 crc kubenswrapper[4922]: I1128 07:14:39.533251 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6k4g9\" (UniqueName: \"kubernetes.io/projected/33e395bd-1a5e-4c48-829d-e7b3408e9b8e-kube-api-access-6k4g9\") pod \"keystone-db-sync-7l6kx\" (UID: \"33e395bd-1a5e-4c48-829d-e7b3408e9b8e\") " pod="openstack/keystone-db-sync-7l6kx" Nov 28 07:14:39 crc kubenswrapper[4922]: I1128 07:14:39.533377 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6psf5\" (UniqueName: \"kubernetes.io/projected/674d4d02-4734-4cb5-a316-b12a9dcb974c-kube-api-access-6psf5\") pod \"cinder-e247-account-create-update-m48qt\" (UID: \"674d4d02-4734-4cb5-a316-b12a9dcb974c\") " pod="openstack/cinder-e247-account-create-update-m48qt" Nov 28 07:14:39 crc kubenswrapper[4922]: I1128 07:14:39.537337 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/33e395bd-1a5e-4c48-829d-e7b3408e9b8e-combined-ca-bundle\") pod \"keystone-db-sync-7l6kx\" (UID: 
\"33e395bd-1a5e-4c48-829d-e7b3408e9b8e\") " pod="openstack/keystone-db-sync-7l6kx" Nov 28 07:14:39 crc kubenswrapper[4922]: I1128 07:14:39.541307 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/33e395bd-1a5e-4c48-829d-e7b3408e9b8e-config-data\") pod \"keystone-db-sync-7l6kx\" (UID: \"33e395bd-1a5e-4c48-829d-e7b3408e9b8e\") " pod="openstack/keystone-db-sync-7l6kx" Nov 28 07:14:39 crc kubenswrapper[4922]: I1128 07:14:39.569492 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6k4g9\" (UniqueName: \"kubernetes.io/projected/33e395bd-1a5e-4c48-829d-e7b3408e9b8e-kube-api-access-6k4g9\") pod \"keystone-db-sync-7l6kx\" (UID: \"33e395bd-1a5e-4c48-829d-e7b3408e9b8e\") " pod="openstack/keystone-db-sync-7l6kx" Nov 28 07:14:39 crc kubenswrapper[4922]: I1128 07:14:39.624390 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-sync-7l6kx" Nov 28 07:14:39 crc kubenswrapper[4922]: I1128 07:14:39.635401 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/2f1294e8-adc5-47fa-a186-4ef79c093a3a-operator-scripts\") pod \"neutron-db-create-th8tp\" (UID: \"2f1294e8-adc5-47fa-a186-4ef79c093a3a\") " pod="openstack/neutron-db-create-th8tp" Nov 28 07:14:39 crc kubenswrapper[4922]: I1128 07:14:39.635536 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6psf5\" (UniqueName: \"kubernetes.io/projected/674d4d02-4734-4cb5-a316-b12a9dcb974c-kube-api-access-6psf5\") pod \"cinder-e247-account-create-update-m48qt\" (UID: \"674d4d02-4734-4cb5-a316-b12a9dcb974c\") " pod="openstack/cinder-e247-account-create-update-m48qt" Nov 28 07:14:39 crc kubenswrapper[4922]: I1128 07:14:39.635603 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/674d4d02-4734-4cb5-a316-b12a9dcb974c-operator-scripts\") pod \"cinder-e247-account-create-update-m48qt\" (UID: \"674d4d02-4734-4cb5-a316-b12a9dcb974c\") " pod="openstack/cinder-e247-account-create-update-m48qt" Nov 28 07:14:39 crc kubenswrapper[4922]: I1128 07:14:39.635657 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mmff7\" (UniqueName: \"kubernetes.io/projected/2f1294e8-adc5-47fa-a186-4ef79c093a3a-kube-api-access-mmff7\") pod \"neutron-db-create-th8tp\" (UID: \"2f1294e8-adc5-47fa-a186-4ef79c093a3a\") " pod="openstack/neutron-db-create-th8tp" Nov 28 07:14:39 crc kubenswrapper[4922]: I1128 07:14:39.635736 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c8fb5604-6627-4b59-9407-fe88464ba214-operator-scripts\") pod \"neutron-5206-account-create-update-97znp\" (UID: \"c8fb5604-6627-4b59-9407-fe88464ba214\") " pod="openstack/neutron-5206-account-create-update-97znp" Nov 28 07:14:39 crc kubenswrapper[4922]: I1128 07:14:39.635758 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2w2v7\" (UniqueName: \"kubernetes.io/projected/c8fb5604-6627-4b59-9407-fe88464ba214-kube-api-access-2w2v7\") pod \"neutron-5206-account-create-update-97znp\" (UID: \"c8fb5604-6627-4b59-9407-fe88464ba214\") " pod="openstack/neutron-5206-account-create-update-97znp" Nov 28 07:14:39 crc kubenswrapper[4922]: I1128 
07:14:39.636199 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/2f1294e8-adc5-47fa-a186-4ef79c093a3a-operator-scripts\") pod \"neutron-db-create-th8tp\" (UID: \"2f1294e8-adc5-47fa-a186-4ef79c093a3a\") " pod="openstack/neutron-db-create-th8tp" Nov 28 07:14:39 crc kubenswrapper[4922]: I1128 07:14:39.636574 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/674d4d02-4734-4cb5-a316-b12a9dcb974c-operator-scripts\") pod \"cinder-e247-account-create-update-m48qt\" (UID: \"674d4d02-4734-4cb5-a316-b12a9dcb974c\") " pod="openstack/cinder-e247-account-create-update-m48qt" Nov 28 07:14:39 crc kubenswrapper[4922]: I1128 07:14:39.652412 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mmff7\" (UniqueName: \"kubernetes.io/projected/2f1294e8-adc5-47fa-a186-4ef79c093a3a-kube-api-access-mmff7\") pod \"neutron-db-create-th8tp\" (UID: \"2f1294e8-adc5-47fa-a186-4ef79c093a3a\") " pod="openstack/neutron-db-create-th8tp" Nov 28 07:14:39 crc kubenswrapper[4922]: I1128 07:14:39.653602 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6psf5\" (UniqueName: \"kubernetes.io/projected/674d4d02-4734-4cb5-a316-b12a9dcb974c-kube-api-access-6psf5\") pod \"cinder-e247-account-create-update-m48qt\" (UID: \"674d4d02-4734-4cb5-a316-b12a9dcb974c\") " pod="openstack/cinder-e247-account-create-update-m48qt" Nov 28 07:14:39 crc kubenswrapper[4922]: I1128 07:14:39.736777 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c8fb5604-6627-4b59-9407-fe88464ba214-operator-scripts\") pod \"neutron-5206-account-create-update-97znp\" (UID: \"c8fb5604-6627-4b59-9407-fe88464ba214\") " pod="openstack/neutron-5206-account-create-update-97znp" Nov 28 07:14:39 crc kubenswrapper[4922]: I1128 07:14:39.736844 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2w2v7\" (UniqueName: \"kubernetes.io/projected/c8fb5604-6627-4b59-9407-fe88464ba214-kube-api-access-2w2v7\") pod \"neutron-5206-account-create-update-97znp\" (UID: \"c8fb5604-6627-4b59-9407-fe88464ba214\") " pod="openstack/neutron-5206-account-create-update-97znp" Nov 28 07:14:39 crc kubenswrapper[4922]: I1128 07:14:39.739298 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c8fb5604-6627-4b59-9407-fe88464ba214-operator-scripts\") pod \"neutron-5206-account-create-update-97znp\" (UID: \"c8fb5604-6627-4b59-9407-fe88464ba214\") " pod="openstack/neutron-5206-account-create-update-97znp" Nov 28 07:14:39 crc kubenswrapper[4922]: I1128 07:14:39.766682 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2w2v7\" (UniqueName: \"kubernetes.io/projected/c8fb5604-6627-4b59-9407-fe88464ba214-kube-api-access-2w2v7\") pod \"neutron-5206-account-create-update-97znp\" (UID: \"c8fb5604-6627-4b59-9407-fe88464ba214\") " pod="openstack/neutron-5206-account-create-update-97znp" Nov 28 07:14:39 crc kubenswrapper[4922]: I1128 07:14:39.772782 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-create-th8tp" Nov 28 07:14:39 crc kubenswrapper[4922]: I1128 07:14:39.786462 4922 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-e247-account-create-update-m48qt" Nov 28 07:14:39 crc kubenswrapper[4922]: I1128 07:14:39.813173 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-5206-account-create-update-97znp" Nov 28 07:14:46 crc kubenswrapper[4922]: E1128 07:14:46.992580 4922 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-glance-api@sha256:bb01899a9f257500587d26856df89b6046d8623ca11e51c1393030d590c80945" Nov 28 07:14:46 crc kubenswrapper[4922]: E1128 07:14:46.993318 4922 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:glance-db-sync,Image:quay.io/podified-antelope-centos9/openstack-glance-api@sha256:bb01899a9f257500587d26856df89b6046d8623ca11e51c1393030d590c80945,Command:[/bin/bash],Args:[-c /usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:KOLLA_BOOTSTRAP,Value:true,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:db-sync-config-data,ReadOnly:true,MountPath:/etc/glance/glance.conf.d,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/etc/my.cnf,SubPath:my.cnf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/config.json,SubPath:db-sync-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-4f6qb,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*42415,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:*42415,ProcMount:nil,WindowsOptions:nil,SeccompProfile:&SeccompProfile{Type:RuntimeDefault,LocalhostProfile:nil,},AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod glance-db-sync-wk4t8_openstack(2e87eedc-5f8a-46e2-bce1-c0361074a7f0): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 28 07:14:46 crc kubenswrapper[4922]: E1128 07:14:46.994492 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"glance-db-sync\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/glance-db-sync-wk4t8" podUID="2e87eedc-5f8a-46e2-bce1-c0361074a7f0" Nov 28 07:14:47 crc kubenswrapper[4922]: I1128 07:14:47.206466 4922 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-controller-xqzrg-config-ghjm2" Nov 28 07:14:47 crc kubenswrapper[4922]: I1128 07:14:47.322730 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/e2cc2ab6-eb73-47e0-831e-681a4a088152-var-run\") pod \"e2cc2ab6-eb73-47e0-831e-681a4a088152\" (UID: \"e2cc2ab6-eb73-47e0-831e-681a4a088152\") " Nov 28 07:14:47 crc kubenswrapper[4922]: I1128 07:14:47.322784 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/e2cc2ab6-eb73-47e0-831e-681a4a088152-var-log-ovn\") pod \"e2cc2ab6-eb73-47e0-831e-681a4a088152\" (UID: \"e2cc2ab6-eb73-47e0-831e-681a4a088152\") " Nov 28 07:14:47 crc kubenswrapper[4922]: I1128 07:14:47.322847 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/e2cc2ab6-eb73-47e0-831e-681a4a088152-var-run" (OuterVolumeSpecName: "var-run") pod "e2cc2ab6-eb73-47e0-831e-681a4a088152" (UID: "e2cc2ab6-eb73-47e0-831e-681a4a088152"). InnerVolumeSpecName "var-run". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 28 07:14:47 crc kubenswrapper[4922]: I1128 07:14:47.322910 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/e2cc2ab6-eb73-47e0-831e-681a4a088152-var-log-ovn" (OuterVolumeSpecName: "var-log-ovn") pod "e2cc2ab6-eb73-47e0-831e-681a4a088152" (UID: "e2cc2ab6-eb73-47e0-831e-681a4a088152"). InnerVolumeSpecName "var-log-ovn". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 28 07:14:47 crc kubenswrapper[4922]: I1128 07:14:47.323104 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lxl66\" (UniqueName: \"kubernetes.io/projected/e2cc2ab6-eb73-47e0-831e-681a4a088152-kube-api-access-lxl66\") pod \"e2cc2ab6-eb73-47e0-831e-681a4a088152\" (UID: \"e2cc2ab6-eb73-47e0-831e-681a4a088152\") " Nov 28 07:14:47 crc kubenswrapper[4922]: I1128 07:14:47.323137 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/e2cc2ab6-eb73-47e0-831e-681a4a088152-additional-scripts\") pod \"e2cc2ab6-eb73-47e0-831e-681a4a088152\" (UID: \"e2cc2ab6-eb73-47e0-831e-681a4a088152\") " Nov 28 07:14:47 crc kubenswrapper[4922]: I1128 07:14:47.323167 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/e2cc2ab6-eb73-47e0-831e-681a4a088152-var-run-ovn\") pod \"e2cc2ab6-eb73-47e0-831e-681a4a088152\" (UID: \"e2cc2ab6-eb73-47e0-831e-681a4a088152\") " Nov 28 07:14:47 crc kubenswrapper[4922]: I1128 07:14:47.323200 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/e2cc2ab6-eb73-47e0-831e-681a4a088152-scripts\") pod \"e2cc2ab6-eb73-47e0-831e-681a4a088152\" (UID: \"e2cc2ab6-eb73-47e0-831e-681a4a088152\") " Nov 28 07:14:47 crc kubenswrapper[4922]: I1128 07:14:47.323509 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/e2cc2ab6-eb73-47e0-831e-681a4a088152-var-run-ovn" (OuterVolumeSpecName: "var-run-ovn") pod "e2cc2ab6-eb73-47e0-831e-681a4a088152" (UID: "e2cc2ab6-eb73-47e0-831e-681a4a088152"). InnerVolumeSpecName "var-run-ovn". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 28 07:14:47 crc kubenswrapper[4922]: I1128 07:14:47.324544 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e2cc2ab6-eb73-47e0-831e-681a4a088152-additional-scripts" (OuterVolumeSpecName: "additional-scripts") pod "e2cc2ab6-eb73-47e0-831e-681a4a088152" (UID: "e2cc2ab6-eb73-47e0-831e-681a4a088152"). InnerVolumeSpecName "additional-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 07:14:47 crc kubenswrapper[4922]: I1128 07:14:47.324633 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e2cc2ab6-eb73-47e0-831e-681a4a088152-scripts" (OuterVolumeSpecName: "scripts") pod "e2cc2ab6-eb73-47e0-831e-681a4a088152" (UID: "e2cc2ab6-eb73-47e0-831e-681a4a088152"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 07:14:47 crc kubenswrapper[4922]: I1128 07:14:47.324700 4922 reconciler_common.go:293] "Volume detached for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/e2cc2ab6-eb73-47e0-831e-681a4a088152-var-run-ovn\") on node \"crc\" DevicePath \"\"" Nov 28 07:14:47 crc kubenswrapper[4922]: I1128 07:14:47.324712 4922 reconciler_common.go:293] "Volume detached for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/e2cc2ab6-eb73-47e0-831e-681a4a088152-var-run\") on node \"crc\" DevicePath \"\"" Nov 28 07:14:47 crc kubenswrapper[4922]: I1128 07:14:47.324720 4922 reconciler_common.go:293] "Volume detached for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/e2cc2ab6-eb73-47e0-831e-681a4a088152-var-log-ovn\") on node \"crc\" DevicePath \"\"" Nov 28 07:14:47 crc kubenswrapper[4922]: I1128 07:14:47.340236 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e2cc2ab6-eb73-47e0-831e-681a4a088152-kube-api-access-lxl66" (OuterVolumeSpecName: "kube-api-access-lxl66") pod "e2cc2ab6-eb73-47e0-831e-681a4a088152" (UID: "e2cc2ab6-eb73-47e0-831e-681a4a088152"). InnerVolumeSpecName "kube-api-access-lxl66". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 07:14:47 crc kubenswrapper[4922]: I1128 07:14:47.420603 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-xqzrg-config-ghjm2" event={"ID":"e2cc2ab6-eb73-47e0-831e-681a4a088152","Type":"ContainerDied","Data":"b5c892ee462f9ec3a9c55c4c3d7323e2ed6a1178ab68cba33291863c08860d59"} Nov 28 07:14:47 crc kubenswrapper[4922]: I1128 07:14:47.420629 4922 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-controller-xqzrg-config-ghjm2" Nov 28 07:14:47 crc kubenswrapper[4922]: I1128 07:14:47.420643 4922 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="b5c892ee462f9ec3a9c55c4c3d7323e2ed6a1178ab68cba33291863c08860d59" Nov 28 07:14:47 crc kubenswrapper[4922]: E1128 07:14:47.421963 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"glance-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-glance-api@sha256:bb01899a9f257500587d26856df89b6046d8623ca11e51c1393030d590c80945\\\"\"" pod="openstack/glance-db-sync-wk4t8" podUID="2e87eedc-5f8a-46e2-bce1-c0361074a7f0" Nov 28 07:14:47 crc kubenswrapper[4922]: I1128 07:14:47.427593 4922 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/e2cc2ab6-eb73-47e0-831e-681a4a088152-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 07:14:47 crc kubenswrapper[4922]: I1128 07:14:47.427621 4922 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lxl66\" (UniqueName: \"kubernetes.io/projected/e2cc2ab6-eb73-47e0-831e-681a4a088152-kube-api-access-lxl66\") on node \"crc\" DevicePath \"\"" Nov 28 07:14:47 crc kubenswrapper[4922]: I1128 07:14:47.427634 4922 reconciler_common.go:293] "Volume detached for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/e2cc2ab6-eb73-47e0-831e-681a4a088152-additional-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 07:14:47 crc kubenswrapper[4922]: I1128 07:14:47.630155 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-5229-account-create-update-2k5qm"] Nov 28 07:14:47 crc kubenswrapper[4922]: I1128 07:14:47.752693 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-e247-account-create-update-m48qt"] Nov 28 07:14:47 crc kubenswrapper[4922]: I1128 07:14:47.758833 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-5206-account-create-update-97znp"] Nov 28 07:14:47 crc kubenswrapper[4922]: I1128 07:14:47.767669 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-db-sync-7l6kx"] Nov 28 07:14:47 crc kubenswrapper[4922]: W1128 07:14:47.767891 4922 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod674d4d02_4734_4cb5_a316_b12a9dcb974c.slice/crio-210b0663a9db43c661e35f805cdd633babb0df0722371fd81674b1e8398d73d6 WatchSource:0}: Error finding container 210b0663a9db43c661e35f805cdd633babb0df0722371fd81674b1e8398d73d6: Status 404 returned error can't find the container with id 210b0663a9db43c661e35f805cdd633babb0df0722371fd81674b1e8398d73d6 Nov 28 07:14:47 crc kubenswrapper[4922]: W1128 07:14:47.773503 4922 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podc8fb5604_6627_4b59_9407_fe88464ba214.slice/crio-21f9f77113cae0aa704f9f057e94b9ad9c1446ab0b2b33786cf14578faa7323f WatchSource:0}: Error finding container 21f9f77113cae0aa704f9f057e94b9ad9c1446ab0b2b33786cf14578faa7323f: Status 404 returned error can't find the container with id 21f9f77113cae0aa704f9f057e94b9ad9c1446ab0b2b33786cf14578faa7323f Nov 28 07:14:47 crc kubenswrapper[4922]: I1128 07:14:47.778409 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-db-create-jtvts"] Nov 28 07:14:47 crc kubenswrapper[4922]: I1128 07:14:47.785889 4922 
kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-db-create-th8tp"] Nov 28 07:14:47 crc kubenswrapper[4922]: I1128 07:14:47.792326 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-db-create-lx69b"] Nov 28 07:14:48 crc kubenswrapper[4922]: I1128 07:14:48.140461 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-storage-0"] Nov 28 07:14:48 crc kubenswrapper[4922]: W1128 07:14:48.143560 4922 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod46c3d0a8_d9ed_419a_baf3_57aaaf0c56fe.slice/crio-faa40d67094345d06ccb5f1f49589a811c96247ba3b8169fb17210e5a0cd654a WatchSource:0}: Error finding container faa40d67094345d06ccb5f1f49589a811c96247ba3b8169fb17210e5a0cd654a: Status 404 returned error can't find the container with id faa40d67094345d06ccb5f1f49589a811c96247ba3b8169fb17210e5a0cd654a Nov 28 07:14:48 crc kubenswrapper[4922]: I1128 07:14:48.343465 4922 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovn-controller-xqzrg-config-ghjm2"] Nov 28 07:14:48 crc kubenswrapper[4922]: I1128 07:14:48.352629 4922 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ovn-controller-xqzrg-config-ghjm2"] Nov 28 07:14:48 crc kubenswrapper[4922]: I1128 07:14:48.430531 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-5206-account-create-update-97znp" event={"ID":"c8fb5604-6627-4b59-9407-fe88464ba214","Type":"ContainerStarted","Data":"5110efc5edb18c6fad94ea09c68d35b7915ac98812274e1ced945184faa6c733"} Nov 28 07:14:48 crc kubenswrapper[4922]: I1128 07:14:48.430581 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-5206-account-create-update-97znp" event={"ID":"c8fb5604-6627-4b59-9407-fe88464ba214","Type":"ContainerStarted","Data":"21f9f77113cae0aa704f9f057e94b9ad9c1446ab0b2b33786cf14578faa7323f"} Nov 28 07:14:48 crc kubenswrapper[4922]: I1128 07:14:48.435018 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"46c3d0a8-d9ed-419a-baf3-57aaaf0c56fe","Type":"ContainerStarted","Data":"faa40d67094345d06ccb5f1f49589a811c96247ba3b8169fb17210e5a0cd654a"} Nov 28 07:14:48 crc kubenswrapper[4922]: I1128 07:14:48.436055 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-sync-7l6kx" event={"ID":"33e395bd-1a5e-4c48-829d-e7b3408e9b8e","Type":"ContainerStarted","Data":"b6cf9e43f8947de8c7eed6ff8803d21971b37430564395c825d05a153febbc3d"} Nov 28 07:14:48 crc kubenswrapper[4922]: I1128 07:14:48.437393 4922 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-xqzrg-config-zcp8g"] Nov 28 07:14:48 crc kubenswrapper[4922]: E1128 07:14:48.437807 4922 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e2cc2ab6-eb73-47e0-831e-681a4a088152" containerName="ovn-config" Nov 28 07:14:48 crc kubenswrapper[4922]: I1128 07:14:48.437833 4922 state_mem.go:107] "Deleted CPUSet assignment" podUID="e2cc2ab6-eb73-47e0-831e-681a4a088152" containerName="ovn-config" Nov 28 07:14:48 crc kubenswrapper[4922]: I1128 07:14:48.437812 4922 generic.go:334] "Generic (PLEG): container finished" podID="6a3388c9-ec6c-400b-bfcd-59374499fd42" containerID="ad88894264657dce236a449d6c1a1670d012fb1c0d3ce440093a76099c5e14a9" exitCode=0 Nov 28 07:14:48 crc kubenswrapper[4922]: I1128 07:14:48.438074 4922 memory_manager.go:354] "RemoveStaleState removing state" podUID="e2cc2ab6-eb73-47e0-831e-681a4a088152" containerName="ovn-config" 
Nov 28 07:14:48 crc kubenswrapper[4922]: I1128 07:14:48.438803 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-create-jtvts" event={"ID":"6a3388c9-ec6c-400b-bfcd-59374499fd42","Type":"ContainerDied","Data":"ad88894264657dce236a449d6c1a1670d012fb1c0d3ce440093a76099c5e14a9"} Nov 28 07:14:48 crc kubenswrapper[4922]: I1128 07:14:48.438835 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-create-jtvts" event={"ID":"6a3388c9-ec6c-400b-bfcd-59374499fd42","Type":"ContainerStarted","Data":"4dc1017000fb5e4060e4d2e0662b16f8515df792e7f2726c25234bf274208a14"} Nov 28 07:14:48 crc kubenswrapper[4922]: I1128 07:14:48.438913 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-xqzrg-config-zcp8g" Nov 28 07:14:48 crc kubenswrapper[4922]: I1128 07:14:48.440444 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-create-lx69b" event={"ID":"711e933a-0d19-4c60-90fe-c1c3ed0174ed","Type":"ContainerStarted","Data":"8f71a24ebe84a952f8cb8a31ae36d0aff6eddbd2132b3fe335e8fbf512a8c407"} Nov 28 07:14:48 crc kubenswrapper[4922]: I1128 07:14:48.440476 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-create-lx69b" event={"ID":"711e933a-0d19-4c60-90fe-c1c3ed0174ed","Type":"ContainerStarted","Data":"e8047d85e64380685e4a9394b2a5a67e327242eb590883f15209b1c48093b094"} Nov 28 07:14:48 crc kubenswrapper[4922]: I1128 07:14:48.441171 4922 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovncontroller-extra-scripts" Nov 28 07:14:48 crc kubenswrapper[4922]: I1128 07:14:48.443330 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-create-th8tp" event={"ID":"2f1294e8-adc5-47fa-a186-4ef79c093a3a","Type":"ContainerStarted","Data":"e47ad8cdb4ff4702350e09f95a7319059daf400eab8c1e5b6e95d48c4c065416"} Nov 28 07:14:48 crc kubenswrapper[4922]: I1128 07:14:48.443396 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-create-th8tp" event={"ID":"2f1294e8-adc5-47fa-a186-4ef79c093a3a","Type":"ContainerStarted","Data":"de8d6676834eb01ee68e169f1fb6504dcf426e6df2afbe70b192dc19fef2e9a2"} Nov 28 07:14:48 crc kubenswrapper[4922]: I1128 07:14:48.450506 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-5229-account-create-update-2k5qm" event={"ID":"813e18d9-b3c1-48eb-aff7-197d425910ef","Type":"ContainerStarted","Data":"4832975092faa8cc9fb4db742453510f007c3fc48070e2ad74419d9ba8a9f78f"} Nov 28 07:14:48 crc kubenswrapper[4922]: I1128 07:14:48.450565 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-5229-account-create-update-2k5qm" event={"ID":"813e18d9-b3c1-48eb-aff7-197d425910ef","Type":"ContainerStarted","Data":"15639519b0044a4683424b3e49558c6b7b1f89747feba3ff45c5cf59309828c2"} Nov 28 07:14:48 crc kubenswrapper[4922]: I1128 07:14:48.452500 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-e247-account-create-update-m48qt" event={"ID":"674d4d02-4734-4cb5-a316-b12a9dcb974c","Type":"ContainerStarted","Data":"e402e349dd0eadea957795fc5040e837508e28009179cb1326770ba305c82e02"} Nov 28 07:14:48 crc kubenswrapper[4922]: I1128 07:14:48.452535 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-e247-account-create-update-m48qt" event={"ID":"674d4d02-4734-4cb5-a316-b12a9dcb974c","Type":"ContainerStarted","Data":"210b0663a9db43c661e35f805cdd633babb0df0722371fd81674b1e8398d73d6"} Nov 28 07:14:48 crc 
kubenswrapper[4922]: I1128 07:14:48.467706 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-xqzrg-config-zcp8g"] Nov 28 07:14:48 crc kubenswrapper[4922]: I1128 07:14:48.474494 4922 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-5206-account-create-update-97znp" podStartSLOduration=9.474472328 podStartE2EDuration="9.474472328s" podCreationTimestamp="2025-11-28 07:14:39 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 07:14:48.443065134 +0000 UTC m=+1333.363460716" watchObservedRunningTime="2025-11-28 07:14:48.474472328 +0000 UTC m=+1333.394867910" Nov 28 07:14:48 crc kubenswrapper[4922]: I1128 07:14:48.490037 4922 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-db-create-lx69b" podStartSLOduration=10.49002011 podStartE2EDuration="10.49002011s" podCreationTimestamp="2025-11-28 07:14:38 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 07:14:48.47494047 +0000 UTC m=+1333.395336072" watchObservedRunningTime="2025-11-28 07:14:48.49002011 +0000 UTC m=+1333.410415692" Nov 28 07:14:48 crc kubenswrapper[4922]: I1128 07:14:48.510537 4922 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-5229-account-create-update-2k5qm" podStartSLOduration=9.510519764 podStartE2EDuration="9.510519764s" podCreationTimestamp="2025-11-28 07:14:39 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 07:14:48.504008601 +0000 UTC m=+1333.424404193" watchObservedRunningTime="2025-11-28 07:14:48.510519764 +0000 UTC m=+1333.430915346" Nov 28 07:14:48 crc kubenswrapper[4922]: I1128 07:14:48.536126 4922 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-e247-account-create-update-m48qt" podStartSLOduration=9.536105853 podStartE2EDuration="9.536105853s" podCreationTimestamp="2025-11-28 07:14:39 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 07:14:48.517487019 +0000 UTC m=+1333.437882601" watchObservedRunningTime="2025-11-28 07:14:48.536105853 +0000 UTC m=+1333.456501445" Nov 28 07:14:48 crc kubenswrapper[4922]: I1128 07:14:48.539656 4922 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-db-create-th8tp" podStartSLOduration=9.539639756 podStartE2EDuration="9.539639756s" podCreationTimestamp="2025-11-28 07:14:39 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 07:14:48.530519205 +0000 UTC m=+1333.450914787" watchObservedRunningTime="2025-11-28 07:14:48.539639756 +0000 UTC m=+1333.460035338" Nov 28 07:14:48 crc kubenswrapper[4922]: I1128 07:14:48.543797 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/55d5918c-75a5-4acb-a1eb-a2aa9a4da0cd-additional-scripts\") pod \"ovn-controller-xqzrg-config-zcp8g\" (UID: \"55d5918c-75a5-4acb-a1eb-a2aa9a4da0cd\") " pod="openstack/ovn-controller-xqzrg-config-zcp8g" Nov 28 07:14:48 crc kubenswrapper[4922]: I1128 07:14:48.543845 4922 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/55d5918c-75a5-4acb-a1eb-a2aa9a4da0cd-var-log-ovn\") pod \"ovn-controller-xqzrg-config-zcp8g\" (UID: \"55d5918c-75a5-4acb-a1eb-a2aa9a4da0cd\") " pod="openstack/ovn-controller-xqzrg-config-zcp8g" Nov 28 07:14:48 crc kubenswrapper[4922]: I1128 07:14:48.544083 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/55d5918c-75a5-4acb-a1eb-a2aa9a4da0cd-var-run-ovn\") pod \"ovn-controller-xqzrg-config-zcp8g\" (UID: \"55d5918c-75a5-4acb-a1eb-a2aa9a4da0cd\") " pod="openstack/ovn-controller-xqzrg-config-zcp8g" Nov 28 07:14:48 crc kubenswrapper[4922]: I1128 07:14:48.544280 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/55d5918c-75a5-4acb-a1eb-a2aa9a4da0cd-scripts\") pod \"ovn-controller-xqzrg-config-zcp8g\" (UID: \"55d5918c-75a5-4acb-a1eb-a2aa9a4da0cd\") " pod="openstack/ovn-controller-xqzrg-config-zcp8g" Nov 28 07:14:48 crc kubenswrapper[4922]: I1128 07:14:48.544318 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4vg54\" (UniqueName: \"kubernetes.io/projected/55d5918c-75a5-4acb-a1eb-a2aa9a4da0cd-kube-api-access-4vg54\") pod \"ovn-controller-xqzrg-config-zcp8g\" (UID: \"55d5918c-75a5-4acb-a1eb-a2aa9a4da0cd\") " pod="openstack/ovn-controller-xqzrg-config-zcp8g" Nov 28 07:14:48 crc kubenswrapper[4922]: I1128 07:14:48.544346 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/55d5918c-75a5-4acb-a1eb-a2aa9a4da0cd-var-run\") pod \"ovn-controller-xqzrg-config-zcp8g\" (UID: \"55d5918c-75a5-4acb-a1eb-a2aa9a4da0cd\") " pod="openstack/ovn-controller-xqzrg-config-zcp8g" Nov 28 07:14:48 crc kubenswrapper[4922]: I1128 07:14:48.645601 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/55d5918c-75a5-4acb-a1eb-a2aa9a4da0cd-scripts\") pod \"ovn-controller-xqzrg-config-zcp8g\" (UID: \"55d5918c-75a5-4acb-a1eb-a2aa9a4da0cd\") " pod="openstack/ovn-controller-xqzrg-config-zcp8g" Nov 28 07:14:48 crc kubenswrapper[4922]: I1128 07:14:48.645652 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4vg54\" (UniqueName: \"kubernetes.io/projected/55d5918c-75a5-4acb-a1eb-a2aa9a4da0cd-kube-api-access-4vg54\") pod \"ovn-controller-xqzrg-config-zcp8g\" (UID: \"55d5918c-75a5-4acb-a1eb-a2aa9a4da0cd\") " pod="openstack/ovn-controller-xqzrg-config-zcp8g" Nov 28 07:14:48 crc kubenswrapper[4922]: I1128 07:14:48.645674 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/55d5918c-75a5-4acb-a1eb-a2aa9a4da0cd-var-run\") pod \"ovn-controller-xqzrg-config-zcp8g\" (UID: \"55d5918c-75a5-4acb-a1eb-a2aa9a4da0cd\") " pod="openstack/ovn-controller-xqzrg-config-zcp8g" Nov 28 07:14:48 crc kubenswrapper[4922]: I1128 07:14:48.645727 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/55d5918c-75a5-4acb-a1eb-a2aa9a4da0cd-additional-scripts\") pod \"ovn-controller-xqzrg-config-zcp8g\" (UID: \"55d5918c-75a5-4acb-a1eb-a2aa9a4da0cd\") " 
pod="openstack/ovn-controller-xqzrg-config-zcp8g" Nov 28 07:14:48 crc kubenswrapper[4922]: I1128 07:14:48.645744 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/55d5918c-75a5-4acb-a1eb-a2aa9a4da0cd-var-log-ovn\") pod \"ovn-controller-xqzrg-config-zcp8g\" (UID: \"55d5918c-75a5-4acb-a1eb-a2aa9a4da0cd\") " pod="openstack/ovn-controller-xqzrg-config-zcp8g" Nov 28 07:14:48 crc kubenswrapper[4922]: I1128 07:14:48.645810 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/55d5918c-75a5-4acb-a1eb-a2aa9a4da0cd-var-run-ovn\") pod \"ovn-controller-xqzrg-config-zcp8g\" (UID: \"55d5918c-75a5-4acb-a1eb-a2aa9a4da0cd\") " pod="openstack/ovn-controller-xqzrg-config-zcp8g" Nov 28 07:14:48 crc kubenswrapper[4922]: I1128 07:14:48.646335 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/55d5918c-75a5-4acb-a1eb-a2aa9a4da0cd-var-run-ovn\") pod \"ovn-controller-xqzrg-config-zcp8g\" (UID: \"55d5918c-75a5-4acb-a1eb-a2aa9a4da0cd\") " pod="openstack/ovn-controller-xqzrg-config-zcp8g" Nov 28 07:14:48 crc kubenswrapper[4922]: I1128 07:14:48.646430 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/55d5918c-75a5-4acb-a1eb-a2aa9a4da0cd-var-log-ovn\") pod \"ovn-controller-xqzrg-config-zcp8g\" (UID: \"55d5918c-75a5-4acb-a1eb-a2aa9a4da0cd\") " pod="openstack/ovn-controller-xqzrg-config-zcp8g" Nov 28 07:14:48 crc kubenswrapper[4922]: I1128 07:14:48.646454 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/55d5918c-75a5-4acb-a1eb-a2aa9a4da0cd-var-run\") pod \"ovn-controller-xqzrg-config-zcp8g\" (UID: \"55d5918c-75a5-4acb-a1eb-a2aa9a4da0cd\") " pod="openstack/ovn-controller-xqzrg-config-zcp8g" Nov 28 07:14:48 crc kubenswrapper[4922]: I1128 07:14:48.646848 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/55d5918c-75a5-4acb-a1eb-a2aa9a4da0cd-additional-scripts\") pod \"ovn-controller-xqzrg-config-zcp8g\" (UID: \"55d5918c-75a5-4acb-a1eb-a2aa9a4da0cd\") " pod="openstack/ovn-controller-xqzrg-config-zcp8g" Nov 28 07:14:48 crc kubenswrapper[4922]: I1128 07:14:48.650504 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/55d5918c-75a5-4acb-a1eb-a2aa9a4da0cd-scripts\") pod \"ovn-controller-xqzrg-config-zcp8g\" (UID: \"55d5918c-75a5-4acb-a1eb-a2aa9a4da0cd\") " pod="openstack/ovn-controller-xqzrg-config-zcp8g" Nov 28 07:14:48 crc kubenswrapper[4922]: I1128 07:14:48.677536 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4vg54\" (UniqueName: \"kubernetes.io/projected/55d5918c-75a5-4acb-a1eb-a2aa9a4da0cd-kube-api-access-4vg54\") pod \"ovn-controller-xqzrg-config-zcp8g\" (UID: \"55d5918c-75a5-4acb-a1eb-a2aa9a4da0cd\") " pod="openstack/ovn-controller-xqzrg-config-zcp8g" Nov 28 07:14:48 crc kubenswrapper[4922]: I1128 07:14:48.755386 4922 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-controller-xqzrg-config-zcp8g" Nov 28 07:14:48 crc kubenswrapper[4922]: I1128 07:14:48.922214 4922 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/rabbitmq-cell1-server-0" podUID="99708a5d-57d5-4479-8e09-94428bb13fa3" containerName="rabbitmq" probeResult="failure" output="dial tcp 10.217.0.102:5671: connect: connection refused" Nov 28 07:14:49 crc kubenswrapper[4922]: I1128 07:14:49.233790 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-xqzrg-config-zcp8g"] Nov 28 07:14:49 crc kubenswrapper[4922]: I1128 07:14:49.408540 4922 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e2cc2ab6-eb73-47e0-831e-681a4a088152" path="/var/lib/kubelet/pods/e2cc2ab6-eb73-47e0-831e-681a4a088152/volumes" Nov 28 07:14:49 crc kubenswrapper[4922]: I1128 07:14:49.463388 4922 generic.go:334] "Generic (PLEG): container finished" podID="674d4d02-4734-4cb5-a316-b12a9dcb974c" containerID="e402e349dd0eadea957795fc5040e837508e28009179cb1326770ba305c82e02" exitCode=0 Nov 28 07:14:49 crc kubenswrapper[4922]: I1128 07:14:49.463445 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-e247-account-create-update-m48qt" event={"ID":"674d4d02-4734-4cb5-a316-b12a9dcb974c","Type":"ContainerDied","Data":"e402e349dd0eadea957795fc5040e837508e28009179cb1326770ba305c82e02"} Nov 28 07:14:49 crc kubenswrapper[4922]: I1128 07:14:49.465807 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-xqzrg-config-zcp8g" event={"ID":"55d5918c-75a5-4acb-a1eb-a2aa9a4da0cd","Type":"ContainerStarted","Data":"9bbcc0775f09b681b768f804deeca68eda7cb74df365af8bcb19bec3c8c44a8b"} Nov 28 07:14:49 crc kubenswrapper[4922]: I1128 07:14:49.467323 4922 generic.go:334] "Generic (PLEG): container finished" podID="c8fb5604-6627-4b59-9407-fe88464ba214" containerID="5110efc5edb18c6fad94ea09c68d35b7915ac98812274e1ced945184faa6c733" exitCode=0 Nov 28 07:14:49 crc kubenswrapper[4922]: I1128 07:14:49.467367 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-5206-account-create-update-97znp" event={"ID":"c8fb5604-6627-4b59-9407-fe88464ba214","Type":"ContainerDied","Data":"5110efc5edb18c6fad94ea09c68d35b7915ac98812274e1ced945184faa6c733"} Nov 28 07:14:49 crc kubenswrapper[4922]: I1128 07:14:49.469409 4922 generic.go:334] "Generic (PLEG): container finished" podID="711e933a-0d19-4c60-90fe-c1c3ed0174ed" containerID="8f71a24ebe84a952f8cb8a31ae36d0aff6eddbd2132b3fe335e8fbf512a8c407" exitCode=0 Nov 28 07:14:49 crc kubenswrapper[4922]: I1128 07:14:49.469496 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-create-lx69b" event={"ID":"711e933a-0d19-4c60-90fe-c1c3ed0174ed","Type":"ContainerDied","Data":"8f71a24ebe84a952f8cb8a31ae36d0aff6eddbd2132b3fe335e8fbf512a8c407"} Nov 28 07:14:49 crc kubenswrapper[4922]: I1128 07:14:49.472755 4922 generic.go:334] "Generic (PLEG): container finished" podID="2f1294e8-adc5-47fa-a186-4ef79c093a3a" containerID="e47ad8cdb4ff4702350e09f95a7319059daf400eab8c1e5b6e95d48c4c065416" exitCode=0 Nov 28 07:14:49 crc kubenswrapper[4922]: I1128 07:14:49.472803 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-create-th8tp" event={"ID":"2f1294e8-adc5-47fa-a186-4ef79c093a3a","Type":"ContainerDied","Data":"e47ad8cdb4ff4702350e09f95a7319059daf400eab8c1e5b6e95d48c4c065416"} Nov 28 07:14:49 crc kubenswrapper[4922]: I1128 07:14:49.480537 4922 generic.go:334] "Generic (PLEG): container finished" 
podID="813e18d9-b3c1-48eb-aff7-197d425910ef" containerID="4832975092faa8cc9fb4db742453510f007c3fc48070e2ad74419d9ba8a9f78f" exitCode=0 Nov 28 07:14:49 crc kubenswrapper[4922]: I1128 07:14:49.480727 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-5229-account-create-update-2k5qm" event={"ID":"813e18d9-b3c1-48eb-aff7-197d425910ef","Type":"ContainerDied","Data":"4832975092faa8cc9fb4db742453510f007c3fc48070e2ad74419d9ba8a9f78f"} Nov 28 07:14:49 crc kubenswrapper[4922]: I1128 07:14:49.943374 4922 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-create-jtvts" Nov 28 07:14:50 crc kubenswrapper[4922]: I1128 07:14:50.068385 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/6a3388c9-ec6c-400b-bfcd-59374499fd42-operator-scripts\") pod \"6a3388c9-ec6c-400b-bfcd-59374499fd42\" (UID: \"6a3388c9-ec6c-400b-bfcd-59374499fd42\") " Nov 28 07:14:50 crc kubenswrapper[4922]: I1128 07:14:50.068429 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wctmq\" (UniqueName: \"kubernetes.io/projected/6a3388c9-ec6c-400b-bfcd-59374499fd42-kube-api-access-wctmq\") pod \"6a3388c9-ec6c-400b-bfcd-59374499fd42\" (UID: \"6a3388c9-ec6c-400b-bfcd-59374499fd42\") " Nov 28 07:14:50 crc kubenswrapper[4922]: I1128 07:14:50.069252 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6a3388c9-ec6c-400b-bfcd-59374499fd42-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "6a3388c9-ec6c-400b-bfcd-59374499fd42" (UID: "6a3388c9-ec6c-400b-bfcd-59374499fd42"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 07:14:50 crc kubenswrapper[4922]: I1128 07:14:50.075104 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6a3388c9-ec6c-400b-bfcd-59374499fd42-kube-api-access-wctmq" (OuterVolumeSpecName: "kube-api-access-wctmq") pod "6a3388c9-ec6c-400b-bfcd-59374499fd42" (UID: "6a3388c9-ec6c-400b-bfcd-59374499fd42"). InnerVolumeSpecName "kube-api-access-wctmq". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 07:14:50 crc kubenswrapper[4922]: I1128 07:14:50.170565 4922 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/6a3388c9-ec6c-400b-bfcd-59374499fd42-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 07:14:50 crc kubenswrapper[4922]: I1128 07:14:50.170618 4922 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wctmq\" (UniqueName: \"kubernetes.io/projected/6a3388c9-ec6c-400b-bfcd-59374499fd42-kube-api-access-wctmq\") on node \"crc\" DevicePath \"\"" Nov 28 07:14:50 crc kubenswrapper[4922]: I1128 07:14:50.509204 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-create-jtvts" event={"ID":"6a3388c9-ec6c-400b-bfcd-59374499fd42","Type":"ContainerDied","Data":"4dc1017000fb5e4060e4d2e0662b16f8515df792e7f2726c25234bf274208a14"} Nov 28 07:14:50 crc kubenswrapper[4922]: I1128 07:14:50.509278 4922 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="4dc1017000fb5e4060e4d2e0662b16f8515df792e7f2726c25234bf274208a14" Nov 28 07:14:50 crc kubenswrapper[4922]: I1128 07:14:50.509356 4922 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-db-create-jtvts" Nov 28 07:14:50 crc kubenswrapper[4922]: I1128 07:14:50.512319 4922 generic.go:334] "Generic (PLEG): container finished" podID="55d5918c-75a5-4acb-a1eb-a2aa9a4da0cd" containerID="2fede77209e185216c36e681023a500b49193d28779dba1f035c16d758a707af" exitCode=0 Nov 28 07:14:50 crc kubenswrapper[4922]: I1128 07:14:50.512374 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-xqzrg-config-zcp8g" event={"ID":"55d5918c-75a5-4acb-a1eb-a2aa9a4da0cd","Type":"ContainerDied","Data":"2fede77209e185216c36e681023a500b49193d28779dba1f035c16d758a707af"} Nov 28 07:14:50 crc kubenswrapper[4922]: I1128 07:14:50.516151 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"46c3d0a8-d9ed-419a-baf3-57aaaf0c56fe","Type":"ContainerStarted","Data":"d3b90768a305d76f6bbe78d7fd4d3b39f50f3a28bba75d44bc70674b4aca8f70"} Nov 28 07:14:54 crc kubenswrapper[4922]: I1128 07:14:54.592683 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-create-lx69b" event={"ID":"711e933a-0d19-4c60-90fe-c1c3ed0174ed","Type":"ContainerDied","Data":"e8047d85e64380685e4a9394b2a5a67e327242eb590883f15209b1c48093b094"} Nov 28 07:14:54 crc kubenswrapper[4922]: I1128 07:14:54.593173 4922 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="e8047d85e64380685e4a9394b2a5a67e327242eb590883f15209b1c48093b094" Nov 28 07:14:54 crc kubenswrapper[4922]: I1128 07:14:54.595487 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-create-th8tp" event={"ID":"2f1294e8-adc5-47fa-a186-4ef79c093a3a","Type":"ContainerDied","Data":"de8d6676834eb01ee68e169f1fb6504dcf426e6df2afbe70b192dc19fef2e9a2"} Nov 28 07:14:54 crc kubenswrapper[4922]: I1128 07:14:54.595527 4922 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="de8d6676834eb01ee68e169f1fb6504dcf426e6df2afbe70b192dc19fef2e9a2" Nov 28 07:14:54 crc kubenswrapper[4922]: I1128 07:14:54.597002 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-5229-account-create-update-2k5qm" event={"ID":"813e18d9-b3c1-48eb-aff7-197d425910ef","Type":"ContainerDied","Data":"15639519b0044a4683424b3e49558c6b7b1f89747feba3ff45c5cf59309828c2"} Nov 28 07:14:54 crc kubenswrapper[4922]: I1128 07:14:54.597024 4922 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="15639519b0044a4683424b3e49558c6b7b1f89747feba3ff45c5cf59309828c2" Nov 28 07:14:54 crc kubenswrapper[4922]: I1128 07:14:54.598426 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-e247-account-create-update-m48qt" event={"ID":"674d4d02-4734-4cb5-a316-b12a9dcb974c","Type":"ContainerDied","Data":"210b0663a9db43c661e35f805cdd633babb0df0722371fd81674b1e8398d73d6"} Nov 28 07:14:54 crc kubenswrapper[4922]: I1128 07:14:54.598450 4922 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="210b0663a9db43c661e35f805cdd633babb0df0722371fd81674b1e8398d73d6" Nov 28 07:14:54 crc kubenswrapper[4922]: I1128 07:14:54.599616 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-xqzrg-config-zcp8g" event={"ID":"55d5918c-75a5-4acb-a1eb-a2aa9a4da0cd","Type":"ContainerDied","Data":"9bbcc0775f09b681b768f804deeca68eda7cb74df365af8bcb19bec3c8c44a8b"} Nov 28 07:14:54 crc kubenswrapper[4922]: I1128 07:14:54.599637 4922 pod_container_deletor.go:80] "Container not found in pod's containers" 
containerID="9bbcc0775f09b681b768f804deeca68eda7cb74df365af8bcb19bec3c8c44a8b" Nov 28 07:14:54 crc kubenswrapper[4922]: I1128 07:14:54.600597 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-5206-account-create-update-97znp" event={"ID":"c8fb5604-6627-4b59-9407-fe88464ba214","Type":"ContainerDied","Data":"21f9f77113cae0aa704f9f057e94b9ad9c1446ab0b2b33786cf14578faa7323f"} Nov 28 07:14:54 crc kubenswrapper[4922]: I1128 07:14:54.600617 4922 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="21f9f77113cae0aa704f9f057e94b9ad9c1446ab0b2b33786cf14578faa7323f" Nov 28 07:14:54 crc kubenswrapper[4922]: I1128 07:14:54.631615 4922 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-5229-account-create-update-2k5qm" Nov 28 07:14:54 crc kubenswrapper[4922]: I1128 07:14:54.637963 4922 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-xqzrg-config-zcp8g" Nov 28 07:14:54 crc kubenswrapper[4922]: I1128 07:14:54.642621 4922 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-e247-account-create-update-m48qt" Nov 28 07:14:54 crc kubenswrapper[4922]: I1128 07:14:54.674822 4922 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-create-th8tp" Nov 28 07:14:54 crc kubenswrapper[4922]: I1128 07:14:54.689633 4922 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-create-lx69b" Nov 28 07:14:54 crc kubenswrapper[4922]: I1128 07:14:54.694785 4922 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-5206-account-create-update-97znp" Nov 28 07:14:54 crc kubenswrapper[4922]: I1128 07:14:54.821650 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6psf5\" (UniqueName: \"kubernetes.io/projected/674d4d02-4734-4cb5-a316-b12a9dcb974c-kube-api-access-6psf5\") pod \"674d4d02-4734-4cb5-a316-b12a9dcb974c\" (UID: \"674d4d02-4734-4cb5-a316-b12a9dcb974c\") " Nov 28 07:14:54 crc kubenswrapper[4922]: I1128 07:14:54.821727 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/674d4d02-4734-4cb5-a316-b12a9dcb974c-operator-scripts\") pod \"674d4d02-4734-4cb5-a316-b12a9dcb974c\" (UID: \"674d4d02-4734-4cb5-a316-b12a9dcb974c\") " Nov 28 07:14:54 crc kubenswrapper[4922]: I1128 07:14:54.821756 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/813e18d9-b3c1-48eb-aff7-197d425910ef-operator-scripts\") pod \"813e18d9-b3c1-48eb-aff7-197d425910ef\" (UID: \"813e18d9-b3c1-48eb-aff7-197d425910ef\") " Nov 28 07:14:54 crc kubenswrapper[4922]: I1128 07:14:54.821792 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9gc85\" (UniqueName: \"kubernetes.io/projected/711e933a-0d19-4c60-90fe-c1c3ed0174ed-kube-api-access-9gc85\") pod \"711e933a-0d19-4c60-90fe-c1c3ed0174ed\" (UID: \"711e933a-0d19-4c60-90fe-c1c3ed0174ed\") " Nov 28 07:14:54 crc kubenswrapper[4922]: I1128 07:14:54.821846 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2w2v7\" (UniqueName: \"kubernetes.io/projected/c8fb5604-6627-4b59-9407-fe88464ba214-kube-api-access-2w2v7\") pod 
\"c8fb5604-6627-4b59-9407-fe88464ba214\" (UID: \"c8fb5604-6627-4b59-9407-fe88464ba214\") " Nov 28 07:14:54 crc kubenswrapper[4922]: I1128 07:14:54.821867 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/55d5918c-75a5-4acb-a1eb-a2aa9a4da0cd-additional-scripts\") pod \"55d5918c-75a5-4acb-a1eb-a2aa9a4da0cd\" (UID: \"55d5918c-75a5-4acb-a1eb-a2aa9a4da0cd\") " Nov 28 07:14:54 crc kubenswrapper[4922]: I1128 07:14:54.821900 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c8fb5604-6627-4b59-9407-fe88464ba214-operator-scripts\") pod \"c8fb5604-6627-4b59-9407-fe88464ba214\" (UID: \"c8fb5604-6627-4b59-9407-fe88464ba214\") " Nov 28 07:14:54 crc kubenswrapper[4922]: I1128 07:14:54.821933 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/55d5918c-75a5-4acb-a1eb-a2aa9a4da0cd-var-run-ovn\") pod \"55d5918c-75a5-4acb-a1eb-a2aa9a4da0cd\" (UID: \"55d5918c-75a5-4acb-a1eb-a2aa9a4da0cd\") " Nov 28 07:14:54 crc kubenswrapper[4922]: I1128 07:14:54.821968 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/711e933a-0d19-4c60-90fe-c1c3ed0174ed-operator-scripts\") pod \"711e933a-0d19-4c60-90fe-c1c3ed0174ed\" (UID: \"711e933a-0d19-4c60-90fe-c1c3ed0174ed\") " Nov 28 07:14:54 crc kubenswrapper[4922]: I1128 07:14:54.821996 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/55d5918c-75a5-4acb-a1eb-a2aa9a4da0cd-scripts\") pod \"55d5918c-75a5-4acb-a1eb-a2aa9a4da0cd\" (UID: \"55d5918c-75a5-4acb-a1eb-a2aa9a4da0cd\") " Nov 28 07:14:54 crc kubenswrapper[4922]: I1128 07:14:54.822018 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/2f1294e8-adc5-47fa-a186-4ef79c093a3a-operator-scripts\") pod \"2f1294e8-adc5-47fa-a186-4ef79c093a3a\" (UID: \"2f1294e8-adc5-47fa-a186-4ef79c093a3a\") " Nov 28 07:14:54 crc kubenswrapper[4922]: I1128 07:14:54.822100 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-k9jzg\" (UniqueName: \"kubernetes.io/projected/813e18d9-b3c1-48eb-aff7-197d425910ef-kube-api-access-k9jzg\") pod \"813e18d9-b3c1-48eb-aff7-197d425910ef\" (UID: \"813e18d9-b3c1-48eb-aff7-197d425910ef\") " Nov 28 07:14:54 crc kubenswrapper[4922]: I1128 07:14:54.822119 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4vg54\" (UniqueName: \"kubernetes.io/projected/55d5918c-75a5-4acb-a1eb-a2aa9a4da0cd-kube-api-access-4vg54\") pod \"55d5918c-75a5-4acb-a1eb-a2aa9a4da0cd\" (UID: \"55d5918c-75a5-4acb-a1eb-a2aa9a4da0cd\") " Nov 28 07:14:54 crc kubenswrapper[4922]: I1128 07:14:54.822138 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/55d5918c-75a5-4acb-a1eb-a2aa9a4da0cd-var-log-ovn\") pod \"55d5918c-75a5-4acb-a1eb-a2aa9a4da0cd\" (UID: \"55d5918c-75a5-4acb-a1eb-a2aa9a4da0cd\") " Nov 28 07:14:54 crc kubenswrapper[4922]: I1128 07:14:54.822167 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mmff7\" (UniqueName: 
\"kubernetes.io/projected/2f1294e8-adc5-47fa-a186-4ef79c093a3a-kube-api-access-mmff7\") pod \"2f1294e8-adc5-47fa-a186-4ef79c093a3a\" (UID: \"2f1294e8-adc5-47fa-a186-4ef79c093a3a\") " Nov 28 07:14:54 crc kubenswrapper[4922]: I1128 07:14:54.822182 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/55d5918c-75a5-4acb-a1eb-a2aa9a4da0cd-var-run\") pod \"55d5918c-75a5-4acb-a1eb-a2aa9a4da0cd\" (UID: \"55d5918c-75a5-4acb-a1eb-a2aa9a4da0cd\") " Nov 28 07:14:54 crc kubenswrapper[4922]: I1128 07:14:54.822278 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/55d5918c-75a5-4acb-a1eb-a2aa9a4da0cd-var-run-ovn" (OuterVolumeSpecName: "var-run-ovn") pod "55d5918c-75a5-4acb-a1eb-a2aa9a4da0cd" (UID: "55d5918c-75a5-4acb-a1eb-a2aa9a4da0cd"). InnerVolumeSpecName "var-run-ovn". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 28 07:14:54 crc kubenswrapper[4922]: I1128 07:14:54.822508 4922 reconciler_common.go:293] "Volume detached for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/55d5918c-75a5-4acb-a1eb-a2aa9a4da0cd-var-run-ovn\") on node \"crc\" DevicePath \"\"" Nov 28 07:14:54 crc kubenswrapper[4922]: I1128 07:14:54.822550 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/55d5918c-75a5-4acb-a1eb-a2aa9a4da0cd-var-run" (OuterVolumeSpecName: "var-run") pod "55d5918c-75a5-4acb-a1eb-a2aa9a4da0cd" (UID: "55d5918c-75a5-4acb-a1eb-a2aa9a4da0cd"). InnerVolumeSpecName "var-run". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 28 07:14:54 crc kubenswrapper[4922]: I1128 07:14:54.823112 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/711e933a-0d19-4c60-90fe-c1c3ed0174ed-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "711e933a-0d19-4c60-90fe-c1c3ed0174ed" (UID: "711e933a-0d19-4c60-90fe-c1c3ed0174ed"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 07:14:54 crc kubenswrapper[4922]: I1128 07:14:54.824202 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2f1294e8-adc5-47fa-a186-4ef79c093a3a-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "2f1294e8-adc5-47fa-a186-4ef79c093a3a" (UID: "2f1294e8-adc5-47fa-a186-4ef79c093a3a"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 07:14:54 crc kubenswrapper[4922]: I1128 07:14:54.824554 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/674d4d02-4734-4cb5-a316-b12a9dcb974c-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "674d4d02-4734-4cb5-a316-b12a9dcb974c" (UID: "674d4d02-4734-4cb5-a316-b12a9dcb974c"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 07:14:54 crc kubenswrapper[4922]: I1128 07:14:54.824552 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/55d5918c-75a5-4acb-a1eb-a2aa9a4da0cd-var-log-ovn" (OuterVolumeSpecName: "var-log-ovn") pod "55d5918c-75a5-4acb-a1eb-a2aa9a4da0cd" (UID: "55d5918c-75a5-4acb-a1eb-a2aa9a4da0cd"). InnerVolumeSpecName "var-log-ovn". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 28 07:14:54 crc kubenswrapper[4922]: I1128 07:14:54.825074 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c8fb5604-6627-4b59-9407-fe88464ba214-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "c8fb5604-6627-4b59-9407-fe88464ba214" (UID: "c8fb5604-6627-4b59-9407-fe88464ba214"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 07:14:54 crc kubenswrapper[4922]: I1128 07:14:54.825136 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/55d5918c-75a5-4acb-a1eb-a2aa9a4da0cd-additional-scripts" (OuterVolumeSpecName: "additional-scripts") pod "55d5918c-75a5-4acb-a1eb-a2aa9a4da0cd" (UID: "55d5918c-75a5-4acb-a1eb-a2aa9a4da0cd"). InnerVolumeSpecName "additional-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 07:14:54 crc kubenswrapper[4922]: I1128 07:14:54.825562 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/813e18d9-b3c1-48eb-aff7-197d425910ef-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "813e18d9-b3c1-48eb-aff7-197d425910ef" (UID: "813e18d9-b3c1-48eb-aff7-197d425910ef"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 07:14:54 crc kubenswrapper[4922]: I1128 07:14:54.826309 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/55d5918c-75a5-4acb-a1eb-a2aa9a4da0cd-scripts" (OuterVolumeSpecName: "scripts") pod "55d5918c-75a5-4acb-a1eb-a2aa9a4da0cd" (UID: "55d5918c-75a5-4acb-a1eb-a2aa9a4da0cd"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 07:14:54 crc kubenswrapper[4922]: I1128 07:14:54.828394 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/674d4d02-4734-4cb5-a316-b12a9dcb974c-kube-api-access-6psf5" (OuterVolumeSpecName: "kube-api-access-6psf5") pod "674d4d02-4734-4cb5-a316-b12a9dcb974c" (UID: "674d4d02-4734-4cb5-a316-b12a9dcb974c"). InnerVolumeSpecName "kube-api-access-6psf5". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 07:14:54 crc kubenswrapper[4922]: I1128 07:14:54.829327 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/711e933a-0d19-4c60-90fe-c1c3ed0174ed-kube-api-access-9gc85" (OuterVolumeSpecName: "kube-api-access-9gc85") pod "711e933a-0d19-4c60-90fe-c1c3ed0174ed" (UID: "711e933a-0d19-4c60-90fe-c1c3ed0174ed"). InnerVolumeSpecName "kube-api-access-9gc85". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 07:14:54 crc kubenswrapper[4922]: I1128 07:14:54.829533 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/813e18d9-b3c1-48eb-aff7-197d425910ef-kube-api-access-k9jzg" (OuterVolumeSpecName: "kube-api-access-k9jzg") pod "813e18d9-b3c1-48eb-aff7-197d425910ef" (UID: "813e18d9-b3c1-48eb-aff7-197d425910ef"). InnerVolumeSpecName "kube-api-access-k9jzg". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 07:14:54 crc kubenswrapper[4922]: I1128 07:14:54.830110 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/55d5918c-75a5-4acb-a1eb-a2aa9a4da0cd-kube-api-access-4vg54" (OuterVolumeSpecName: "kube-api-access-4vg54") pod "55d5918c-75a5-4acb-a1eb-a2aa9a4da0cd" (UID: "55d5918c-75a5-4acb-a1eb-a2aa9a4da0cd"). InnerVolumeSpecName "kube-api-access-4vg54". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 07:14:54 crc kubenswrapper[4922]: I1128 07:14:54.831050 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2f1294e8-adc5-47fa-a186-4ef79c093a3a-kube-api-access-mmff7" (OuterVolumeSpecName: "kube-api-access-mmff7") pod "2f1294e8-adc5-47fa-a186-4ef79c093a3a" (UID: "2f1294e8-adc5-47fa-a186-4ef79c093a3a"). InnerVolumeSpecName "kube-api-access-mmff7". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 07:14:54 crc kubenswrapper[4922]: I1128 07:14:54.831313 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c8fb5604-6627-4b59-9407-fe88464ba214-kube-api-access-2w2v7" (OuterVolumeSpecName: "kube-api-access-2w2v7") pod "c8fb5604-6627-4b59-9407-fe88464ba214" (UID: "c8fb5604-6627-4b59-9407-fe88464ba214"). InnerVolumeSpecName "kube-api-access-2w2v7". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 07:14:54 crc kubenswrapper[4922]: I1128 07:14:54.928597 4922 reconciler_common.go:293] "Volume detached for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/55d5918c-75a5-4acb-a1eb-a2aa9a4da0cd-var-run\") on node \"crc\" DevicePath \"\"" Nov 28 07:14:54 crc kubenswrapper[4922]: I1128 07:14:54.928936 4922 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mmff7\" (UniqueName: \"kubernetes.io/projected/2f1294e8-adc5-47fa-a186-4ef79c093a3a-kube-api-access-mmff7\") on node \"crc\" DevicePath \"\"" Nov 28 07:14:54 crc kubenswrapper[4922]: I1128 07:14:54.928951 4922 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6psf5\" (UniqueName: \"kubernetes.io/projected/674d4d02-4734-4cb5-a316-b12a9dcb974c-kube-api-access-6psf5\") on node \"crc\" DevicePath \"\"" Nov 28 07:14:54 crc kubenswrapper[4922]: I1128 07:14:54.929348 4922 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/674d4d02-4734-4cb5-a316-b12a9dcb974c-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 07:14:54 crc kubenswrapper[4922]: I1128 07:14:54.929368 4922 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/813e18d9-b3c1-48eb-aff7-197d425910ef-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 07:14:54 crc kubenswrapper[4922]: I1128 07:14:54.929403 4922 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9gc85\" (UniqueName: \"kubernetes.io/projected/711e933a-0d19-4c60-90fe-c1c3ed0174ed-kube-api-access-9gc85\") on node \"crc\" DevicePath \"\"" Nov 28 07:14:54 crc kubenswrapper[4922]: I1128 07:14:54.929412 4922 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2w2v7\" (UniqueName: \"kubernetes.io/projected/c8fb5604-6627-4b59-9407-fe88464ba214-kube-api-access-2w2v7\") on node \"crc\" DevicePath \"\"" Nov 28 07:14:54 crc kubenswrapper[4922]: I1128 07:14:54.929421 4922 reconciler_common.go:293] "Volume detached for volume \"additional-scripts\" (UniqueName: 
\"kubernetes.io/configmap/55d5918c-75a5-4acb-a1eb-a2aa9a4da0cd-additional-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 07:14:54 crc kubenswrapper[4922]: I1128 07:14:54.929429 4922 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c8fb5604-6627-4b59-9407-fe88464ba214-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 07:14:54 crc kubenswrapper[4922]: I1128 07:14:54.929437 4922 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/711e933a-0d19-4c60-90fe-c1c3ed0174ed-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 07:14:54 crc kubenswrapper[4922]: I1128 07:14:54.929448 4922 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/55d5918c-75a5-4acb-a1eb-a2aa9a4da0cd-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 07:14:54 crc kubenswrapper[4922]: I1128 07:14:54.929456 4922 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/2f1294e8-adc5-47fa-a186-4ef79c093a3a-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 07:14:54 crc kubenswrapper[4922]: I1128 07:14:54.929464 4922 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-k9jzg\" (UniqueName: \"kubernetes.io/projected/813e18d9-b3c1-48eb-aff7-197d425910ef-kube-api-access-k9jzg\") on node \"crc\" DevicePath \"\"" Nov 28 07:14:54 crc kubenswrapper[4922]: I1128 07:14:54.929472 4922 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4vg54\" (UniqueName: \"kubernetes.io/projected/55d5918c-75a5-4acb-a1eb-a2aa9a4da0cd-kube-api-access-4vg54\") on node \"crc\" DevicePath \"\"" Nov 28 07:14:54 crc kubenswrapper[4922]: I1128 07:14:54.929480 4922 reconciler_common.go:293] "Volume detached for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/55d5918c-75a5-4acb-a1eb-a2aa9a4da0cd-var-log-ovn\") on node \"crc\" DevicePath \"\"" Nov 28 07:14:55 crc kubenswrapper[4922]: I1128 07:14:55.613212 4922 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-5229-account-create-update-2k5qm" Nov 28 07:14:55 crc kubenswrapper[4922]: I1128 07:14:55.613871 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"46c3d0a8-d9ed-419a-baf3-57aaaf0c56fe","Type":"ContainerStarted","Data":"67652f0b906ad1c1976e1f6ddc6a979b5ca575b0328d4c60af513fc48df7cb8c"} Nov 28 07:14:55 crc kubenswrapper[4922]: I1128 07:14:55.613922 4922 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-create-lx69b" Nov 28 07:14:55 crc kubenswrapper[4922]: I1128 07:14:55.614305 4922 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-e247-account-create-update-m48qt" Nov 28 07:14:55 crc kubenswrapper[4922]: I1128 07:14:55.614646 4922 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-xqzrg-config-zcp8g" Nov 28 07:14:55 crc kubenswrapper[4922]: I1128 07:14:55.614966 4922 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-create-th8tp" Nov 28 07:14:55 crc kubenswrapper[4922]: I1128 07:14:55.615300 4922 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-5206-account-create-update-97znp" Nov 28 07:14:55 crc kubenswrapper[4922]: I1128 07:14:55.762989 4922 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovn-controller-xqzrg-config-zcp8g"] Nov 28 07:14:55 crc kubenswrapper[4922]: I1128 07:14:55.785545 4922 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ovn-controller-xqzrg-config-zcp8g"] Nov 28 07:14:57 crc kubenswrapper[4922]: I1128 07:14:57.412124 4922 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="55d5918c-75a5-4acb-a1eb-a2aa9a4da0cd" path="/var/lib/kubelet/pods/55d5918c-75a5-4acb-a1eb-a2aa9a4da0cd/volumes" Nov 28 07:14:57 crc kubenswrapper[4922]: I1128 07:14:57.638071 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"46c3d0a8-d9ed-419a-baf3-57aaaf0c56fe","Type":"ContainerStarted","Data":"ee22fa27f603d2ddf49e9b07c51a65a98b3a7d08d855a70a1f3939851f7f60c5"} Nov 28 07:14:57 crc kubenswrapper[4922]: I1128 07:14:57.638517 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"46c3d0a8-d9ed-419a-baf3-57aaaf0c56fe","Type":"ContainerStarted","Data":"26dd3a6e06dd6158dbd36b9a5fc4871c38c4a4f2e97a3e98df0085998f6374ae"} Nov 28 07:14:57 crc kubenswrapper[4922]: I1128 07:14:57.640767 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-sync-7l6kx" event={"ID":"33e395bd-1a5e-4c48-829d-e7b3408e9b8e","Type":"ContainerStarted","Data":"29b28ab9109f2b7a10e56ca5f88ae46e6843947c82e9c3c79ada80258c2f7af2"} Nov 28 07:14:57 crc kubenswrapper[4922]: I1128 07:14:57.669594 4922 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-db-sync-7l6kx" podStartSLOduration=11.551406966 podStartE2EDuration="18.669568327s" podCreationTimestamp="2025-11-28 07:14:39 +0000 UTC" firstStartedPulling="2025-11-28 07:14:47.773619176 +0000 UTC m=+1332.694014758" lastFinishedPulling="2025-11-28 07:14:54.891780507 +0000 UTC m=+1339.812176119" observedRunningTime="2025-11-28 07:14:57.66552991 +0000 UTC m=+1342.585925492" watchObservedRunningTime="2025-11-28 07:14:57.669568327 +0000 UTC m=+1342.589963949" Nov 28 07:14:58 crc kubenswrapper[4922]: I1128 07:14:58.927249 4922 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/rabbitmq-cell1-server-0" Nov 28 07:15:00 crc kubenswrapper[4922]: I1128 07:15:00.156213 4922 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29405235-z9wn4"] Nov 28 07:15:00 crc kubenswrapper[4922]: E1128 07:15:00.157071 4922 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c8fb5604-6627-4b59-9407-fe88464ba214" containerName="mariadb-account-create-update" Nov 28 07:15:00 crc kubenswrapper[4922]: I1128 07:15:00.157162 4922 state_mem.go:107] "Deleted CPUSet assignment" podUID="c8fb5604-6627-4b59-9407-fe88464ba214" containerName="mariadb-account-create-update" Nov 28 07:15:00 crc kubenswrapper[4922]: E1128 07:15:00.157248 4922 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="674d4d02-4734-4cb5-a316-b12a9dcb974c" containerName="mariadb-account-create-update" Nov 28 07:15:00 crc kubenswrapper[4922]: I1128 07:15:00.157301 4922 state_mem.go:107] "Deleted CPUSet assignment" podUID="674d4d02-4734-4cb5-a316-b12a9dcb974c" containerName="mariadb-account-create-update" Nov 28 07:15:00 crc kubenswrapper[4922]: E1128 07:15:00.157354 4922 cpu_manager.go:410] "RemoveStaleState: removing container" 
podUID="2f1294e8-adc5-47fa-a186-4ef79c093a3a" containerName="mariadb-database-create" Nov 28 07:15:00 crc kubenswrapper[4922]: I1128 07:15:00.157401 4922 state_mem.go:107] "Deleted CPUSet assignment" podUID="2f1294e8-adc5-47fa-a186-4ef79c093a3a" containerName="mariadb-database-create" Nov 28 07:15:00 crc kubenswrapper[4922]: E1128 07:15:00.157473 4922 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="711e933a-0d19-4c60-90fe-c1c3ed0174ed" containerName="mariadb-database-create" Nov 28 07:15:00 crc kubenswrapper[4922]: I1128 07:15:00.157527 4922 state_mem.go:107] "Deleted CPUSet assignment" podUID="711e933a-0d19-4c60-90fe-c1c3ed0174ed" containerName="mariadb-database-create" Nov 28 07:15:00 crc kubenswrapper[4922]: E1128 07:15:00.157593 4922 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6a3388c9-ec6c-400b-bfcd-59374499fd42" containerName="mariadb-database-create" Nov 28 07:15:00 crc kubenswrapper[4922]: I1128 07:15:00.157648 4922 state_mem.go:107] "Deleted CPUSet assignment" podUID="6a3388c9-ec6c-400b-bfcd-59374499fd42" containerName="mariadb-database-create" Nov 28 07:15:00 crc kubenswrapper[4922]: E1128 07:15:00.157700 4922 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="55d5918c-75a5-4acb-a1eb-a2aa9a4da0cd" containerName="ovn-config" Nov 28 07:15:00 crc kubenswrapper[4922]: I1128 07:15:00.157754 4922 state_mem.go:107] "Deleted CPUSet assignment" podUID="55d5918c-75a5-4acb-a1eb-a2aa9a4da0cd" containerName="ovn-config" Nov 28 07:15:00 crc kubenswrapper[4922]: E1128 07:15:00.157811 4922 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="813e18d9-b3c1-48eb-aff7-197d425910ef" containerName="mariadb-account-create-update" Nov 28 07:15:00 crc kubenswrapper[4922]: I1128 07:15:00.157897 4922 state_mem.go:107] "Deleted CPUSet assignment" podUID="813e18d9-b3c1-48eb-aff7-197d425910ef" containerName="mariadb-account-create-update" Nov 28 07:15:00 crc kubenswrapper[4922]: I1128 07:15:00.158114 4922 memory_manager.go:354] "RemoveStaleState removing state" podUID="6a3388c9-ec6c-400b-bfcd-59374499fd42" containerName="mariadb-database-create" Nov 28 07:15:00 crc kubenswrapper[4922]: I1128 07:15:00.158180 4922 memory_manager.go:354] "RemoveStaleState removing state" podUID="55d5918c-75a5-4acb-a1eb-a2aa9a4da0cd" containerName="ovn-config" Nov 28 07:15:00 crc kubenswrapper[4922]: I1128 07:15:00.158247 4922 memory_manager.go:354] "RemoveStaleState removing state" podUID="c8fb5604-6627-4b59-9407-fe88464ba214" containerName="mariadb-account-create-update" Nov 28 07:15:00 crc kubenswrapper[4922]: I1128 07:15:00.158301 4922 memory_manager.go:354] "RemoveStaleState removing state" podUID="674d4d02-4734-4cb5-a316-b12a9dcb974c" containerName="mariadb-account-create-update" Nov 28 07:15:00 crc kubenswrapper[4922]: I1128 07:15:00.158356 4922 memory_manager.go:354] "RemoveStaleState removing state" podUID="711e933a-0d19-4c60-90fe-c1c3ed0174ed" containerName="mariadb-database-create" Nov 28 07:15:00 crc kubenswrapper[4922]: I1128 07:15:00.158408 4922 memory_manager.go:354] "RemoveStaleState removing state" podUID="2f1294e8-adc5-47fa-a186-4ef79c093a3a" containerName="mariadb-database-create" Nov 28 07:15:00 crc kubenswrapper[4922]: I1128 07:15:00.158472 4922 memory_manager.go:354] "RemoveStaleState removing state" podUID="813e18d9-b3c1-48eb-aff7-197d425910ef" containerName="mariadb-account-create-update" Nov 28 07:15:00 crc kubenswrapper[4922]: I1128 07:15:00.159052 4922 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29405235-z9wn4" Nov 28 07:15:00 crc kubenswrapper[4922]: I1128 07:15:00.161448 4922 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Nov 28 07:15:00 crc kubenswrapper[4922]: I1128 07:15:00.161461 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Nov 28 07:15:00 crc kubenswrapper[4922]: I1128 07:15:00.163536 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29405235-z9wn4"] Nov 28 07:15:00 crc kubenswrapper[4922]: I1128 07:15:00.323839 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5m5ts\" (UniqueName: \"kubernetes.io/projected/ac5d004b-d4ac-4de9-916f-0d4547eeb3d1-kube-api-access-5m5ts\") pod \"collect-profiles-29405235-z9wn4\" (UID: \"ac5d004b-d4ac-4de9-916f-0d4547eeb3d1\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405235-z9wn4" Nov 28 07:15:00 crc kubenswrapper[4922]: I1128 07:15:00.324287 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/ac5d004b-d4ac-4de9-916f-0d4547eeb3d1-config-volume\") pod \"collect-profiles-29405235-z9wn4\" (UID: \"ac5d004b-d4ac-4de9-916f-0d4547eeb3d1\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405235-z9wn4" Nov 28 07:15:00 crc kubenswrapper[4922]: I1128 07:15:00.324636 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/ac5d004b-d4ac-4de9-916f-0d4547eeb3d1-secret-volume\") pod \"collect-profiles-29405235-z9wn4\" (UID: \"ac5d004b-d4ac-4de9-916f-0d4547eeb3d1\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405235-z9wn4" Nov 28 07:15:00 crc kubenswrapper[4922]: I1128 07:15:00.426367 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5m5ts\" (UniqueName: \"kubernetes.io/projected/ac5d004b-d4ac-4de9-916f-0d4547eeb3d1-kube-api-access-5m5ts\") pod \"collect-profiles-29405235-z9wn4\" (UID: \"ac5d004b-d4ac-4de9-916f-0d4547eeb3d1\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405235-z9wn4" Nov 28 07:15:00 crc kubenswrapper[4922]: I1128 07:15:00.426422 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/ac5d004b-d4ac-4de9-916f-0d4547eeb3d1-config-volume\") pod \"collect-profiles-29405235-z9wn4\" (UID: \"ac5d004b-d4ac-4de9-916f-0d4547eeb3d1\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405235-z9wn4" Nov 28 07:15:00 crc kubenswrapper[4922]: I1128 07:15:00.426503 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/ac5d004b-d4ac-4de9-916f-0d4547eeb3d1-secret-volume\") pod \"collect-profiles-29405235-z9wn4\" (UID: \"ac5d004b-d4ac-4de9-916f-0d4547eeb3d1\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405235-z9wn4" Nov 28 07:15:00 crc kubenswrapper[4922]: I1128 07:15:00.427737 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/ac5d004b-d4ac-4de9-916f-0d4547eeb3d1-config-volume\") pod 
\"collect-profiles-29405235-z9wn4\" (UID: \"ac5d004b-d4ac-4de9-916f-0d4547eeb3d1\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405235-z9wn4" Nov 28 07:15:00 crc kubenswrapper[4922]: I1128 07:15:00.445364 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/ac5d004b-d4ac-4de9-916f-0d4547eeb3d1-secret-volume\") pod \"collect-profiles-29405235-z9wn4\" (UID: \"ac5d004b-d4ac-4de9-916f-0d4547eeb3d1\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405235-z9wn4" Nov 28 07:15:00 crc kubenswrapper[4922]: I1128 07:15:00.450342 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5m5ts\" (UniqueName: \"kubernetes.io/projected/ac5d004b-d4ac-4de9-916f-0d4547eeb3d1-kube-api-access-5m5ts\") pod \"collect-profiles-29405235-z9wn4\" (UID: \"ac5d004b-d4ac-4de9-916f-0d4547eeb3d1\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405235-z9wn4" Nov 28 07:15:00 crc kubenswrapper[4922]: I1128 07:15:00.483661 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29405235-z9wn4" Nov 28 07:15:00 crc kubenswrapper[4922]: I1128 07:15:00.960540 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29405235-z9wn4"] Nov 28 07:15:01 crc kubenswrapper[4922]: I1128 07:15:01.683403 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29405235-z9wn4" event={"ID":"ac5d004b-d4ac-4de9-916f-0d4547eeb3d1","Type":"ContainerStarted","Data":"7804cd15322053c6fc4b48a940b3dd0b3ccb5ab0448cdeb3a17413ecf62ce09e"} Nov 28 07:15:04 crc kubenswrapper[4922]: I1128 07:15:04.724550 4922 generic.go:334] "Generic (PLEG): container finished" podID="ac5d004b-d4ac-4de9-916f-0d4547eeb3d1" containerID="2f70c3b7bb43f1b685383f2a1c85d8bc8b3fadc3ca1f8eabee35bca8d3b9ebec" exitCode=0 Nov 28 07:15:04 crc kubenswrapper[4922]: I1128 07:15:04.724762 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29405235-z9wn4" event={"ID":"ac5d004b-d4ac-4de9-916f-0d4547eeb3d1","Type":"ContainerDied","Data":"2f70c3b7bb43f1b685383f2a1c85d8bc8b3fadc3ca1f8eabee35bca8d3b9ebec"} Nov 28 07:15:08 crc kubenswrapper[4922]: I1128 07:15:08.413652 4922 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29405235-z9wn4" Nov 28 07:15:08 crc kubenswrapper[4922]: I1128 07:15:08.589346 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/ac5d004b-d4ac-4de9-916f-0d4547eeb3d1-secret-volume\") pod \"ac5d004b-d4ac-4de9-916f-0d4547eeb3d1\" (UID: \"ac5d004b-d4ac-4de9-916f-0d4547eeb3d1\") " Nov 28 07:15:08 crc kubenswrapper[4922]: I1128 07:15:08.589471 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/ac5d004b-d4ac-4de9-916f-0d4547eeb3d1-config-volume\") pod \"ac5d004b-d4ac-4de9-916f-0d4547eeb3d1\" (UID: \"ac5d004b-d4ac-4de9-916f-0d4547eeb3d1\") " Nov 28 07:15:08 crc kubenswrapper[4922]: I1128 07:15:08.589951 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5m5ts\" (UniqueName: \"kubernetes.io/projected/ac5d004b-d4ac-4de9-916f-0d4547eeb3d1-kube-api-access-5m5ts\") pod \"ac5d004b-d4ac-4de9-916f-0d4547eeb3d1\" (UID: \"ac5d004b-d4ac-4de9-916f-0d4547eeb3d1\") " Nov 28 07:15:08 crc kubenswrapper[4922]: I1128 07:15:08.590892 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ac5d004b-d4ac-4de9-916f-0d4547eeb3d1-config-volume" (OuterVolumeSpecName: "config-volume") pod "ac5d004b-d4ac-4de9-916f-0d4547eeb3d1" (UID: "ac5d004b-d4ac-4de9-916f-0d4547eeb3d1"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 07:15:08 crc kubenswrapper[4922]: I1128 07:15:08.593660 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ac5d004b-d4ac-4de9-916f-0d4547eeb3d1-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "ac5d004b-d4ac-4de9-916f-0d4547eeb3d1" (UID: "ac5d004b-d4ac-4de9-916f-0d4547eeb3d1"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 07:15:08 crc kubenswrapper[4922]: I1128 07:15:08.594934 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ac5d004b-d4ac-4de9-916f-0d4547eeb3d1-kube-api-access-5m5ts" (OuterVolumeSpecName: "kube-api-access-5m5ts") pod "ac5d004b-d4ac-4de9-916f-0d4547eeb3d1" (UID: "ac5d004b-d4ac-4de9-916f-0d4547eeb3d1"). InnerVolumeSpecName "kube-api-access-5m5ts". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 07:15:08 crc kubenswrapper[4922]: I1128 07:15:08.691860 4922 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5m5ts\" (UniqueName: \"kubernetes.io/projected/ac5d004b-d4ac-4de9-916f-0d4547eeb3d1-kube-api-access-5m5ts\") on node \"crc\" DevicePath \"\"" Nov 28 07:15:08 crc kubenswrapper[4922]: I1128 07:15:08.692065 4922 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/ac5d004b-d4ac-4de9-916f-0d4547eeb3d1-secret-volume\") on node \"crc\" DevicePath \"\"" Nov 28 07:15:08 crc kubenswrapper[4922]: I1128 07:15:08.692074 4922 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/ac5d004b-d4ac-4de9-916f-0d4547eeb3d1-config-volume\") on node \"crc\" DevicePath \"\"" Nov 28 07:15:08 crc kubenswrapper[4922]: I1128 07:15:08.774741 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29405235-z9wn4" event={"ID":"ac5d004b-d4ac-4de9-916f-0d4547eeb3d1","Type":"ContainerDied","Data":"7804cd15322053c6fc4b48a940b3dd0b3ccb5ab0448cdeb3a17413ecf62ce09e"} Nov 28 07:15:08 crc kubenswrapper[4922]: I1128 07:15:08.774780 4922 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29405235-z9wn4" Nov 28 07:15:08 crc kubenswrapper[4922]: I1128 07:15:08.774818 4922 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="7804cd15322053c6fc4b48a940b3dd0b3ccb5ab0448cdeb3a17413ecf62ce09e" Nov 28 07:15:09 crc kubenswrapper[4922]: I1128 07:15:09.792039 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"46c3d0a8-d9ed-419a-baf3-57aaaf0c56fe","Type":"ContainerStarted","Data":"23b4034aad6d0adc0dd28dbcdf65c0e6bd65ce23fd1a27f18dd635918d96a2c8"} Nov 28 07:15:09 crc kubenswrapper[4922]: I1128 07:15:09.793408 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"46c3d0a8-d9ed-419a-baf3-57aaaf0c56fe","Type":"ContainerStarted","Data":"7c24d63778c887d25e502fb8e85199d831af25e203c2a011764b487e1c2c78a1"} Nov 28 07:15:09 crc kubenswrapper[4922]: I1128 07:15:09.795781 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-sync-wk4t8" event={"ID":"2e87eedc-5f8a-46e2-bce1-c0361074a7f0","Type":"ContainerStarted","Data":"a64d396721a8963cced703ead55e89ffd3e5ecf1115b154b1c9a98c9aa628afe"} Nov 28 07:15:10 crc kubenswrapper[4922]: I1128 07:15:10.813903 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"46c3d0a8-d9ed-419a-baf3-57aaaf0c56fe","Type":"ContainerStarted","Data":"853f98691c2c9eea9fa65f3f60694178106dd650a20abb2f93f5e169033283dc"} Nov 28 07:15:10 crc kubenswrapper[4922]: I1128 07:15:10.840659 4922 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-db-sync-wk4t8" podStartSLOduration=3.67926732 podStartE2EDuration="48.840642872s" podCreationTimestamp="2025-11-28 07:14:22 +0000 UTC" firstStartedPulling="2025-11-28 07:14:23.176433994 +0000 UTC m=+1308.096829566" lastFinishedPulling="2025-11-28 07:15:08.337809536 +0000 UTC m=+1353.258205118" observedRunningTime="2025-11-28 07:15:10.835398793 +0000 UTC m=+1355.755794375" watchObservedRunningTime="2025-11-28 07:15:10.840642872 +0000 UTC m=+1355.761038454" Nov 28 07:15:11 crc kubenswrapper[4922]: I1128 07:15:11.835091 4922 
kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"46c3d0a8-d9ed-419a-baf3-57aaaf0c56fe","Type":"ContainerStarted","Data":"12b16430a89a5b1e56a5f6468f519f0cc78f9332249e9e70b835663f8c8cf7f6"} Nov 28 07:15:14 crc kubenswrapper[4922]: I1128 07:15:14.872898 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"46c3d0a8-d9ed-419a-baf3-57aaaf0c56fe","Type":"ContainerStarted","Data":"a015be7baa9e4f6ad1e91446da7a6a6130d283d562293232f646fbaf0306cb39"} Nov 28 07:15:14 crc kubenswrapper[4922]: I1128 07:15:14.873382 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"46c3d0a8-d9ed-419a-baf3-57aaaf0c56fe","Type":"ContainerStarted","Data":"b375cb1d189915e8086ce1f9c17697360db2e2a53cd535ab4f5f9cf1df90a46c"} Nov 28 07:15:14 crc kubenswrapper[4922]: I1128 07:15:14.873394 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"46c3d0a8-d9ed-419a-baf3-57aaaf0c56fe","Type":"ContainerStarted","Data":"e4501a3ca18be2fca539c740e8acc3816b850c674385c2c3d353d186ec5bbec5"} Nov 28 07:15:15 crc kubenswrapper[4922]: I1128 07:15:15.887680 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"46c3d0a8-d9ed-419a-baf3-57aaaf0c56fe","Type":"ContainerStarted","Data":"ca8c40529da8875dd3000d01b8bd8b36258e6bc188214902c4efe8f876ef3f55"} Nov 28 07:15:15 crc kubenswrapper[4922]: I1128 07:15:15.888817 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"46c3d0a8-d9ed-419a-baf3-57aaaf0c56fe","Type":"ContainerStarted","Data":"be7c02ef3f8f4787f48ed58720143cde50c0717059e0ee9cfb0efd2e23816536"} Nov 28 07:15:15 crc kubenswrapper[4922]: I1128 07:15:15.889067 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"46c3d0a8-d9ed-419a-baf3-57aaaf0c56fe","Type":"ContainerStarted","Data":"cdce219795ca597bf90a3ef5a37914a0508699345238c0f783326c7603848cb7"} Nov 28 07:15:15 crc kubenswrapper[4922]: I1128 07:15:15.889171 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"46c3d0a8-d9ed-419a-baf3-57aaaf0c56fe","Type":"ContainerStarted","Data":"f0b2a682fe557d959bf583e5ecb3012d332cc749a2c216c33f7c5ba7ffe503cb"} Nov 28 07:15:15 crc kubenswrapper[4922]: I1128 07:15:15.934446 4922 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/swift-storage-0" podStartSLOduration=47.284215786 podStartE2EDuration="1m12.934428521s" podCreationTimestamp="2025-11-28 07:14:03 +0000 UTC" firstStartedPulling="2025-11-28 07:14:48.145610683 +0000 UTC m=+1333.066006265" lastFinishedPulling="2025-11-28 07:15:13.795823408 +0000 UTC m=+1358.716219000" observedRunningTime="2025-11-28 07:15:15.929566822 +0000 UTC m=+1360.849962394" watchObservedRunningTime="2025-11-28 07:15:15.934428521 +0000 UTC m=+1360.854824103" Nov 28 07:15:16 crc kubenswrapper[4922]: I1128 07:15:16.201007 4922 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-6dccfc5657-4j5qj"] Nov 28 07:15:16 crc kubenswrapper[4922]: E1128 07:15:16.201357 4922 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ac5d004b-d4ac-4de9-916f-0d4547eeb3d1" containerName="collect-profiles" Nov 28 07:15:16 crc kubenswrapper[4922]: I1128 07:15:16.201377 4922 state_mem.go:107] "Deleted CPUSet assignment" podUID="ac5d004b-d4ac-4de9-916f-0d4547eeb3d1" containerName="collect-profiles" Nov 28 07:15:16 crc 
Nov 28 07:15:16 crc kubenswrapper[4922]: I1128 07:15:16.202313 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6dccfc5657-4j5qj"
Nov 28 07:15:16 crc kubenswrapper[4922]: I1128 07:15:16.204614 4922 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"dns-swift-storage-0"
Nov 28 07:15:16 crc kubenswrapper[4922]: I1128 07:15:16.219485 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-6dccfc5657-4j5qj"]
Nov 28 07:15:16 crc kubenswrapper[4922]: I1128 07:15:16.328973 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/990834b6-2f6d-466d-aded-ce38de14641b-dns-svc\") pod \"dnsmasq-dns-6dccfc5657-4j5qj\" (UID: \"990834b6-2f6d-466d-aded-ce38de14641b\") " pod="openstack/dnsmasq-dns-6dccfc5657-4j5qj"
Nov 28 07:15:16 crc kubenswrapper[4922]: I1128 07:15:16.329028 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hpkr5\" (UniqueName: \"kubernetes.io/projected/990834b6-2f6d-466d-aded-ce38de14641b-kube-api-access-hpkr5\") pod \"dnsmasq-dns-6dccfc5657-4j5qj\" (UID: \"990834b6-2f6d-466d-aded-ce38de14641b\") " pod="openstack/dnsmasq-dns-6dccfc5657-4j5qj"
Nov 28 07:15:16 crc kubenswrapper[4922]: I1128 07:15:16.329204 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/990834b6-2f6d-466d-aded-ce38de14641b-ovsdbserver-nb\") pod \"dnsmasq-dns-6dccfc5657-4j5qj\" (UID: \"990834b6-2f6d-466d-aded-ce38de14641b\") " pod="openstack/dnsmasq-dns-6dccfc5657-4j5qj"
Nov 28 07:15:16 crc kubenswrapper[4922]: I1128 07:15:16.329486 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/990834b6-2f6d-466d-aded-ce38de14641b-config\") pod \"dnsmasq-dns-6dccfc5657-4j5qj\" (UID: \"990834b6-2f6d-466d-aded-ce38de14641b\") " pod="openstack/dnsmasq-dns-6dccfc5657-4j5qj"
Nov 28 07:15:16 crc kubenswrapper[4922]: I1128 07:15:16.329559 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/990834b6-2f6d-466d-aded-ce38de14641b-dns-swift-storage-0\") pod \"dnsmasq-dns-6dccfc5657-4j5qj\" (UID: \"990834b6-2f6d-466d-aded-ce38de14641b\") " pod="openstack/dnsmasq-dns-6dccfc5657-4j5qj"
Nov 28 07:15:16 crc kubenswrapper[4922]: I1128 07:15:16.329618 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/990834b6-2f6d-466d-aded-ce38de14641b-ovsdbserver-sb\") pod \"dnsmasq-dns-6dccfc5657-4j5qj\" (UID: \"990834b6-2f6d-466d-aded-ce38de14641b\") " pod="openstack/dnsmasq-dns-6dccfc5657-4j5qj"
Nov 28 07:15:16 crc kubenswrapper[4922]: I1128 07:15:16.431132 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/990834b6-2f6d-466d-aded-ce38de14641b-dns-svc\") pod \"dnsmasq-dns-6dccfc5657-4j5qj\" (UID: \"990834b6-2f6d-466d-aded-ce38de14641b\") " pod="openstack/dnsmasq-dns-6dccfc5657-4j5qj"
Nov 28 07:15:16 crc kubenswrapper[4922]: I1128 07:15:16.431195 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hpkr5\" (UniqueName: \"kubernetes.io/projected/990834b6-2f6d-466d-aded-ce38de14641b-kube-api-access-hpkr5\") pod \"dnsmasq-dns-6dccfc5657-4j5qj\" (UID: \"990834b6-2f6d-466d-aded-ce38de14641b\") " pod="openstack/dnsmasq-dns-6dccfc5657-4j5qj"
Nov 28 07:15:16 crc kubenswrapper[4922]: I1128 07:15:16.431301 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/990834b6-2f6d-466d-aded-ce38de14641b-ovsdbserver-nb\") pod \"dnsmasq-dns-6dccfc5657-4j5qj\" (UID: \"990834b6-2f6d-466d-aded-ce38de14641b\") " pod="openstack/dnsmasq-dns-6dccfc5657-4j5qj"
Nov 28 07:15:16 crc kubenswrapper[4922]: I1128 07:15:16.431368 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/990834b6-2f6d-466d-aded-ce38de14641b-config\") pod \"dnsmasq-dns-6dccfc5657-4j5qj\" (UID: \"990834b6-2f6d-466d-aded-ce38de14641b\") " pod="openstack/dnsmasq-dns-6dccfc5657-4j5qj"
Nov 28 07:15:16 crc kubenswrapper[4922]: I1128 07:15:16.431392 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/990834b6-2f6d-466d-aded-ce38de14641b-dns-swift-storage-0\") pod \"dnsmasq-dns-6dccfc5657-4j5qj\" (UID: \"990834b6-2f6d-466d-aded-ce38de14641b\") " pod="openstack/dnsmasq-dns-6dccfc5657-4j5qj"
Nov 28 07:15:16 crc kubenswrapper[4922]: I1128 07:15:16.431415 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/990834b6-2f6d-466d-aded-ce38de14641b-ovsdbserver-sb\") pod \"dnsmasq-dns-6dccfc5657-4j5qj\" (UID: \"990834b6-2f6d-466d-aded-ce38de14641b\") " pod="openstack/dnsmasq-dns-6dccfc5657-4j5qj"
Nov 28 07:15:16 crc kubenswrapper[4922]: I1128 07:15:16.432146 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/990834b6-2f6d-466d-aded-ce38de14641b-ovsdbserver-nb\") pod \"dnsmasq-dns-6dccfc5657-4j5qj\" (UID: \"990834b6-2f6d-466d-aded-ce38de14641b\") " pod="openstack/dnsmasq-dns-6dccfc5657-4j5qj"
Nov 28 07:15:16 crc kubenswrapper[4922]: I1128 07:15:16.432310 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/990834b6-2f6d-466d-aded-ce38de14641b-ovsdbserver-sb\") pod \"dnsmasq-dns-6dccfc5657-4j5qj\" (UID: \"990834b6-2f6d-466d-aded-ce38de14641b\") " pod="openstack/dnsmasq-dns-6dccfc5657-4j5qj"
Nov 28 07:15:16 crc kubenswrapper[4922]: I1128 07:15:16.432319 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/990834b6-2f6d-466d-aded-ce38de14641b-dns-swift-storage-0\") pod \"dnsmasq-dns-6dccfc5657-4j5qj\" (UID: \"990834b6-2f6d-466d-aded-ce38de14641b\") " pod="openstack/dnsmasq-dns-6dccfc5657-4j5qj"
Nov 28 07:15:16 crc kubenswrapper[4922]: I1128 07:15:16.432425 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/990834b6-2f6d-466d-aded-ce38de14641b-dns-svc\") pod \"dnsmasq-dns-6dccfc5657-4j5qj\" (UID: \"990834b6-2f6d-466d-aded-ce38de14641b\") " pod="openstack/dnsmasq-dns-6dccfc5657-4j5qj"
Nov 28 07:15:16 crc kubenswrapper[4922]: I1128 07:15:16.432832 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/990834b6-2f6d-466d-aded-ce38de14641b-config\") pod \"dnsmasq-dns-6dccfc5657-4j5qj\" (UID: \"990834b6-2f6d-466d-aded-ce38de14641b\") " pod="openstack/dnsmasq-dns-6dccfc5657-4j5qj"
"MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/990834b6-2f6d-466d-aded-ce38de14641b-config\") pod \"dnsmasq-dns-6dccfc5657-4j5qj\" (UID: \"990834b6-2f6d-466d-aded-ce38de14641b\") " pod="openstack/dnsmasq-dns-6dccfc5657-4j5qj" Nov 28 07:15:16 crc kubenswrapper[4922]: I1128 07:15:16.455790 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hpkr5\" (UniqueName: \"kubernetes.io/projected/990834b6-2f6d-466d-aded-ce38de14641b-kube-api-access-hpkr5\") pod \"dnsmasq-dns-6dccfc5657-4j5qj\" (UID: \"990834b6-2f6d-466d-aded-ce38de14641b\") " pod="openstack/dnsmasq-dns-6dccfc5657-4j5qj" Nov 28 07:15:16 crc kubenswrapper[4922]: I1128 07:15:16.536778 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6dccfc5657-4j5qj" Nov 28 07:15:16 crc kubenswrapper[4922]: W1128 07:15:16.833833 4922 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod990834b6_2f6d_466d_aded_ce38de14641b.slice/crio-2ff87e4f65153a33e2adf4268759c104367f5a3a0ad74625a4a4fc6988641466 WatchSource:0}: Error finding container 2ff87e4f65153a33e2adf4268759c104367f5a3a0ad74625a4a4fc6988641466: Status 404 returned error can't find the container with id 2ff87e4f65153a33e2adf4268759c104367f5a3a0ad74625a4a4fc6988641466 Nov 28 07:15:16 crc kubenswrapper[4922]: I1128 07:15:16.835573 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-6dccfc5657-4j5qj"] Nov 28 07:15:16 crc kubenswrapper[4922]: I1128 07:15:16.908512 4922 generic.go:334] "Generic (PLEG): container finished" podID="33e395bd-1a5e-4c48-829d-e7b3408e9b8e" containerID="29b28ab9109f2b7a10e56ca5f88ae46e6843947c82e9c3c79ada80258c2f7af2" exitCode=0 Nov 28 07:15:16 crc kubenswrapper[4922]: I1128 07:15:16.908866 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-sync-7l6kx" event={"ID":"33e395bd-1a5e-4c48-829d-e7b3408e9b8e","Type":"ContainerDied","Data":"29b28ab9109f2b7a10e56ca5f88ae46e6843947c82e9c3c79ada80258c2f7af2"} Nov 28 07:15:16 crc kubenswrapper[4922]: I1128 07:15:16.914144 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6dccfc5657-4j5qj" event={"ID":"990834b6-2f6d-466d-aded-ce38de14641b","Type":"ContainerStarted","Data":"2ff87e4f65153a33e2adf4268759c104367f5a3a0ad74625a4a4fc6988641466"} Nov 28 07:15:17 crc kubenswrapper[4922]: I1128 07:15:17.924771 4922 generic.go:334] "Generic (PLEG): container finished" podID="990834b6-2f6d-466d-aded-ce38de14641b" containerID="7f15ca43f2831f60e05edaa698473e4e87c4e5cc1860c85af1520f6a867a6a9f" exitCode=0 Nov 28 07:15:17 crc kubenswrapper[4922]: I1128 07:15:17.924884 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6dccfc5657-4j5qj" event={"ID":"990834b6-2f6d-466d-aded-ce38de14641b","Type":"ContainerDied","Data":"7f15ca43f2831f60e05edaa698473e4e87c4e5cc1860c85af1520f6a867a6a9f"} Nov 28 07:15:18 crc kubenswrapper[4922]: I1128 07:15:18.260279 4922 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-db-sync-7l6kx" Nov 28 07:15:18 crc kubenswrapper[4922]: I1128 07:15:18.361481 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/33e395bd-1a5e-4c48-829d-e7b3408e9b8e-combined-ca-bundle\") pod \"33e395bd-1a5e-4c48-829d-e7b3408e9b8e\" (UID: \"33e395bd-1a5e-4c48-829d-e7b3408e9b8e\") " Nov 28 07:15:18 crc kubenswrapper[4922]: I1128 07:15:18.361578 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6k4g9\" (UniqueName: \"kubernetes.io/projected/33e395bd-1a5e-4c48-829d-e7b3408e9b8e-kube-api-access-6k4g9\") pod \"33e395bd-1a5e-4c48-829d-e7b3408e9b8e\" (UID: \"33e395bd-1a5e-4c48-829d-e7b3408e9b8e\") " Nov 28 07:15:18 crc kubenswrapper[4922]: I1128 07:15:18.361692 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/33e395bd-1a5e-4c48-829d-e7b3408e9b8e-config-data\") pod \"33e395bd-1a5e-4c48-829d-e7b3408e9b8e\" (UID: \"33e395bd-1a5e-4c48-829d-e7b3408e9b8e\") " Nov 28 07:15:18 crc kubenswrapper[4922]: I1128 07:15:18.365883 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/33e395bd-1a5e-4c48-829d-e7b3408e9b8e-kube-api-access-6k4g9" (OuterVolumeSpecName: "kube-api-access-6k4g9") pod "33e395bd-1a5e-4c48-829d-e7b3408e9b8e" (UID: "33e395bd-1a5e-4c48-829d-e7b3408e9b8e"). InnerVolumeSpecName "kube-api-access-6k4g9". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 07:15:18 crc kubenswrapper[4922]: I1128 07:15:18.390360 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/33e395bd-1a5e-4c48-829d-e7b3408e9b8e-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "33e395bd-1a5e-4c48-829d-e7b3408e9b8e" (UID: "33e395bd-1a5e-4c48-829d-e7b3408e9b8e"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 07:15:18 crc kubenswrapper[4922]: I1128 07:15:18.407365 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/33e395bd-1a5e-4c48-829d-e7b3408e9b8e-config-data" (OuterVolumeSpecName: "config-data") pod "33e395bd-1a5e-4c48-829d-e7b3408e9b8e" (UID: "33e395bd-1a5e-4c48-829d-e7b3408e9b8e"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 07:15:18 crc kubenswrapper[4922]: I1128 07:15:18.465591 4922 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/33e395bd-1a5e-4c48-829d-e7b3408e9b8e-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 07:15:18 crc kubenswrapper[4922]: I1128 07:15:18.465631 4922 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6k4g9\" (UniqueName: \"kubernetes.io/projected/33e395bd-1a5e-4c48-829d-e7b3408e9b8e-kube-api-access-6k4g9\") on node \"crc\" DevicePath \"\"" Nov 28 07:15:18 crc kubenswrapper[4922]: I1128 07:15:18.465647 4922 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/33e395bd-1a5e-4c48-829d-e7b3408e9b8e-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 07:15:18 crc kubenswrapper[4922]: I1128 07:15:18.936959 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6dccfc5657-4j5qj" event={"ID":"990834b6-2f6d-466d-aded-ce38de14641b","Type":"ContainerStarted","Data":"f7eb228b9c7d60c7085ef8372e741156b6d5658b5d7faeee0ec645aa4beae5bf"} Nov 28 07:15:18 crc kubenswrapper[4922]: I1128 07:15:18.937945 4922 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-6dccfc5657-4j5qj" Nov 28 07:15:18 crc kubenswrapper[4922]: I1128 07:15:18.938040 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-sync-7l6kx" event={"ID":"33e395bd-1a5e-4c48-829d-e7b3408e9b8e","Type":"ContainerDied","Data":"b6cf9e43f8947de8c7eed6ff8803d21971b37430564395c825d05a153febbc3d"} Nov 28 07:15:18 crc kubenswrapper[4922]: I1128 07:15:18.938060 4922 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="b6cf9e43f8947de8c7eed6ff8803d21971b37430564395c825d05a153febbc3d" Nov 28 07:15:18 crc kubenswrapper[4922]: I1128 07:15:18.938153 4922 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-db-sync-7l6kx" Nov 28 07:15:18 crc kubenswrapper[4922]: I1128 07:15:18.961312 4922 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-6dccfc5657-4j5qj" podStartSLOduration=2.9612807930000002 podStartE2EDuration="2.961280793s" podCreationTimestamp="2025-11-28 07:15:16 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 07:15:18.954148984 +0000 UTC m=+1363.874544576" watchObservedRunningTime="2025-11-28 07:15:18.961280793 +0000 UTC m=+1363.881676415" Nov 28 07:15:19 crc kubenswrapper[4922]: I1128 07:15:19.208185 4922 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-6dccfc5657-4j5qj"] Nov 28 07:15:19 crc kubenswrapper[4922]: I1128 07:15:19.234590 4922 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-bootstrap-8z2fq"] Nov 28 07:15:19 crc kubenswrapper[4922]: E1128 07:15:19.234988 4922 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="33e395bd-1a5e-4c48-829d-e7b3408e9b8e" containerName="keystone-db-sync" Nov 28 07:15:19 crc kubenswrapper[4922]: I1128 07:15:19.235007 4922 state_mem.go:107] "Deleted CPUSet assignment" podUID="33e395bd-1a5e-4c48-829d-e7b3408e9b8e" containerName="keystone-db-sync" Nov 28 07:15:19 crc kubenswrapper[4922]: I1128 07:15:19.235187 4922 memory_manager.go:354] "RemoveStaleState removing state" podUID="33e395bd-1a5e-4c48-829d-e7b3408e9b8e" containerName="keystone-db-sync" Nov 28 07:15:19 crc kubenswrapper[4922]: I1128 07:15:19.235753 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-8z2fq" Nov 28 07:15:19 crc kubenswrapper[4922]: I1128 07:15:19.239715 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-scripts" Nov 28 07:15:19 crc kubenswrapper[4922]: I1128 07:15:19.239789 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"osp-secret" Nov 28 07:15:19 crc kubenswrapper[4922]: I1128 07:15:19.239849 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-config-data" Nov 28 07:15:19 crc kubenswrapper[4922]: I1128 07:15:19.239797 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone" Nov 28 07:15:19 crc kubenswrapper[4922]: I1128 07:15:19.242262 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-keystone-dockercfg-vx657" Nov 28 07:15:19 crc kubenswrapper[4922]: I1128 07:15:19.243733 4922 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-75c8c9c785-w4vmq"] Nov 28 07:15:19 crc kubenswrapper[4922]: I1128 07:15:19.245308 4922 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-75c8c9c785-w4vmq" Nov 28 07:15:19 crc kubenswrapper[4922]: I1128 07:15:19.254259 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-75c8c9c785-w4vmq"] Nov 28 07:15:19 crc kubenswrapper[4922]: I1128 07:15:19.277753 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-bootstrap-8z2fq"] Nov 28 07:15:19 crc kubenswrapper[4922]: I1128 07:15:19.307476 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/517eadfa-5fbe-4763-b9cc-03f53087dff6-dns-svc\") pod \"dnsmasq-dns-75c8c9c785-w4vmq\" (UID: \"517eadfa-5fbe-4763-b9cc-03f53087dff6\") " pod="openstack/dnsmasq-dns-75c8c9c785-w4vmq" Nov 28 07:15:19 crc kubenswrapper[4922]: I1128 07:15:19.307567 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ed81fd9d-457d-42fd-a1b3-322f8250b89c-combined-ca-bundle\") pod \"keystone-bootstrap-8z2fq\" (UID: \"ed81fd9d-457d-42fd-a1b3-322f8250b89c\") " pod="openstack/keystone-bootstrap-8z2fq" Nov 28 07:15:19 crc kubenswrapper[4922]: I1128 07:15:19.307594 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dntcl\" (UniqueName: \"kubernetes.io/projected/ed81fd9d-457d-42fd-a1b3-322f8250b89c-kube-api-access-dntcl\") pod \"keystone-bootstrap-8z2fq\" (UID: \"ed81fd9d-457d-42fd-a1b3-322f8250b89c\") " pod="openstack/keystone-bootstrap-8z2fq" Nov 28 07:15:19 crc kubenswrapper[4922]: I1128 07:15:19.307623 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ed81fd9d-457d-42fd-a1b3-322f8250b89c-scripts\") pod \"keystone-bootstrap-8z2fq\" (UID: \"ed81fd9d-457d-42fd-a1b3-322f8250b89c\") " pod="openstack/keystone-bootstrap-8z2fq" Nov 28 07:15:19 crc kubenswrapper[4922]: I1128 07:15:19.307649 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/517eadfa-5fbe-4763-b9cc-03f53087dff6-dns-swift-storage-0\") pod \"dnsmasq-dns-75c8c9c785-w4vmq\" (UID: \"517eadfa-5fbe-4763-b9cc-03f53087dff6\") " pod="openstack/dnsmasq-dns-75c8c9c785-w4vmq" Nov 28 07:15:19 crc kubenswrapper[4922]: I1128 07:15:19.307668 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/517eadfa-5fbe-4763-b9cc-03f53087dff6-ovsdbserver-nb\") pod \"dnsmasq-dns-75c8c9c785-w4vmq\" (UID: \"517eadfa-5fbe-4763-b9cc-03f53087dff6\") " pod="openstack/dnsmasq-dns-75c8c9c785-w4vmq" Nov 28 07:15:19 crc kubenswrapper[4922]: I1128 07:15:19.307686 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/ed81fd9d-457d-42fd-a1b3-322f8250b89c-fernet-keys\") pod \"keystone-bootstrap-8z2fq\" (UID: \"ed81fd9d-457d-42fd-a1b3-322f8250b89c\") " pod="openstack/keystone-bootstrap-8z2fq" Nov 28 07:15:19 crc kubenswrapper[4922]: I1128 07:15:19.307703 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ed81fd9d-457d-42fd-a1b3-322f8250b89c-config-data\") pod \"keystone-bootstrap-8z2fq\" (UID: \"ed81fd9d-457d-42fd-a1b3-322f8250b89c\") 
" pod="openstack/keystone-bootstrap-8z2fq" Nov 28 07:15:19 crc kubenswrapper[4922]: I1128 07:15:19.307741 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/517eadfa-5fbe-4763-b9cc-03f53087dff6-ovsdbserver-sb\") pod \"dnsmasq-dns-75c8c9c785-w4vmq\" (UID: \"517eadfa-5fbe-4763-b9cc-03f53087dff6\") " pod="openstack/dnsmasq-dns-75c8c9c785-w4vmq" Nov 28 07:15:19 crc kubenswrapper[4922]: I1128 07:15:19.307772 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/517eadfa-5fbe-4763-b9cc-03f53087dff6-config\") pod \"dnsmasq-dns-75c8c9c785-w4vmq\" (UID: \"517eadfa-5fbe-4763-b9cc-03f53087dff6\") " pod="openstack/dnsmasq-dns-75c8c9c785-w4vmq" Nov 28 07:15:19 crc kubenswrapper[4922]: I1128 07:15:19.307790 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-spvb7\" (UniqueName: \"kubernetes.io/projected/517eadfa-5fbe-4763-b9cc-03f53087dff6-kube-api-access-spvb7\") pod \"dnsmasq-dns-75c8c9c785-w4vmq\" (UID: \"517eadfa-5fbe-4763-b9cc-03f53087dff6\") " pod="openstack/dnsmasq-dns-75c8c9c785-w4vmq" Nov 28 07:15:19 crc kubenswrapper[4922]: I1128 07:15:19.307808 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/ed81fd9d-457d-42fd-a1b3-322f8250b89c-credential-keys\") pod \"keystone-bootstrap-8z2fq\" (UID: \"ed81fd9d-457d-42fd-a1b3-322f8250b89c\") " pod="openstack/keystone-bootstrap-8z2fq" Nov 28 07:15:19 crc kubenswrapper[4922]: I1128 07:15:19.407351 4922 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-db-sync-4p4rg"] Nov 28 07:15:19 crc kubenswrapper[4922]: I1128 07:15:19.408458 4922 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-db-sync-4p4rg" Nov 28 07:15:19 crc kubenswrapper[4922]: I1128 07:15:19.409095 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/517eadfa-5fbe-4763-b9cc-03f53087dff6-dns-swift-storage-0\") pod \"dnsmasq-dns-75c8c9c785-w4vmq\" (UID: \"517eadfa-5fbe-4763-b9cc-03f53087dff6\") " pod="openstack/dnsmasq-dns-75c8c9c785-w4vmq" Nov 28 07:15:19 crc kubenswrapper[4922]: I1128 07:15:19.409132 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/517eadfa-5fbe-4763-b9cc-03f53087dff6-ovsdbserver-nb\") pod \"dnsmasq-dns-75c8c9c785-w4vmq\" (UID: \"517eadfa-5fbe-4763-b9cc-03f53087dff6\") " pod="openstack/dnsmasq-dns-75c8c9c785-w4vmq" Nov 28 07:15:19 crc kubenswrapper[4922]: I1128 07:15:19.409152 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/ed81fd9d-457d-42fd-a1b3-322f8250b89c-fernet-keys\") pod \"keystone-bootstrap-8z2fq\" (UID: \"ed81fd9d-457d-42fd-a1b3-322f8250b89c\") " pod="openstack/keystone-bootstrap-8z2fq" Nov 28 07:15:19 crc kubenswrapper[4922]: I1128 07:15:19.409176 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ed81fd9d-457d-42fd-a1b3-322f8250b89c-config-data\") pod \"keystone-bootstrap-8z2fq\" (UID: \"ed81fd9d-457d-42fd-a1b3-322f8250b89c\") " pod="openstack/keystone-bootstrap-8z2fq" Nov 28 07:15:19 crc kubenswrapper[4922]: I1128 07:15:19.409202 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/517eadfa-5fbe-4763-b9cc-03f53087dff6-ovsdbserver-sb\") pod \"dnsmasq-dns-75c8c9c785-w4vmq\" (UID: \"517eadfa-5fbe-4763-b9cc-03f53087dff6\") " pod="openstack/dnsmasq-dns-75c8c9c785-w4vmq" Nov 28 07:15:19 crc kubenswrapper[4922]: I1128 07:15:19.409265 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/517eadfa-5fbe-4763-b9cc-03f53087dff6-config\") pod \"dnsmasq-dns-75c8c9c785-w4vmq\" (UID: \"517eadfa-5fbe-4763-b9cc-03f53087dff6\") " pod="openstack/dnsmasq-dns-75c8c9c785-w4vmq" Nov 28 07:15:19 crc kubenswrapper[4922]: I1128 07:15:19.409296 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-spvb7\" (UniqueName: \"kubernetes.io/projected/517eadfa-5fbe-4763-b9cc-03f53087dff6-kube-api-access-spvb7\") pod \"dnsmasq-dns-75c8c9c785-w4vmq\" (UID: \"517eadfa-5fbe-4763-b9cc-03f53087dff6\") " pod="openstack/dnsmasq-dns-75c8c9c785-w4vmq" Nov 28 07:15:19 crc kubenswrapper[4922]: I1128 07:15:19.409313 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/ed81fd9d-457d-42fd-a1b3-322f8250b89c-credential-keys\") pod \"keystone-bootstrap-8z2fq\" (UID: \"ed81fd9d-457d-42fd-a1b3-322f8250b89c\") " pod="openstack/keystone-bootstrap-8z2fq" Nov 28 07:15:19 crc kubenswrapper[4922]: I1128 07:15:19.409352 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/517eadfa-5fbe-4763-b9cc-03f53087dff6-dns-svc\") pod \"dnsmasq-dns-75c8c9c785-w4vmq\" (UID: \"517eadfa-5fbe-4763-b9cc-03f53087dff6\") " pod="openstack/dnsmasq-dns-75c8c9c785-w4vmq" Nov 28 07:15:19 crc kubenswrapper[4922]: 
I1128 07:15:19.409403 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ed81fd9d-457d-42fd-a1b3-322f8250b89c-combined-ca-bundle\") pod \"keystone-bootstrap-8z2fq\" (UID: \"ed81fd9d-457d-42fd-a1b3-322f8250b89c\") " pod="openstack/keystone-bootstrap-8z2fq" Nov 28 07:15:19 crc kubenswrapper[4922]: I1128 07:15:19.409429 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dntcl\" (UniqueName: \"kubernetes.io/projected/ed81fd9d-457d-42fd-a1b3-322f8250b89c-kube-api-access-dntcl\") pod \"keystone-bootstrap-8z2fq\" (UID: \"ed81fd9d-457d-42fd-a1b3-322f8250b89c\") " pod="openstack/keystone-bootstrap-8z2fq" Nov 28 07:15:19 crc kubenswrapper[4922]: I1128 07:15:19.409453 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ed81fd9d-457d-42fd-a1b3-322f8250b89c-scripts\") pod \"keystone-bootstrap-8z2fq\" (UID: \"ed81fd9d-457d-42fd-a1b3-322f8250b89c\") " pod="openstack/keystone-bootstrap-8z2fq" Nov 28 07:15:19 crc kubenswrapper[4922]: I1128 07:15:19.410440 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/517eadfa-5fbe-4763-b9cc-03f53087dff6-ovsdbserver-sb\") pod \"dnsmasq-dns-75c8c9c785-w4vmq\" (UID: \"517eadfa-5fbe-4763-b9cc-03f53087dff6\") " pod="openstack/dnsmasq-dns-75c8c9c785-w4vmq" Nov 28 07:15:19 crc kubenswrapper[4922]: I1128 07:15:19.412144 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/517eadfa-5fbe-4763-b9cc-03f53087dff6-dns-swift-storage-0\") pod \"dnsmasq-dns-75c8c9c785-w4vmq\" (UID: \"517eadfa-5fbe-4763-b9cc-03f53087dff6\") " pod="openstack/dnsmasq-dns-75c8c9c785-w4vmq" Nov 28 07:15:19 crc kubenswrapper[4922]: I1128 07:15:19.420257 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ed81fd9d-457d-42fd-a1b3-322f8250b89c-scripts\") pod \"keystone-bootstrap-8z2fq\" (UID: \"ed81fd9d-457d-42fd-a1b3-322f8250b89c\") " pod="openstack/keystone-bootstrap-8z2fq" Nov 28 07:15:19 crc kubenswrapper[4922]: I1128 07:15:19.420374 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-cinder-dockercfg-4r9g9" Nov 28 07:15:19 crc kubenswrapper[4922]: I1128 07:15:19.421205 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/517eadfa-5fbe-4763-b9cc-03f53087dff6-dns-svc\") pod \"dnsmasq-dns-75c8c9c785-w4vmq\" (UID: \"517eadfa-5fbe-4763-b9cc-03f53087dff6\") " pod="openstack/dnsmasq-dns-75c8c9c785-w4vmq" Nov 28 07:15:19 crc kubenswrapper[4922]: I1128 07:15:19.421765 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/ed81fd9d-457d-42fd-a1b3-322f8250b89c-credential-keys\") pod \"keystone-bootstrap-8z2fq\" (UID: \"ed81fd9d-457d-42fd-a1b3-322f8250b89c\") " pod="openstack/keystone-bootstrap-8z2fq" Nov 28 07:15:19 crc kubenswrapper[4922]: I1128 07:15:19.421810 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/517eadfa-5fbe-4763-b9cc-03f53087dff6-config\") pod \"dnsmasq-dns-75c8c9c785-w4vmq\" (UID: \"517eadfa-5fbe-4763-b9cc-03f53087dff6\") " pod="openstack/dnsmasq-dns-75c8c9c785-w4vmq" Nov 28 07:15:19 crc kubenswrapper[4922]: 
I1128 07:15:19.421887 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-scripts" Nov 28 07:15:19 crc kubenswrapper[4922]: I1128 07:15:19.421966 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-config-data" Nov 28 07:15:19 crc kubenswrapper[4922]: I1128 07:15:19.422113 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/517eadfa-5fbe-4763-b9cc-03f53087dff6-ovsdbserver-nb\") pod \"dnsmasq-dns-75c8c9c785-w4vmq\" (UID: \"517eadfa-5fbe-4763-b9cc-03f53087dff6\") " pod="openstack/dnsmasq-dns-75c8c9c785-w4vmq" Nov 28 07:15:19 crc kubenswrapper[4922]: I1128 07:15:19.427692 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/ed81fd9d-457d-42fd-a1b3-322f8250b89c-fernet-keys\") pod \"keystone-bootstrap-8z2fq\" (UID: \"ed81fd9d-457d-42fd-a1b3-322f8250b89c\") " pod="openstack/keystone-bootstrap-8z2fq" Nov 28 07:15:19 crc kubenswrapper[4922]: I1128 07:15:19.428149 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ed81fd9d-457d-42fd-a1b3-322f8250b89c-config-data\") pod \"keystone-bootstrap-8z2fq\" (UID: \"ed81fd9d-457d-42fd-a1b3-322f8250b89c\") " pod="openstack/keystone-bootstrap-8z2fq" Nov 28 07:15:19 crc kubenswrapper[4922]: I1128 07:15:19.430141 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-db-sync-4p4rg"] Nov 28 07:15:19 crc kubenswrapper[4922]: I1128 07:15:19.440647 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ed81fd9d-457d-42fd-a1b3-322f8250b89c-combined-ca-bundle\") pod \"keystone-bootstrap-8z2fq\" (UID: \"ed81fd9d-457d-42fd-a1b3-322f8250b89c\") " pod="openstack/keystone-bootstrap-8z2fq" Nov 28 07:15:19 crc kubenswrapper[4922]: I1128 07:15:19.444509 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dntcl\" (UniqueName: \"kubernetes.io/projected/ed81fd9d-457d-42fd-a1b3-322f8250b89c-kube-api-access-dntcl\") pod \"keystone-bootstrap-8z2fq\" (UID: \"ed81fd9d-457d-42fd-a1b3-322f8250b89c\") " pod="openstack/keystone-bootstrap-8z2fq" Nov 28 07:15:19 crc kubenswrapper[4922]: I1128 07:15:19.449046 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-spvb7\" (UniqueName: \"kubernetes.io/projected/517eadfa-5fbe-4763-b9cc-03f53087dff6-kube-api-access-spvb7\") pod \"dnsmasq-dns-75c8c9c785-w4vmq\" (UID: \"517eadfa-5fbe-4763-b9cc-03f53087dff6\") " pod="openstack/dnsmasq-dns-75c8c9c785-w4vmq" Nov 28 07:15:19 crc kubenswrapper[4922]: I1128 07:15:19.510883 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/2bf381d5-4211-44a0-8fd6-4b1c05fb690d-db-sync-config-data\") pod \"cinder-db-sync-4p4rg\" (UID: \"2bf381d5-4211-44a0-8fd6-4b1c05fb690d\") " pod="openstack/cinder-db-sync-4p4rg" Nov 28 07:15:19 crc kubenswrapper[4922]: I1128 07:15:19.511008 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/2bf381d5-4211-44a0-8fd6-4b1c05fb690d-etc-machine-id\") pod \"cinder-db-sync-4p4rg\" (UID: \"2bf381d5-4211-44a0-8fd6-4b1c05fb690d\") " pod="openstack/cinder-db-sync-4p4rg" Nov 28 07:15:19 crc kubenswrapper[4922]: I1128 
07:15:19.511039 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2bf381d5-4211-44a0-8fd6-4b1c05fb690d-combined-ca-bundle\") pod \"cinder-db-sync-4p4rg\" (UID: \"2bf381d5-4211-44a0-8fd6-4b1c05fb690d\") " pod="openstack/cinder-db-sync-4p4rg" Nov 28 07:15:19 crc kubenswrapper[4922]: I1128 07:15:19.511081 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2bf381d5-4211-44a0-8fd6-4b1c05fb690d-config-data\") pod \"cinder-db-sync-4p4rg\" (UID: \"2bf381d5-4211-44a0-8fd6-4b1c05fb690d\") " pod="openstack/cinder-db-sync-4p4rg" Nov 28 07:15:19 crc kubenswrapper[4922]: I1128 07:15:19.511098 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/2bf381d5-4211-44a0-8fd6-4b1c05fb690d-scripts\") pod \"cinder-db-sync-4p4rg\" (UID: \"2bf381d5-4211-44a0-8fd6-4b1c05fb690d\") " pod="openstack/cinder-db-sync-4p4rg" Nov 28 07:15:19 crc kubenswrapper[4922]: I1128 07:15:19.511147 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-67j4r\" (UniqueName: \"kubernetes.io/projected/2bf381d5-4211-44a0-8fd6-4b1c05fb690d-kube-api-access-67j4r\") pod \"cinder-db-sync-4p4rg\" (UID: \"2bf381d5-4211-44a0-8fd6-4b1c05fb690d\") " pod="openstack/cinder-db-sync-4p4rg" Nov 28 07:15:19 crc kubenswrapper[4922]: I1128 07:15:19.554379 4922 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-db-sync-cj2fg"] Nov 28 07:15:19 crc kubenswrapper[4922]: I1128 07:15:19.557316 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-sync-cj2fg" Nov 28 07:15:19 crc kubenswrapper[4922]: I1128 07:15:19.560141 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-8z2fq" Nov 28 07:15:19 crc kubenswrapper[4922]: I1128 07:15:19.560876 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-config" Nov 28 07:15:19 crc kubenswrapper[4922]: I1128 07:15:19.561194 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-httpd-config" Nov 28 07:15:19 crc kubenswrapper[4922]: I1128 07:15:19.561240 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-neutron-dockercfg-r6kx9" Nov 28 07:15:19 crc kubenswrapper[4922]: I1128 07:15:19.569519 4922 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-75c8c9c785-w4vmq" Nov 28 07:15:19 crc kubenswrapper[4922]: I1128 07:15:19.602272 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-db-sync-cj2fg"] Nov 28 07:15:19 crc kubenswrapper[4922]: I1128 07:15:19.615183 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/2bf381d5-4211-44a0-8fd6-4b1c05fb690d-etc-machine-id\") pod \"cinder-db-sync-4p4rg\" (UID: \"2bf381d5-4211-44a0-8fd6-4b1c05fb690d\") " pod="openstack/cinder-db-sync-4p4rg" Nov 28 07:15:19 crc kubenswrapper[4922]: I1128 07:15:19.615247 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2bf381d5-4211-44a0-8fd6-4b1c05fb690d-combined-ca-bundle\") pod \"cinder-db-sync-4p4rg\" (UID: \"2bf381d5-4211-44a0-8fd6-4b1c05fb690d\") " pod="openstack/cinder-db-sync-4p4rg" Nov 28 07:15:19 crc kubenswrapper[4922]: I1128 07:15:19.615279 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2bf381d5-4211-44a0-8fd6-4b1c05fb690d-config-data\") pod \"cinder-db-sync-4p4rg\" (UID: \"2bf381d5-4211-44a0-8fd6-4b1c05fb690d\") " pod="openstack/cinder-db-sync-4p4rg" Nov 28 07:15:19 crc kubenswrapper[4922]: I1128 07:15:19.615296 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/2bf381d5-4211-44a0-8fd6-4b1c05fb690d-scripts\") pod \"cinder-db-sync-4p4rg\" (UID: \"2bf381d5-4211-44a0-8fd6-4b1c05fb690d\") " pod="openstack/cinder-db-sync-4p4rg" Nov 28 07:15:19 crc kubenswrapper[4922]: I1128 07:15:19.615334 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-67j4r\" (UniqueName: \"kubernetes.io/projected/2bf381d5-4211-44a0-8fd6-4b1c05fb690d-kube-api-access-67j4r\") pod \"cinder-db-sync-4p4rg\" (UID: \"2bf381d5-4211-44a0-8fd6-4b1c05fb690d\") " pod="openstack/cinder-db-sync-4p4rg" Nov 28 07:15:19 crc kubenswrapper[4922]: I1128 07:15:19.615368 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/2bf381d5-4211-44a0-8fd6-4b1c05fb690d-db-sync-config-data\") pod \"cinder-db-sync-4p4rg\" (UID: \"2bf381d5-4211-44a0-8fd6-4b1c05fb690d\") " pod="openstack/cinder-db-sync-4p4rg" Nov 28 07:15:19 crc kubenswrapper[4922]: I1128 07:15:19.618417 4922 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/placement-db-sync-gc24f"] Nov 28 07:15:19 crc kubenswrapper[4922]: I1128 07:15:19.619515 4922 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-db-sync-gc24f" Nov 28 07:15:19 crc kubenswrapper[4922]: I1128 07:15:19.626435 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-scripts" Nov 28 07:15:19 crc kubenswrapper[4922]: I1128 07:15:19.626628 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-placement-dockercfg-9rnmt" Nov 28 07:15:19 crc kubenswrapper[4922]: I1128 07:15:19.626802 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-config-data" Nov 28 07:15:19 crc kubenswrapper[4922]: I1128 07:15:19.627275 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/2bf381d5-4211-44a0-8fd6-4b1c05fb690d-etc-machine-id\") pod \"cinder-db-sync-4p4rg\" (UID: \"2bf381d5-4211-44a0-8fd6-4b1c05fb690d\") " pod="openstack/cinder-db-sync-4p4rg" Nov 28 07:15:19 crc kubenswrapper[4922]: I1128 07:15:19.632750 4922 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-75c8c9c785-w4vmq"] Nov 28 07:15:19 crc kubenswrapper[4922]: I1128 07:15:19.633017 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/2bf381d5-4211-44a0-8fd6-4b1c05fb690d-db-sync-config-data\") pod \"cinder-db-sync-4p4rg\" (UID: \"2bf381d5-4211-44a0-8fd6-4b1c05fb690d\") " pod="openstack/cinder-db-sync-4p4rg" Nov 28 07:15:19 crc kubenswrapper[4922]: I1128 07:15:19.643004 4922 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Nov 28 07:15:19 crc kubenswrapper[4922]: I1128 07:15:19.645279 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 28 07:15:19 crc kubenswrapper[4922]: I1128 07:15:19.645757 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/2bf381d5-4211-44a0-8fd6-4b1c05fb690d-scripts\") pod \"cinder-db-sync-4p4rg\" (UID: \"2bf381d5-4211-44a0-8fd6-4b1c05fb690d\") " pod="openstack/cinder-db-sync-4p4rg" Nov 28 07:15:19 crc kubenswrapper[4922]: I1128 07:15:19.648877 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Nov 28 07:15:19 crc kubenswrapper[4922]: I1128 07:15:19.649427 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Nov 28 07:15:19 crc kubenswrapper[4922]: I1128 07:15:19.651834 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2bf381d5-4211-44a0-8fd6-4b1c05fb690d-combined-ca-bundle\") pod \"cinder-db-sync-4p4rg\" (UID: \"2bf381d5-4211-44a0-8fd6-4b1c05fb690d\") " pod="openstack/cinder-db-sync-4p4rg" Nov 28 07:15:19 crc kubenswrapper[4922]: I1128 07:15:19.656408 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2bf381d5-4211-44a0-8fd6-4b1c05fb690d-config-data\") pod \"cinder-db-sync-4p4rg\" (UID: \"2bf381d5-4211-44a0-8fd6-4b1c05fb690d\") " pod="openstack/cinder-db-sync-4p4rg" Nov 28 07:15:19 crc kubenswrapper[4922]: I1128 07:15:19.668440 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 28 07:15:19 crc kubenswrapper[4922]: I1128 07:15:19.672932 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-67j4r\" (UniqueName: 
\"kubernetes.io/projected/2bf381d5-4211-44a0-8fd6-4b1c05fb690d-kube-api-access-67j4r\") pod \"cinder-db-sync-4p4rg\" (UID: \"2bf381d5-4211-44a0-8fd6-4b1c05fb690d\") " pod="openstack/cinder-db-sync-4p4rg" Nov 28 07:15:19 crc kubenswrapper[4922]: I1128 07:15:19.687578 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-db-sync-gc24f"] Nov 28 07:15:19 crc kubenswrapper[4922]: I1128 07:15:19.702758 4922 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-db-sync-4q5pb"] Nov 28 07:15:19 crc kubenswrapper[4922]: I1128 07:15:19.704066 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-sync-4q5pb" Nov 28 07:15:19 crc kubenswrapper[4922]: I1128 07:15:19.708518 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-config-data" Nov 28 07:15:19 crc kubenswrapper[4922]: I1128 07:15:19.708701 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-barbican-dockercfg-6g2s9" Nov 28 07:15:19 crc kubenswrapper[4922]: I1128 07:15:19.716446 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4c7a8e1f-5f3c-451e-b407-6afb6d5443e3-combined-ca-bundle\") pod \"neutron-db-sync-cj2fg\" (UID: \"4c7a8e1f-5f3c-451e-b407-6afb6d5443e3\") " pod="openstack/neutron-db-sync-cj2fg" Nov 28 07:15:19 crc kubenswrapper[4922]: I1128 07:15:19.716505 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lbj4m\" (UniqueName: \"kubernetes.io/projected/4c7a8e1f-5f3c-451e-b407-6afb6d5443e3-kube-api-access-lbj4m\") pod \"neutron-db-sync-cj2fg\" (UID: \"4c7a8e1f-5f3c-451e-b407-6afb6d5443e3\") " pod="openstack/neutron-db-sync-cj2fg" Nov 28 07:15:19 crc kubenswrapper[4922]: I1128 07:15:19.716555 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/4c7a8e1f-5f3c-451e-b407-6afb6d5443e3-config\") pod \"neutron-db-sync-cj2fg\" (UID: \"4c7a8e1f-5f3c-451e-b407-6afb6d5443e3\") " pod="openstack/neutron-db-sync-cj2fg" Nov 28 07:15:19 crc kubenswrapper[4922]: I1128 07:15:19.730302 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-db-sync-4q5pb"] Nov 28 07:15:19 crc kubenswrapper[4922]: I1128 07:15:19.740873 4922 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-6cd6764b85-lqks7"] Nov 28 07:15:19 crc kubenswrapper[4922]: I1128 07:15:19.743316 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6cd6764b85-lqks7" Nov 28 07:15:19 crc kubenswrapper[4922]: I1128 07:15:19.749268 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-6cd6764b85-lqks7"] Nov 28 07:15:19 crc kubenswrapper[4922]: I1128 07:15:19.799959 4922 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-db-sync-4p4rg" Nov 28 07:15:19 crc kubenswrapper[4922]: I1128 07:15:19.818326 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/086c6cef-6f21-46b9-ace7-f06ffff84fb3-combined-ca-bundle\") pod \"placement-db-sync-gc24f\" (UID: \"086c6cef-6f21-46b9-ace7-f06ffff84fb3\") " pod="openstack/placement-db-sync-gc24f" Nov 28 07:15:19 crc kubenswrapper[4922]: I1128 07:15:19.818369 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/aa053525-2f00-4415-b9b5-35948c8e5038-combined-ca-bundle\") pod \"barbican-db-sync-4q5pb\" (UID: \"aa053525-2f00-4415-b9b5-35948c8e5038\") " pod="openstack/barbican-db-sync-4q5pb" Nov 28 07:15:19 crc kubenswrapper[4922]: I1128 07:15:19.818400 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/aa053525-2f00-4415-b9b5-35948c8e5038-db-sync-config-data\") pod \"barbican-db-sync-4q5pb\" (UID: \"aa053525-2f00-4415-b9b5-35948c8e5038\") " pod="openstack/barbican-db-sync-4q5pb" Nov 28 07:15:19 crc kubenswrapper[4922]: I1128 07:15:19.818425 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/be4be2de-2f46-4982-8cd6-b73888d293af-log-httpd\") pod \"ceilometer-0\" (UID: \"be4be2de-2f46-4982-8cd6-b73888d293af\") " pod="openstack/ceilometer-0" Nov 28 07:15:19 crc kubenswrapper[4922]: I1128 07:15:19.818464 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lbj4m\" (UniqueName: \"kubernetes.io/projected/4c7a8e1f-5f3c-451e-b407-6afb6d5443e3-kube-api-access-lbj4m\") pod \"neutron-db-sync-cj2fg\" (UID: \"4c7a8e1f-5f3c-451e-b407-6afb6d5443e3\") " pod="openstack/neutron-db-sync-cj2fg" Nov 28 07:15:19 crc kubenswrapper[4922]: I1128 07:15:19.818499 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2bfrw\" (UniqueName: \"kubernetes.io/projected/086c6cef-6f21-46b9-ace7-f06ffff84fb3-kube-api-access-2bfrw\") pod \"placement-db-sync-gc24f\" (UID: \"086c6cef-6f21-46b9-ace7-f06ffff84fb3\") " pod="openstack/placement-db-sync-gc24f" Nov 28 07:15:19 crc kubenswrapper[4922]: I1128 07:15:19.818526 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/086c6cef-6f21-46b9-ace7-f06ffff84fb3-scripts\") pod \"placement-db-sync-gc24f\" (UID: \"086c6cef-6f21-46b9-ace7-f06ffff84fb3\") " pod="openstack/placement-db-sync-gc24f" Nov 28 07:15:19 crc kubenswrapper[4922]: I1128 07:15:19.818576 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/4c7a8e1f-5f3c-451e-b407-6afb6d5443e3-config\") pod \"neutron-db-sync-cj2fg\" (UID: \"4c7a8e1f-5f3c-451e-b407-6afb6d5443e3\") " pod="openstack/neutron-db-sync-cj2fg" Nov 28 07:15:19 crc kubenswrapper[4922]: I1128 07:15:19.818600 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sxs2b\" (UniqueName: \"kubernetes.io/projected/aa053525-2f00-4415-b9b5-35948c8e5038-kube-api-access-sxs2b\") pod \"barbican-db-sync-4q5pb\" (UID: \"aa053525-2f00-4415-b9b5-35948c8e5038\") " 
pod="openstack/barbican-db-sync-4q5pb" Nov 28 07:15:19 crc kubenswrapper[4922]: I1128 07:15:19.818630 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-b8pzb\" (UniqueName: \"kubernetes.io/projected/be4be2de-2f46-4982-8cd6-b73888d293af-kube-api-access-b8pzb\") pod \"ceilometer-0\" (UID: \"be4be2de-2f46-4982-8cd6-b73888d293af\") " pod="openstack/ceilometer-0" Nov 28 07:15:19 crc kubenswrapper[4922]: I1128 07:15:19.818654 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/086c6cef-6f21-46b9-ace7-f06ffff84fb3-config-data\") pod \"placement-db-sync-gc24f\" (UID: \"086c6cef-6f21-46b9-ace7-f06ffff84fb3\") " pod="openstack/placement-db-sync-gc24f" Nov 28 07:15:19 crc kubenswrapper[4922]: I1128 07:15:19.818674 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/086c6cef-6f21-46b9-ace7-f06ffff84fb3-logs\") pod \"placement-db-sync-gc24f\" (UID: \"086c6cef-6f21-46b9-ace7-f06ffff84fb3\") " pod="openstack/placement-db-sync-gc24f" Nov 28 07:15:19 crc kubenswrapper[4922]: I1128 07:15:19.818715 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/be4be2de-2f46-4982-8cd6-b73888d293af-run-httpd\") pod \"ceilometer-0\" (UID: \"be4be2de-2f46-4982-8cd6-b73888d293af\") " pod="openstack/ceilometer-0" Nov 28 07:15:19 crc kubenswrapper[4922]: I1128 07:15:19.818741 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/be4be2de-2f46-4982-8cd6-b73888d293af-scripts\") pod \"ceilometer-0\" (UID: \"be4be2de-2f46-4982-8cd6-b73888d293af\") " pod="openstack/ceilometer-0" Nov 28 07:15:19 crc kubenswrapper[4922]: I1128 07:15:19.818795 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/be4be2de-2f46-4982-8cd6-b73888d293af-config-data\") pod \"ceilometer-0\" (UID: \"be4be2de-2f46-4982-8cd6-b73888d293af\") " pod="openstack/ceilometer-0" Nov 28 07:15:19 crc kubenswrapper[4922]: I1128 07:15:19.818833 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/be4be2de-2f46-4982-8cd6-b73888d293af-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"be4be2de-2f46-4982-8cd6-b73888d293af\") " pod="openstack/ceilometer-0" Nov 28 07:15:19 crc kubenswrapper[4922]: I1128 07:15:19.818885 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4c7a8e1f-5f3c-451e-b407-6afb6d5443e3-combined-ca-bundle\") pod \"neutron-db-sync-cj2fg\" (UID: \"4c7a8e1f-5f3c-451e-b407-6afb6d5443e3\") " pod="openstack/neutron-db-sync-cj2fg" Nov 28 07:15:19 crc kubenswrapper[4922]: I1128 07:15:19.818917 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/be4be2de-2f46-4982-8cd6-b73888d293af-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"be4be2de-2f46-4982-8cd6-b73888d293af\") " pod="openstack/ceilometer-0" Nov 28 07:15:19 crc kubenswrapper[4922]: I1128 07:15:19.830163 4922 operation_generator.go:637] "MountVolume.SetUp succeeded 
for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4c7a8e1f-5f3c-451e-b407-6afb6d5443e3-combined-ca-bundle\") pod \"neutron-db-sync-cj2fg\" (UID: \"4c7a8e1f-5f3c-451e-b407-6afb6d5443e3\") " pod="openstack/neutron-db-sync-cj2fg" Nov 28 07:15:19 crc kubenswrapper[4922]: I1128 07:15:19.838979 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/secret/4c7a8e1f-5f3c-451e-b407-6afb6d5443e3-config\") pod \"neutron-db-sync-cj2fg\" (UID: \"4c7a8e1f-5f3c-451e-b407-6afb6d5443e3\") " pod="openstack/neutron-db-sync-cj2fg" Nov 28 07:15:19 crc kubenswrapper[4922]: I1128 07:15:19.843925 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lbj4m\" (UniqueName: \"kubernetes.io/projected/4c7a8e1f-5f3c-451e-b407-6afb6d5443e3-kube-api-access-lbj4m\") pod \"neutron-db-sync-cj2fg\" (UID: \"4c7a8e1f-5f3c-451e-b407-6afb6d5443e3\") " pod="openstack/neutron-db-sync-cj2fg" Nov 28 07:15:19 crc kubenswrapper[4922]: I1128 07:15:19.878317 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-sync-cj2fg" Nov 28 07:15:19 crc kubenswrapper[4922]: I1128 07:15:19.920232 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/e07cd6b9-6f1b-40ed-97ea-f81c981b6967-dns-svc\") pod \"dnsmasq-dns-6cd6764b85-lqks7\" (UID: \"e07cd6b9-6f1b-40ed-97ea-f81c981b6967\") " pod="openstack/dnsmasq-dns-6cd6764b85-lqks7" Nov 28 07:15:19 crc kubenswrapper[4922]: I1128 07:15:19.920291 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sxs2b\" (UniqueName: \"kubernetes.io/projected/aa053525-2f00-4415-b9b5-35948c8e5038-kube-api-access-sxs2b\") pod \"barbican-db-sync-4q5pb\" (UID: \"aa053525-2f00-4415-b9b5-35948c8e5038\") " pod="openstack/barbican-db-sync-4q5pb" Nov 28 07:15:19 crc kubenswrapper[4922]: I1128 07:15:19.920316 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-b8pzb\" (UniqueName: \"kubernetes.io/projected/be4be2de-2f46-4982-8cd6-b73888d293af-kube-api-access-b8pzb\") pod \"ceilometer-0\" (UID: \"be4be2de-2f46-4982-8cd6-b73888d293af\") " pod="openstack/ceilometer-0" Nov 28 07:15:19 crc kubenswrapper[4922]: I1128 07:15:19.920339 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/086c6cef-6f21-46b9-ace7-f06ffff84fb3-config-data\") pod \"placement-db-sync-gc24f\" (UID: \"086c6cef-6f21-46b9-ace7-f06ffff84fb3\") " pod="openstack/placement-db-sync-gc24f" Nov 28 07:15:19 crc kubenswrapper[4922]: I1128 07:15:19.920354 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/086c6cef-6f21-46b9-ace7-f06ffff84fb3-logs\") pod \"placement-db-sync-gc24f\" (UID: \"086c6cef-6f21-46b9-ace7-f06ffff84fb3\") " pod="openstack/placement-db-sync-gc24f" Nov 28 07:15:19 crc kubenswrapper[4922]: I1128 07:15:19.920378 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e07cd6b9-6f1b-40ed-97ea-f81c981b6967-config\") pod \"dnsmasq-dns-6cd6764b85-lqks7\" (UID: \"e07cd6b9-6f1b-40ed-97ea-f81c981b6967\") " pod="openstack/dnsmasq-dns-6cd6764b85-lqks7" Nov 28 07:15:19 crc kubenswrapper[4922]: I1128 07:15:19.920395 4922 reconciler_common.go:218] "operationExecutor.MountVolume 
started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/be4be2de-2f46-4982-8cd6-b73888d293af-run-httpd\") pod \"ceilometer-0\" (UID: \"be4be2de-2f46-4982-8cd6-b73888d293af\") " pod="openstack/ceilometer-0" Nov 28 07:15:19 crc kubenswrapper[4922]: I1128 07:15:19.920418 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/be4be2de-2f46-4982-8cd6-b73888d293af-scripts\") pod \"ceilometer-0\" (UID: \"be4be2de-2f46-4982-8cd6-b73888d293af\") " pod="openstack/ceilometer-0" Nov 28 07:15:19 crc kubenswrapper[4922]: I1128 07:15:19.920460 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/be4be2de-2f46-4982-8cd6-b73888d293af-config-data\") pod \"ceilometer-0\" (UID: \"be4be2de-2f46-4982-8cd6-b73888d293af\") " pod="openstack/ceilometer-0" Nov 28 07:15:19 crc kubenswrapper[4922]: I1128 07:15:19.920488 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/be4be2de-2f46-4982-8cd6-b73888d293af-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"be4be2de-2f46-4982-8cd6-b73888d293af\") " pod="openstack/ceilometer-0" Nov 28 07:15:19 crc kubenswrapper[4922]: I1128 07:15:19.920519 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/be4be2de-2f46-4982-8cd6-b73888d293af-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"be4be2de-2f46-4982-8cd6-b73888d293af\") " pod="openstack/ceilometer-0" Nov 28 07:15:19 crc kubenswrapper[4922]: I1128 07:15:19.920539 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6bjqg\" (UniqueName: \"kubernetes.io/projected/e07cd6b9-6f1b-40ed-97ea-f81c981b6967-kube-api-access-6bjqg\") pod \"dnsmasq-dns-6cd6764b85-lqks7\" (UID: \"e07cd6b9-6f1b-40ed-97ea-f81c981b6967\") " pod="openstack/dnsmasq-dns-6cd6764b85-lqks7" Nov 28 07:15:19 crc kubenswrapper[4922]: I1128 07:15:19.920563 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/086c6cef-6f21-46b9-ace7-f06ffff84fb3-combined-ca-bundle\") pod \"placement-db-sync-gc24f\" (UID: \"086c6cef-6f21-46b9-ace7-f06ffff84fb3\") " pod="openstack/placement-db-sync-gc24f" Nov 28 07:15:19 crc kubenswrapper[4922]: I1128 07:15:19.920579 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/aa053525-2f00-4415-b9b5-35948c8e5038-combined-ca-bundle\") pod \"barbican-db-sync-4q5pb\" (UID: \"aa053525-2f00-4415-b9b5-35948c8e5038\") " pod="openstack/barbican-db-sync-4q5pb" Nov 28 07:15:19 crc kubenswrapper[4922]: I1128 07:15:19.920596 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/aa053525-2f00-4415-b9b5-35948c8e5038-db-sync-config-data\") pod \"barbican-db-sync-4q5pb\" (UID: \"aa053525-2f00-4415-b9b5-35948c8e5038\") " pod="openstack/barbican-db-sync-4q5pb" Nov 28 07:15:19 crc kubenswrapper[4922]: I1128 07:15:19.920611 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/be4be2de-2f46-4982-8cd6-b73888d293af-log-httpd\") pod \"ceilometer-0\" (UID: \"be4be2de-2f46-4982-8cd6-b73888d293af\") " pod="openstack/ceilometer-0" Nov 
28 07:15:19 crc kubenswrapper[4922]: I1128 07:15:19.920627 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/e07cd6b9-6f1b-40ed-97ea-f81c981b6967-dns-swift-storage-0\") pod \"dnsmasq-dns-6cd6764b85-lqks7\" (UID: \"e07cd6b9-6f1b-40ed-97ea-f81c981b6967\") " pod="openstack/dnsmasq-dns-6cd6764b85-lqks7" Nov 28 07:15:19 crc kubenswrapper[4922]: I1128 07:15:19.921254 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/be4be2de-2f46-4982-8cd6-b73888d293af-run-httpd\") pod \"ceilometer-0\" (UID: \"be4be2de-2f46-4982-8cd6-b73888d293af\") " pod="openstack/ceilometer-0" Nov 28 07:15:19 crc kubenswrapper[4922]: I1128 07:15:19.921546 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/086c6cef-6f21-46b9-ace7-f06ffff84fb3-logs\") pod \"placement-db-sync-gc24f\" (UID: \"086c6cef-6f21-46b9-ace7-f06ffff84fb3\") " pod="openstack/placement-db-sync-gc24f" Nov 28 07:15:19 crc kubenswrapper[4922]: I1128 07:15:19.921592 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/be4be2de-2f46-4982-8cd6-b73888d293af-log-httpd\") pod \"ceilometer-0\" (UID: \"be4be2de-2f46-4982-8cd6-b73888d293af\") " pod="openstack/ceilometer-0" Nov 28 07:15:19 crc kubenswrapper[4922]: I1128 07:15:19.921741 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/e07cd6b9-6f1b-40ed-97ea-f81c981b6967-ovsdbserver-nb\") pod \"dnsmasq-dns-6cd6764b85-lqks7\" (UID: \"e07cd6b9-6f1b-40ed-97ea-f81c981b6967\") " pod="openstack/dnsmasq-dns-6cd6764b85-lqks7" Nov 28 07:15:19 crc kubenswrapper[4922]: I1128 07:15:19.921835 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2bfrw\" (UniqueName: \"kubernetes.io/projected/086c6cef-6f21-46b9-ace7-f06ffff84fb3-kube-api-access-2bfrw\") pod \"placement-db-sync-gc24f\" (UID: \"086c6cef-6f21-46b9-ace7-f06ffff84fb3\") " pod="openstack/placement-db-sync-gc24f" Nov 28 07:15:19 crc kubenswrapper[4922]: I1128 07:15:19.921864 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/086c6cef-6f21-46b9-ace7-f06ffff84fb3-scripts\") pod \"placement-db-sync-gc24f\" (UID: \"086c6cef-6f21-46b9-ace7-f06ffff84fb3\") " pod="openstack/placement-db-sync-gc24f" Nov 28 07:15:19 crc kubenswrapper[4922]: I1128 07:15:19.921883 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/e07cd6b9-6f1b-40ed-97ea-f81c981b6967-ovsdbserver-sb\") pod \"dnsmasq-dns-6cd6764b85-lqks7\" (UID: \"e07cd6b9-6f1b-40ed-97ea-f81c981b6967\") " pod="openstack/dnsmasq-dns-6cd6764b85-lqks7" Nov 28 07:15:19 crc kubenswrapper[4922]: I1128 07:15:19.927016 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/aa053525-2f00-4415-b9b5-35948c8e5038-db-sync-config-data\") pod \"barbican-db-sync-4q5pb\" (UID: \"aa053525-2f00-4415-b9b5-35948c8e5038\") " pod="openstack/barbican-db-sync-4q5pb" Nov 28 07:15:19 crc kubenswrapper[4922]: I1128 07:15:19.927320 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" 
(UniqueName: \"kubernetes.io/secret/be4be2de-2f46-4982-8cd6-b73888d293af-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"be4be2de-2f46-4982-8cd6-b73888d293af\") " pod="openstack/ceilometer-0" Nov 28 07:15:19 crc kubenswrapper[4922]: I1128 07:15:19.934682 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/be4be2de-2f46-4982-8cd6-b73888d293af-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"be4be2de-2f46-4982-8cd6-b73888d293af\") " pod="openstack/ceilometer-0" Nov 28 07:15:19 crc kubenswrapper[4922]: I1128 07:15:19.934907 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/be4be2de-2f46-4982-8cd6-b73888d293af-config-data\") pod \"ceilometer-0\" (UID: \"be4be2de-2f46-4982-8cd6-b73888d293af\") " pod="openstack/ceilometer-0" Nov 28 07:15:19 crc kubenswrapper[4922]: I1128 07:15:19.936093 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/be4be2de-2f46-4982-8cd6-b73888d293af-scripts\") pod \"ceilometer-0\" (UID: \"be4be2de-2f46-4982-8cd6-b73888d293af\") " pod="openstack/ceilometer-0" Nov 28 07:15:19 crc kubenswrapper[4922]: I1128 07:15:19.937072 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/aa053525-2f00-4415-b9b5-35948c8e5038-combined-ca-bundle\") pod \"barbican-db-sync-4q5pb\" (UID: \"aa053525-2f00-4415-b9b5-35948c8e5038\") " pod="openstack/barbican-db-sync-4q5pb" Nov 28 07:15:19 crc kubenswrapper[4922]: I1128 07:15:19.937205 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sxs2b\" (UniqueName: \"kubernetes.io/projected/aa053525-2f00-4415-b9b5-35948c8e5038-kube-api-access-sxs2b\") pod \"barbican-db-sync-4q5pb\" (UID: \"aa053525-2f00-4415-b9b5-35948c8e5038\") " pod="openstack/barbican-db-sync-4q5pb" Nov 28 07:15:19 crc kubenswrapper[4922]: I1128 07:15:19.940170 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/086c6cef-6f21-46b9-ace7-f06ffff84fb3-config-data\") pod \"placement-db-sync-gc24f\" (UID: \"086c6cef-6f21-46b9-ace7-f06ffff84fb3\") " pod="openstack/placement-db-sync-gc24f" Nov 28 07:15:19 crc kubenswrapper[4922]: I1128 07:15:19.942639 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/086c6cef-6f21-46b9-ace7-f06ffff84fb3-combined-ca-bundle\") pod \"placement-db-sync-gc24f\" (UID: \"086c6cef-6f21-46b9-ace7-f06ffff84fb3\") " pod="openstack/placement-db-sync-gc24f" Nov 28 07:15:19 crc kubenswrapper[4922]: I1128 07:15:19.946973 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2bfrw\" (UniqueName: \"kubernetes.io/projected/086c6cef-6f21-46b9-ace7-f06ffff84fb3-kube-api-access-2bfrw\") pod \"placement-db-sync-gc24f\" (UID: \"086c6cef-6f21-46b9-ace7-f06ffff84fb3\") " pod="openstack/placement-db-sync-gc24f" Nov 28 07:15:19 crc kubenswrapper[4922]: I1128 07:15:19.947962 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-b8pzb\" (UniqueName: \"kubernetes.io/projected/be4be2de-2f46-4982-8cd6-b73888d293af-kube-api-access-b8pzb\") pod \"ceilometer-0\" (UID: \"be4be2de-2f46-4982-8cd6-b73888d293af\") " pod="openstack/ceilometer-0" Nov 28 07:15:19 crc kubenswrapper[4922]: I1128 07:15:19.949439 4922 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/086c6cef-6f21-46b9-ace7-f06ffff84fb3-scripts\") pod \"placement-db-sync-gc24f\" (UID: \"086c6cef-6f21-46b9-ace7-f06ffff84fb3\") " pod="openstack/placement-db-sync-gc24f" Nov 28 07:15:20 crc kubenswrapper[4922]: I1128 07:15:20.024359 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e07cd6b9-6f1b-40ed-97ea-f81c981b6967-config\") pod \"dnsmasq-dns-6cd6764b85-lqks7\" (UID: \"e07cd6b9-6f1b-40ed-97ea-f81c981b6967\") " pod="openstack/dnsmasq-dns-6cd6764b85-lqks7" Nov 28 07:15:20 crc kubenswrapper[4922]: I1128 07:15:20.024479 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6bjqg\" (UniqueName: \"kubernetes.io/projected/e07cd6b9-6f1b-40ed-97ea-f81c981b6967-kube-api-access-6bjqg\") pod \"dnsmasq-dns-6cd6764b85-lqks7\" (UID: \"e07cd6b9-6f1b-40ed-97ea-f81c981b6967\") " pod="openstack/dnsmasq-dns-6cd6764b85-lqks7" Nov 28 07:15:20 crc kubenswrapper[4922]: I1128 07:15:20.024510 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/e07cd6b9-6f1b-40ed-97ea-f81c981b6967-dns-swift-storage-0\") pod \"dnsmasq-dns-6cd6764b85-lqks7\" (UID: \"e07cd6b9-6f1b-40ed-97ea-f81c981b6967\") " pod="openstack/dnsmasq-dns-6cd6764b85-lqks7" Nov 28 07:15:20 crc kubenswrapper[4922]: I1128 07:15:20.024533 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/e07cd6b9-6f1b-40ed-97ea-f81c981b6967-ovsdbserver-nb\") pod \"dnsmasq-dns-6cd6764b85-lqks7\" (UID: \"e07cd6b9-6f1b-40ed-97ea-f81c981b6967\") " pod="openstack/dnsmasq-dns-6cd6764b85-lqks7" Nov 28 07:15:20 crc kubenswrapper[4922]: I1128 07:15:20.024562 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/e07cd6b9-6f1b-40ed-97ea-f81c981b6967-ovsdbserver-sb\") pod \"dnsmasq-dns-6cd6764b85-lqks7\" (UID: \"e07cd6b9-6f1b-40ed-97ea-f81c981b6967\") " pod="openstack/dnsmasq-dns-6cd6764b85-lqks7" Nov 28 07:15:20 crc kubenswrapper[4922]: I1128 07:15:20.024587 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/e07cd6b9-6f1b-40ed-97ea-f81c981b6967-dns-svc\") pod \"dnsmasq-dns-6cd6764b85-lqks7\" (UID: \"e07cd6b9-6f1b-40ed-97ea-f81c981b6967\") " pod="openstack/dnsmasq-dns-6cd6764b85-lqks7" Nov 28 07:15:20 crc kubenswrapper[4922]: I1128 07:15:20.025357 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e07cd6b9-6f1b-40ed-97ea-f81c981b6967-config\") pod \"dnsmasq-dns-6cd6764b85-lqks7\" (UID: \"e07cd6b9-6f1b-40ed-97ea-f81c981b6967\") " pod="openstack/dnsmasq-dns-6cd6764b85-lqks7" Nov 28 07:15:20 crc kubenswrapper[4922]: I1128 07:15:20.025468 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/e07cd6b9-6f1b-40ed-97ea-f81c981b6967-dns-svc\") pod \"dnsmasq-dns-6cd6764b85-lqks7\" (UID: \"e07cd6b9-6f1b-40ed-97ea-f81c981b6967\") " pod="openstack/dnsmasq-dns-6cd6764b85-lqks7" Nov 28 07:15:20 crc kubenswrapper[4922]: I1128 07:15:20.025455 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: 
\"kubernetes.io/configmap/e07cd6b9-6f1b-40ed-97ea-f81c981b6967-dns-swift-storage-0\") pod \"dnsmasq-dns-6cd6764b85-lqks7\" (UID: \"e07cd6b9-6f1b-40ed-97ea-f81c981b6967\") " pod="openstack/dnsmasq-dns-6cd6764b85-lqks7" Nov 28 07:15:20 crc kubenswrapper[4922]: I1128 07:15:20.026190 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/e07cd6b9-6f1b-40ed-97ea-f81c981b6967-ovsdbserver-sb\") pod \"dnsmasq-dns-6cd6764b85-lqks7\" (UID: \"e07cd6b9-6f1b-40ed-97ea-f81c981b6967\") " pod="openstack/dnsmasq-dns-6cd6764b85-lqks7" Nov 28 07:15:20 crc kubenswrapper[4922]: I1128 07:15:20.028574 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/e07cd6b9-6f1b-40ed-97ea-f81c981b6967-ovsdbserver-nb\") pod \"dnsmasq-dns-6cd6764b85-lqks7\" (UID: \"e07cd6b9-6f1b-40ed-97ea-f81c981b6967\") " pod="openstack/dnsmasq-dns-6cd6764b85-lqks7" Nov 28 07:15:20 crc kubenswrapper[4922]: I1128 07:15:20.049521 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6bjqg\" (UniqueName: \"kubernetes.io/projected/e07cd6b9-6f1b-40ed-97ea-f81c981b6967-kube-api-access-6bjqg\") pod \"dnsmasq-dns-6cd6764b85-lqks7\" (UID: \"e07cd6b9-6f1b-40ed-97ea-f81c981b6967\") " pod="openstack/dnsmasq-dns-6cd6764b85-lqks7" Nov 28 07:15:20 crc kubenswrapper[4922]: I1128 07:15:20.055780 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-sync-gc24f" Nov 28 07:15:20 crc kubenswrapper[4922]: I1128 07:15:20.075723 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 28 07:15:20 crc kubenswrapper[4922]: I1128 07:15:20.088470 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-sync-4q5pb" Nov 28 07:15:20 crc kubenswrapper[4922]: I1128 07:15:20.096076 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-db-sync-4p4rg"] Nov 28 07:15:20 crc kubenswrapper[4922]: I1128 07:15:20.096858 4922 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-6cd6764b85-lqks7" Nov 28 07:15:20 crc kubenswrapper[4922]: I1128 07:15:20.117051 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-bootstrap-8z2fq"] Nov 28 07:15:20 crc kubenswrapper[4922]: W1128 07:15:20.119945 4922 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod2bf381d5_4211_44a0_8fd6_4b1c05fb690d.slice/crio-39ea7b728178336f28d719801f1e298e8f3969a4bbae3640048e674dcf9201be WatchSource:0}: Error finding container 39ea7b728178336f28d719801f1e298e8f3969a4bbae3640048e674dcf9201be: Status 404 returned error can't find the container with id 39ea7b728178336f28d719801f1e298e8f3969a4bbae3640048e674dcf9201be Nov 28 07:15:20 crc kubenswrapper[4922]: W1128 07:15:20.138465 4922 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poded81fd9d_457d_42fd_a1b3_322f8250b89c.slice/crio-4eb940f9768247254f85ba84b1499881a719b01313530d12727765e2284e4ef6 WatchSource:0}: Error finding container 4eb940f9768247254f85ba84b1499881a719b01313530d12727765e2284e4ef6: Status 404 returned error can't find the container with id 4eb940f9768247254f85ba84b1499881a719b01313530d12727765e2284e4ef6 Nov 28 07:15:20 crc kubenswrapper[4922]: I1128 07:15:20.244091 4922 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-75c8c9c785-w4vmq"] Nov 28 07:15:20 crc kubenswrapper[4922]: I1128 07:15:20.435043 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-db-sync-cj2fg"] Nov 28 07:15:20 crc kubenswrapper[4922]: I1128 07:15:20.825884 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-db-sync-4q5pb"] Nov 28 07:15:20 crc kubenswrapper[4922]: I1128 07:15:20.854419 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-db-sync-gc24f"] Nov 28 07:15:20 crc kubenswrapper[4922]: I1128 07:15:20.916123 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-6cd6764b85-lqks7"] Nov 28 07:15:20 crc kubenswrapper[4922]: W1128 07:15:20.933333 4922 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pode07cd6b9_6f1b_40ed_97ea_f81c981b6967.slice/crio-1bef8a3551cf46621a75871ad3010254d47edd8a72aa3248e15ece94117ac02d WatchSource:0}: Error finding container 1bef8a3551cf46621a75871ad3010254d47edd8a72aa3248e15ece94117ac02d: Status 404 returned error can't find the container with id 1bef8a3551cf46621a75871ad3010254d47edd8a72aa3248e15ece94117ac02d Nov 28 07:15:20 crc kubenswrapper[4922]: I1128 07:15:20.970457 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-sync-4q5pb" event={"ID":"aa053525-2f00-4415-b9b5-35948c8e5038","Type":"ContainerStarted","Data":"3dc318bcfbd9f9a58f81f07d6f76a79bd21d4e8980189c900ee6d271fa7b4d98"} Nov 28 07:15:20 crc kubenswrapper[4922]: I1128 07:15:20.971923 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-sync-cj2fg" event={"ID":"4c7a8e1f-5f3c-451e-b407-6afb6d5443e3","Type":"ContainerStarted","Data":"3a18b37121a05838619515c25b85cf8300c82f732967d64971b59cf884f40ac9"} Nov 28 07:15:20 crc kubenswrapper[4922]: I1128 07:15:20.971960 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-sync-cj2fg" 
event={"ID":"4c7a8e1f-5f3c-451e-b407-6afb6d5443e3","Type":"ContainerStarted","Data":"f0bc20c1c25aad3398a6e80cba60b7e871940515455fc975b96a00ae9502aa74"} Nov 28 07:15:20 crc kubenswrapper[4922]: I1128 07:15:20.973918 4922 generic.go:334] "Generic (PLEG): container finished" podID="517eadfa-5fbe-4763-b9cc-03f53087dff6" containerID="f45ad76480bb522a954cf473eacf7fd0e6b3b2ff8d2955c0a2fd3ffe99818be7" exitCode=0 Nov 28 07:15:20 crc kubenswrapper[4922]: I1128 07:15:20.973973 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-75c8c9c785-w4vmq" event={"ID":"517eadfa-5fbe-4763-b9cc-03f53087dff6","Type":"ContainerDied","Data":"f45ad76480bb522a954cf473eacf7fd0e6b3b2ff8d2955c0a2fd3ffe99818be7"} Nov 28 07:15:20 crc kubenswrapper[4922]: I1128 07:15:20.973997 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-75c8c9c785-w4vmq" event={"ID":"517eadfa-5fbe-4763-b9cc-03f53087dff6","Type":"ContainerStarted","Data":"3c3b80c1dc9d7d30028ccc12ce81d550d7372055555817fc4706bd52522b30ef"} Nov 28 07:15:20 crc kubenswrapper[4922]: I1128 07:15:20.981206 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-sync-4p4rg" event={"ID":"2bf381d5-4211-44a0-8fd6-4b1c05fb690d","Type":"ContainerStarted","Data":"39ea7b728178336f28d719801f1e298e8f3969a4bbae3640048e674dcf9201be"} Nov 28 07:15:20 crc kubenswrapper[4922]: I1128 07:15:20.992291 4922 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-db-sync-cj2fg" podStartSLOduration=1.992267629 podStartE2EDuration="1.992267629s" podCreationTimestamp="2025-11-28 07:15:19 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 07:15:20.991733745 +0000 UTC m=+1365.912129327" watchObservedRunningTime="2025-11-28 07:15:20.992267629 +0000 UTC m=+1365.912663211" Nov 28 07:15:20 crc kubenswrapper[4922]: I1128 07:15:20.993878 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6cd6764b85-lqks7" event={"ID":"e07cd6b9-6f1b-40ed-97ea-f81c981b6967","Type":"ContainerStarted","Data":"1bef8a3551cf46621a75871ad3010254d47edd8a72aa3248e15ece94117ac02d"} Nov 28 07:15:21 crc kubenswrapper[4922]: I1128 07:15:21.000512 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 28 07:15:21 crc kubenswrapper[4922]: I1128 07:15:21.001017 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-8z2fq" event={"ID":"ed81fd9d-457d-42fd-a1b3-322f8250b89c","Type":"ContainerStarted","Data":"cefaa4cdaf9ad55f4c7cbb99f1e2ca32d61f84e80b290f17490a70169206b776"} Nov 28 07:15:21 crc kubenswrapper[4922]: I1128 07:15:21.001063 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-8z2fq" event={"ID":"ed81fd9d-457d-42fd-a1b3-322f8250b89c","Type":"ContainerStarted","Data":"4eb940f9768247254f85ba84b1499881a719b01313530d12727765e2284e4ef6"} Nov 28 07:15:21 crc kubenswrapper[4922]: I1128 07:15:21.006411 4922 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-6dccfc5657-4j5qj" podUID="990834b6-2f6d-466d-aded-ce38de14641b" containerName="dnsmasq-dns" containerID="cri-o://f7eb228b9c7d60c7085ef8372e741156b6d5658b5d7faeee0ec645aa4beae5bf" gracePeriod=10 Nov 28 07:15:21 crc kubenswrapper[4922]: I1128 07:15:21.006507 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-sync-gc24f" 
event={"ID":"086c6cef-6f21-46b9-ace7-f06ffff84fb3","Type":"ContainerStarted","Data":"35f62c0a38a6d29c6420d875471fe7250ac35210090479b2898d208c97208c2e"} Nov 28 07:15:21 crc kubenswrapper[4922]: I1128 07:15:21.039104 4922 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-bootstrap-8z2fq" podStartSLOduration=2.039083605 podStartE2EDuration="2.039083605s" podCreationTimestamp="2025-11-28 07:15:19 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 07:15:21.035712476 +0000 UTC m=+1365.956108058" watchObservedRunningTime="2025-11-28 07:15:21.039083605 +0000 UTC m=+1365.959479187" Nov 28 07:15:22 crc kubenswrapper[4922]: I1128 07:15:21.333212 4922 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-75c8c9c785-w4vmq" Nov 28 07:15:22 crc kubenswrapper[4922]: I1128 07:15:21.466240 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-spvb7\" (UniqueName: \"kubernetes.io/projected/517eadfa-5fbe-4763-b9cc-03f53087dff6-kube-api-access-spvb7\") pod \"517eadfa-5fbe-4763-b9cc-03f53087dff6\" (UID: \"517eadfa-5fbe-4763-b9cc-03f53087dff6\") " Nov 28 07:15:22 crc kubenswrapper[4922]: I1128 07:15:21.466552 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/517eadfa-5fbe-4763-b9cc-03f53087dff6-dns-svc\") pod \"517eadfa-5fbe-4763-b9cc-03f53087dff6\" (UID: \"517eadfa-5fbe-4763-b9cc-03f53087dff6\") " Nov 28 07:15:22 crc kubenswrapper[4922]: I1128 07:15:21.466647 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/517eadfa-5fbe-4763-b9cc-03f53087dff6-ovsdbserver-sb\") pod \"517eadfa-5fbe-4763-b9cc-03f53087dff6\" (UID: \"517eadfa-5fbe-4763-b9cc-03f53087dff6\") " Nov 28 07:15:22 crc kubenswrapper[4922]: I1128 07:15:21.466697 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/517eadfa-5fbe-4763-b9cc-03f53087dff6-ovsdbserver-nb\") pod \"517eadfa-5fbe-4763-b9cc-03f53087dff6\" (UID: \"517eadfa-5fbe-4763-b9cc-03f53087dff6\") " Nov 28 07:15:22 crc kubenswrapper[4922]: I1128 07:15:21.466750 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/517eadfa-5fbe-4763-b9cc-03f53087dff6-dns-swift-storage-0\") pod \"517eadfa-5fbe-4763-b9cc-03f53087dff6\" (UID: \"517eadfa-5fbe-4763-b9cc-03f53087dff6\") " Nov 28 07:15:22 crc kubenswrapper[4922]: I1128 07:15:21.466828 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/517eadfa-5fbe-4763-b9cc-03f53087dff6-config\") pod \"517eadfa-5fbe-4763-b9cc-03f53087dff6\" (UID: \"517eadfa-5fbe-4763-b9cc-03f53087dff6\") " Nov 28 07:15:22 crc kubenswrapper[4922]: I1128 07:15:21.480386 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/517eadfa-5fbe-4763-b9cc-03f53087dff6-kube-api-access-spvb7" (OuterVolumeSpecName: "kube-api-access-spvb7") pod "517eadfa-5fbe-4763-b9cc-03f53087dff6" (UID: "517eadfa-5fbe-4763-b9cc-03f53087dff6"). InnerVolumeSpecName "kube-api-access-spvb7". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 07:15:22 crc kubenswrapper[4922]: I1128 07:15:21.519486 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/517eadfa-5fbe-4763-b9cc-03f53087dff6-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "517eadfa-5fbe-4763-b9cc-03f53087dff6" (UID: "517eadfa-5fbe-4763-b9cc-03f53087dff6"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 07:15:22 crc kubenswrapper[4922]: I1128 07:15:21.523855 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/517eadfa-5fbe-4763-b9cc-03f53087dff6-config" (OuterVolumeSpecName: "config") pod "517eadfa-5fbe-4763-b9cc-03f53087dff6" (UID: "517eadfa-5fbe-4763-b9cc-03f53087dff6"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 07:15:22 crc kubenswrapper[4922]: I1128 07:15:21.531888 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/517eadfa-5fbe-4763-b9cc-03f53087dff6-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "517eadfa-5fbe-4763-b9cc-03f53087dff6" (UID: "517eadfa-5fbe-4763-b9cc-03f53087dff6"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 07:15:22 crc kubenswrapper[4922]: I1128 07:15:21.539835 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/517eadfa-5fbe-4763-b9cc-03f53087dff6-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "517eadfa-5fbe-4763-b9cc-03f53087dff6" (UID: "517eadfa-5fbe-4763-b9cc-03f53087dff6"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 07:15:22 crc kubenswrapper[4922]: I1128 07:15:21.568753 4922 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/517eadfa-5fbe-4763-b9cc-03f53087dff6-config\") on node \"crc\" DevicePath \"\"" Nov 28 07:15:22 crc kubenswrapper[4922]: I1128 07:15:21.568789 4922 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-spvb7\" (UniqueName: \"kubernetes.io/projected/517eadfa-5fbe-4763-b9cc-03f53087dff6-kube-api-access-spvb7\") on node \"crc\" DevicePath \"\"" Nov 28 07:15:22 crc kubenswrapper[4922]: I1128 07:15:21.568805 4922 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/517eadfa-5fbe-4763-b9cc-03f53087dff6-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 28 07:15:22 crc kubenswrapper[4922]: I1128 07:15:21.568816 4922 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/517eadfa-5fbe-4763-b9cc-03f53087dff6-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 28 07:15:22 crc kubenswrapper[4922]: I1128 07:15:21.568827 4922 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/517eadfa-5fbe-4763-b9cc-03f53087dff6-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Nov 28 07:15:22 crc kubenswrapper[4922]: I1128 07:15:21.574901 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/517eadfa-5fbe-4763-b9cc-03f53087dff6-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "517eadfa-5fbe-4763-b9cc-03f53087dff6" (UID: "517eadfa-5fbe-4763-b9cc-03f53087dff6"). InnerVolumeSpecName "ovsdbserver-nb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 07:15:22 crc kubenswrapper[4922]: I1128 07:15:21.671512 4922 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/517eadfa-5fbe-4763-b9cc-03f53087dff6-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 28 07:15:22 crc kubenswrapper[4922]: I1128 07:15:22.031960 4922 generic.go:334] "Generic (PLEG): container finished" podID="e07cd6b9-6f1b-40ed-97ea-f81c981b6967" containerID="4c94b7d931beaa31868228e1f15ba327334a2d49d32e1bac4297f9cc03a469ec" exitCode=0 Nov 28 07:15:22 crc kubenswrapper[4922]: I1128 07:15:22.032069 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6cd6764b85-lqks7" event={"ID":"e07cd6b9-6f1b-40ed-97ea-f81c981b6967","Type":"ContainerDied","Data":"4c94b7d931beaa31868228e1f15ba327334a2d49d32e1bac4297f9cc03a469ec"} Nov 28 07:15:22 crc kubenswrapper[4922]: I1128 07:15:22.034682 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"be4be2de-2f46-4982-8cd6-b73888d293af","Type":"ContainerStarted","Data":"1eddf0b62cd8a11d42c7e5cc0a808e27b0d422aed71302dbb4ff35c4b7f03d47"} Nov 28 07:15:22 crc kubenswrapper[4922]: I1128 07:15:22.042890 4922 generic.go:334] "Generic (PLEG): container finished" podID="990834b6-2f6d-466d-aded-ce38de14641b" containerID="f7eb228b9c7d60c7085ef8372e741156b6d5658b5d7faeee0ec645aa4beae5bf" exitCode=0 Nov 28 07:15:22 crc kubenswrapper[4922]: I1128 07:15:22.043003 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6dccfc5657-4j5qj" event={"ID":"990834b6-2f6d-466d-aded-ce38de14641b","Type":"ContainerDied","Data":"f7eb228b9c7d60c7085ef8372e741156b6d5658b5d7faeee0ec645aa4beae5bf"} Nov 28 07:15:22 crc kubenswrapper[4922]: I1128 07:15:22.056560 4922 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-75c8c9c785-w4vmq" Nov 28 07:15:22 crc kubenswrapper[4922]: I1128 07:15:22.057797 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-75c8c9c785-w4vmq" event={"ID":"517eadfa-5fbe-4763-b9cc-03f53087dff6","Type":"ContainerDied","Data":"3c3b80c1dc9d7d30028ccc12ce81d550d7372055555817fc4706bd52522b30ef"} Nov 28 07:15:22 crc kubenswrapper[4922]: I1128 07:15:22.057850 4922 scope.go:117] "RemoveContainer" containerID="f45ad76480bb522a954cf473eacf7fd0e6b3b2ff8d2955c0a2fd3ffe99818be7" Nov 28 07:15:22 crc kubenswrapper[4922]: I1128 07:15:22.142953 4922 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-75c8c9c785-w4vmq"] Nov 28 07:15:22 crc kubenswrapper[4922]: I1128 07:15:22.153194 4922 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-75c8c9c785-w4vmq"] Nov 28 07:15:22 crc kubenswrapper[4922]: I1128 07:15:22.337917 4922 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 28 07:15:22 crc kubenswrapper[4922]: I1128 07:15:22.399702 4922 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-6dccfc5657-4j5qj" Nov 28 07:15:22 crc kubenswrapper[4922]: I1128 07:15:22.517043 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/990834b6-2f6d-466d-aded-ce38de14641b-dns-swift-storage-0\") pod \"990834b6-2f6d-466d-aded-ce38de14641b\" (UID: \"990834b6-2f6d-466d-aded-ce38de14641b\") " Nov 28 07:15:22 crc kubenswrapper[4922]: I1128 07:15:22.517451 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/990834b6-2f6d-466d-aded-ce38de14641b-ovsdbserver-sb\") pod \"990834b6-2f6d-466d-aded-ce38de14641b\" (UID: \"990834b6-2f6d-466d-aded-ce38de14641b\") " Nov 28 07:15:22 crc kubenswrapper[4922]: I1128 07:15:22.517481 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hpkr5\" (UniqueName: \"kubernetes.io/projected/990834b6-2f6d-466d-aded-ce38de14641b-kube-api-access-hpkr5\") pod \"990834b6-2f6d-466d-aded-ce38de14641b\" (UID: \"990834b6-2f6d-466d-aded-ce38de14641b\") " Nov 28 07:15:22 crc kubenswrapper[4922]: I1128 07:15:22.517518 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/990834b6-2f6d-466d-aded-ce38de14641b-ovsdbserver-nb\") pod \"990834b6-2f6d-466d-aded-ce38de14641b\" (UID: \"990834b6-2f6d-466d-aded-ce38de14641b\") " Nov 28 07:15:22 crc kubenswrapper[4922]: I1128 07:15:22.517577 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/990834b6-2f6d-466d-aded-ce38de14641b-dns-svc\") pod \"990834b6-2f6d-466d-aded-ce38de14641b\" (UID: \"990834b6-2f6d-466d-aded-ce38de14641b\") " Nov 28 07:15:22 crc kubenswrapper[4922]: I1128 07:15:22.517654 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/990834b6-2f6d-466d-aded-ce38de14641b-config\") pod \"990834b6-2f6d-466d-aded-ce38de14641b\" (UID: \"990834b6-2f6d-466d-aded-ce38de14641b\") " Nov 28 07:15:22 crc kubenswrapper[4922]: I1128 07:15:22.527441 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/990834b6-2f6d-466d-aded-ce38de14641b-kube-api-access-hpkr5" (OuterVolumeSpecName: "kube-api-access-hpkr5") pod "990834b6-2f6d-466d-aded-ce38de14641b" (UID: "990834b6-2f6d-466d-aded-ce38de14641b"). InnerVolumeSpecName "kube-api-access-hpkr5". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 07:15:22 crc kubenswrapper[4922]: I1128 07:15:22.571075 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/990834b6-2f6d-466d-aded-ce38de14641b-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "990834b6-2f6d-466d-aded-ce38de14641b" (UID: "990834b6-2f6d-466d-aded-ce38de14641b"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 07:15:22 crc kubenswrapper[4922]: I1128 07:15:22.572246 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/990834b6-2f6d-466d-aded-ce38de14641b-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "990834b6-2f6d-466d-aded-ce38de14641b" (UID: "990834b6-2f6d-466d-aded-ce38de14641b"). InnerVolumeSpecName "dns-swift-storage-0". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 07:15:22 crc kubenswrapper[4922]: I1128 07:15:22.583995 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/990834b6-2f6d-466d-aded-ce38de14641b-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "990834b6-2f6d-466d-aded-ce38de14641b" (UID: "990834b6-2f6d-466d-aded-ce38de14641b"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 07:15:22 crc kubenswrapper[4922]: I1128 07:15:22.608907 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/990834b6-2f6d-466d-aded-ce38de14641b-config" (OuterVolumeSpecName: "config") pod "990834b6-2f6d-466d-aded-ce38de14641b" (UID: "990834b6-2f6d-466d-aded-ce38de14641b"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 07:15:22 crc kubenswrapper[4922]: I1128 07:15:22.611411 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/990834b6-2f6d-466d-aded-ce38de14641b-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "990834b6-2f6d-466d-aded-ce38de14641b" (UID: "990834b6-2f6d-466d-aded-ce38de14641b"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 07:15:22 crc kubenswrapper[4922]: I1128 07:15:22.619669 4922 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/990834b6-2f6d-466d-aded-ce38de14641b-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 28 07:15:22 crc kubenswrapper[4922]: I1128 07:15:22.619693 4922 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/990834b6-2f6d-466d-aded-ce38de14641b-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 28 07:15:22 crc kubenswrapper[4922]: I1128 07:15:22.619703 4922 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/990834b6-2f6d-466d-aded-ce38de14641b-config\") on node \"crc\" DevicePath \"\"" Nov 28 07:15:22 crc kubenswrapper[4922]: I1128 07:15:22.619713 4922 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/990834b6-2f6d-466d-aded-ce38de14641b-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Nov 28 07:15:22 crc kubenswrapper[4922]: I1128 07:15:22.619721 4922 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/990834b6-2f6d-466d-aded-ce38de14641b-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 28 07:15:22 crc kubenswrapper[4922]: I1128 07:15:22.619729 4922 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hpkr5\" (UniqueName: \"kubernetes.io/projected/990834b6-2f6d-466d-aded-ce38de14641b-kube-api-access-hpkr5\") on node \"crc\" DevicePath \"\"" Nov 28 07:15:23 crc kubenswrapper[4922]: I1128 07:15:23.070883 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6dccfc5657-4j5qj" event={"ID":"990834b6-2f6d-466d-aded-ce38de14641b","Type":"ContainerDied","Data":"2ff87e4f65153a33e2adf4268759c104367f5a3a0ad74625a4a4fc6988641466"} Nov 28 07:15:23 crc kubenswrapper[4922]: I1128 07:15:23.070936 4922 scope.go:117] "RemoveContainer" containerID="f7eb228b9c7d60c7085ef8372e741156b6d5658b5d7faeee0ec645aa4beae5bf" Nov 28 07:15:23 crc kubenswrapper[4922]: I1128 07:15:23.071030 4922 util.go:48] "No ready sandbox for 
pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6dccfc5657-4j5qj" Nov 28 07:15:23 crc kubenswrapper[4922]: I1128 07:15:23.113652 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6cd6764b85-lqks7" event={"ID":"e07cd6b9-6f1b-40ed-97ea-f81c981b6967","Type":"ContainerStarted","Data":"fc22317ea027dd4ec1ec7ee83538f7895c02a88ccc4d651ef99e596d485a42c6"} Nov 28 07:15:23 crc kubenswrapper[4922]: I1128 07:15:23.114475 4922 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-6cd6764b85-lqks7" Nov 28 07:15:23 crc kubenswrapper[4922]: I1128 07:15:23.120120 4922 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-6dccfc5657-4j5qj"] Nov 28 07:15:23 crc kubenswrapper[4922]: I1128 07:15:23.132600 4922 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-6dccfc5657-4j5qj"] Nov 28 07:15:23 crc kubenswrapper[4922]: I1128 07:15:23.148257 4922 scope.go:117] "RemoveContainer" containerID="7f15ca43f2831f60e05edaa698473e4e87c4e5cc1860c85af1520f6a867a6a9f" Nov 28 07:15:23 crc kubenswrapper[4922]: I1128 07:15:23.167658 4922 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-6cd6764b85-lqks7" podStartSLOduration=4.167642248 podStartE2EDuration="4.167642248s" podCreationTimestamp="2025-11-28 07:15:19 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 07:15:23.148691104 +0000 UTC m=+1368.069086686" watchObservedRunningTime="2025-11-28 07:15:23.167642248 +0000 UTC m=+1368.088037820" Nov 28 07:15:23 crc kubenswrapper[4922]: I1128 07:15:23.422938 4922 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="517eadfa-5fbe-4763-b9cc-03f53087dff6" path="/var/lib/kubelet/pods/517eadfa-5fbe-4763-b9cc-03f53087dff6/volumes" Nov 28 07:15:23 crc kubenswrapper[4922]: I1128 07:15:23.423791 4922 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="990834b6-2f6d-466d-aded-ce38de14641b" path="/var/lib/kubelet/pods/990834b6-2f6d-466d-aded-ce38de14641b/volumes" Nov 28 07:15:24 crc kubenswrapper[4922]: I1128 07:15:24.132934 4922 generic.go:334] "Generic (PLEG): container finished" podID="2e87eedc-5f8a-46e2-bce1-c0361074a7f0" containerID="a64d396721a8963cced703ead55e89ffd3e5ecf1115b154b1c9a98c9aa628afe" exitCode=0 Nov 28 07:15:24 crc kubenswrapper[4922]: I1128 07:15:24.133031 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-sync-wk4t8" event={"ID":"2e87eedc-5f8a-46e2-bce1-c0361074a7f0","Type":"ContainerDied","Data":"a64d396721a8963cced703ead55e89ffd3e5ecf1115b154b1c9a98c9aa628afe"} Nov 28 07:15:25 crc kubenswrapper[4922]: I1128 07:15:25.153869 4922 generic.go:334] "Generic (PLEG): container finished" podID="ed81fd9d-457d-42fd-a1b3-322f8250b89c" containerID="cefaa4cdaf9ad55f4c7cbb99f1e2ca32d61f84e80b290f17490a70169206b776" exitCode=0 Nov 28 07:15:25 crc kubenswrapper[4922]: I1128 07:15:25.153943 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-8z2fq" event={"ID":"ed81fd9d-457d-42fd-a1b3-322f8250b89c","Type":"ContainerDied","Data":"cefaa4cdaf9ad55f4c7cbb99f1e2ca32d61f84e80b290f17490a70169206b776"} Nov 28 07:15:27 crc kubenswrapper[4922]: I1128 07:15:27.311716 4922 patch_prober.go:28] interesting pod/machine-config-daemon-h8wk6 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get 
\"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 07:15:27 crc kubenswrapper[4922]: I1128 07:15:27.312541 4922 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-h8wk6" podUID="0498340a-5e95-42bf-a0a6-8ac89a6b8858" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 07:15:27 crc kubenswrapper[4922]: I1128 07:15:27.805661 4922 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-sync-wk4t8" Nov 28 07:15:27 crc kubenswrapper[4922]: I1128 07:15:27.837456 4922 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-8z2fq" Nov 28 07:15:27 crc kubenswrapper[4922]: I1128 07:15:27.932822 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2e87eedc-5f8a-46e2-bce1-c0361074a7f0-combined-ca-bundle\") pod \"2e87eedc-5f8a-46e2-bce1-c0361074a7f0\" (UID: \"2e87eedc-5f8a-46e2-bce1-c0361074a7f0\") " Nov 28 07:15:27 crc kubenswrapper[4922]: I1128 07:15:27.932890 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ed81fd9d-457d-42fd-a1b3-322f8250b89c-config-data\") pod \"ed81fd9d-457d-42fd-a1b3-322f8250b89c\" (UID: \"ed81fd9d-457d-42fd-a1b3-322f8250b89c\") " Nov 28 07:15:27 crc kubenswrapper[4922]: I1128 07:15:27.932971 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2e87eedc-5f8a-46e2-bce1-c0361074a7f0-config-data\") pod \"2e87eedc-5f8a-46e2-bce1-c0361074a7f0\" (UID: \"2e87eedc-5f8a-46e2-bce1-c0361074a7f0\") " Nov 28 07:15:27 crc kubenswrapper[4922]: I1128 07:15:27.933017 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/ed81fd9d-457d-42fd-a1b3-322f8250b89c-credential-keys\") pod \"ed81fd9d-457d-42fd-a1b3-322f8250b89c\" (UID: \"ed81fd9d-457d-42fd-a1b3-322f8250b89c\") " Nov 28 07:15:27 crc kubenswrapper[4922]: I1128 07:15:27.933734 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4f6qb\" (UniqueName: \"kubernetes.io/projected/2e87eedc-5f8a-46e2-bce1-c0361074a7f0-kube-api-access-4f6qb\") pod \"2e87eedc-5f8a-46e2-bce1-c0361074a7f0\" (UID: \"2e87eedc-5f8a-46e2-bce1-c0361074a7f0\") " Nov 28 07:15:27 crc kubenswrapper[4922]: I1128 07:15:27.933985 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/ed81fd9d-457d-42fd-a1b3-322f8250b89c-fernet-keys\") pod \"ed81fd9d-457d-42fd-a1b3-322f8250b89c\" (UID: \"ed81fd9d-457d-42fd-a1b3-322f8250b89c\") " Nov 28 07:15:27 crc kubenswrapper[4922]: I1128 07:15:27.934005 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ed81fd9d-457d-42fd-a1b3-322f8250b89c-scripts\") pod \"ed81fd9d-457d-42fd-a1b3-322f8250b89c\" (UID: \"ed81fd9d-457d-42fd-a1b3-322f8250b89c\") " Nov 28 07:15:27 crc kubenswrapper[4922]: I1128 07:15:27.934020 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/ed81fd9d-457d-42fd-a1b3-322f8250b89c-combined-ca-bundle\") pod \"ed81fd9d-457d-42fd-a1b3-322f8250b89c\" (UID: \"ed81fd9d-457d-42fd-a1b3-322f8250b89c\") " Nov 28 07:15:27 crc kubenswrapper[4922]: I1128 07:15:27.934045 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dntcl\" (UniqueName: \"kubernetes.io/projected/ed81fd9d-457d-42fd-a1b3-322f8250b89c-kube-api-access-dntcl\") pod \"ed81fd9d-457d-42fd-a1b3-322f8250b89c\" (UID: \"ed81fd9d-457d-42fd-a1b3-322f8250b89c\") " Nov 28 07:15:27 crc kubenswrapper[4922]: I1128 07:15:27.934724 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/2e87eedc-5f8a-46e2-bce1-c0361074a7f0-db-sync-config-data\") pod \"2e87eedc-5f8a-46e2-bce1-c0361074a7f0\" (UID: \"2e87eedc-5f8a-46e2-bce1-c0361074a7f0\") " Nov 28 07:15:27 crc kubenswrapper[4922]: I1128 07:15:27.938485 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ed81fd9d-457d-42fd-a1b3-322f8250b89c-credential-keys" (OuterVolumeSpecName: "credential-keys") pod "ed81fd9d-457d-42fd-a1b3-322f8250b89c" (UID: "ed81fd9d-457d-42fd-a1b3-322f8250b89c"). InnerVolumeSpecName "credential-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 07:15:27 crc kubenswrapper[4922]: I1128 07:15:27.938745 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ed81fd9d-457d-42fd-a1b3-322f8250b89c-kube-api-access-dntcl" (OuterVolumeSpecName: "kube-api-access-dntcl") pod "ed81fd9d-457d-42fd-a1b3-322f8250b89c" (UID: "ed81fd9d-457d-42fd-a1b3-322f8250b89c"). InnerVolumeSpecName "kube-api-access-dntcl". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 07:15:27 crc kubenswrapper[4922]: I1128 07:15:27.940540 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2e87eedc-5f8a-46e2-bce1-c0361074a7f0-db-sync-config-data" (OuterVolumeSpecName: "db-sync-config-data") pod "2e87eedc-5f8a-46e2-bce1-c0361074a7f0" (UID: "2e87eedc-5f8a-46e2-bce1-c0361074a7f0"). InnerVolumeSpecName "db-sync-config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 07:15:27 crc kubenswrapper[4922]: I1128 07:15:27.941312 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2e87eedc-5f8a-46e2-bce1-c0361074a7f0-kube-api-access-4f6qb" (OuterVolumeSpecName: "kube-api-access-4f6qb") pod "2e87eedc-5f8a-46e2-bce1-c0361074a7f0" (UID: "2e87eedc-5f8a-46e2-bce1-c0361074a7f0"). InnerVolumeSpecName "kube-api-access-4f6qb". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 07:15:27 crc kubenswrapper[4922]: I1128 07:15:27.960254 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ed81fd9d-457d-42fd-a1b3-322f8250b89c-fernet-keys" (OuterVolumeSpecName: "fernet-keys") pod "ed81fd9d-457d-42fd-a1b3-322f8250b89c" (UID: "ed81fd9d-457d-42fd-a1b3-322f8250b89c"). InnerVolumeSpecName "fernet-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 07:15:27 crc kubenswrapper[4922]: I1128 07:15:27.961383 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ed81fd9d-457d-42fd-a1b3-322f8250b89c-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "ed81fd9d-457d-42fd-a1b3-322f8250b89c" (UID: "ed81fd9d-457d-42fd-a1b3-322f8250b89c"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 07:15:27 crc kubenswrapper[4922]: I1128 07:15:27.964838 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ed81fd9d-457d-42fd-a1b3-322f8250b89c-config-data" (OuterVolumeSpecName: "config-data") pod "ed81fd9d-457d-42fd-a1b3-322f8250b89c" (UID: "ed81fd9d-457d-42fd-a1b3-322f8250b89c"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 07:15:27 crc kubenswrapper[4922]: I1128 07:15:27.970379 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ed81fd9d-457d-42fd-a1b3-322f8250b89c-scripts" (OuterVolumeSpecName: "scripts") pod "ed81fd9d-457d-42fd-a1b3-322f8250b89c" (UID: "ed81fd9d-457d-42fd-a1b3-322f8250b89c"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 07:15:27 crc kubenswrapper[4922]: I1128 07:15:27.972431 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2e87eedc-5f8a-46e2-bce1-c0361074a7f0-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "2e87eedc-5f8a-46e2-bce1-c0361074a7f0" (UID: "2e87eedc-5f8a-46e2-bce1-c0361074a7f0"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 07:15:28 crc kubenswrapper[4922]: I1128 07:15:28.002450 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2e87eedc-5f8a-46e2-bce1-c0361074a7f0-config-data" (OuterVolumeSpecName: "config-data") pod "2e87eedc-5f8a-46e2-bce1-c0361074a7f0" (UID: "2e87eedc-5f8a-46e2-bce1-c0361074a7f0"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 07:15:28 crc kubenswrapper[4922]: I1128 07:15:28.037058 4922 reconciler_common.go:293] "Volume detached for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/ed81fd9d-457d-42fd-a1b3-322f8250b89c-fernet-keys\") on node \"crc\" DevicePath \"\"" Nov 28 07:15:28 crc kubenswrapper[4922]: I1128 07:15:28.037100 4922 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ed81fd9d-457d-42fd-a1b3-322f8250b89c-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 07:15:28 crc kubenswrapper[4922]: I1128 07:15:28.037112 4922 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ed81fd9d-457d-42fd-a1b3-322f8250b89c-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 07:15:28 crc kubenswrapper[4922]: I1128 07:15:28.037125 4922 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dntcl\" (UniqueName: \"kubernetes.io/projected/ed81fd9d-457d-42fd-a1b3-322f8250b89c-kube-api-access-dntcl\") on node \"crc\" DevicePath \"\"" Nov 28 07:15:28 crc kubenswrapper[4922]: I1128 07:15:28.037140 4922 reconciler_common.go:293] "Volume detached for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/2e87eedc-5f8a-46e2-bce1-c0361074a7f0-db-sync-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 07:15:28 crc kubenswrapper[4922]: I1128 07:15:28.037153 4922 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2e87eedc-5f8a-46e2-bce1-c0361074a7f0-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 07:15:28 crc kubenswrapper[4922]: I1128 07:15:28.037165 4922 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: 
\"kubernetes.io/secret/ed81fd9d-457d-42fd-a1b3-322f8250b89c-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 07:15:28 crc kubenswrapper[4922]: I1128 07:15:28.037176 4922 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2e87eedc-5f8a-46e2-bce1-c0361074a7f0-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 07:15:28 crc kubenswrapper[4922]: I1128 07:15:28.037185 4922 reconciler_common.go:293] "Volume detached for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/ed81fd9d-457d-42fd-a1b3-322f8250b89c-credential-keys\") on node \"crc\" DevicePath \"\"" Nov 28 07:15:28 crc kubenswrapper[4922]: I1128 07:15:28.037195 4922 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4f6qb\" (UniqueName: \"kubernetes.io/projected/2e87eedc-5f8a-46e2-bce1-c0361074a7f0-kube-api-access-4f6qb\") on node \"crc\" DevicePath \"\"" Nov 28 07:15:28 crc kubenswrapper[4922]: I1128 07:15:28.193827 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-8z2fq" event={"ID":"ed81fd9d-457d-42fd-a1b3-322f8250b89c","Type":"ContainerDied","Data":"4eb940f9768247254f85ba84b1499881a719b01313530d12727765e2284e4ef6"} Nov 28 07:15:28 crc kubenswrapper[4922]: I1128 07:15:28.193873 4922 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="4eb940f9768247254f85ba84b1499881a719b01313530d12727765e2284e4ef6" Nov 28 07:15:28 crc kubenswrapper[4922]: I1128 07:15:28.193844 4922 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-8z2fq" Nov 28 07:15:28 crc kubenswrapper[4922]: I1128 07:15:28.195750 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-sync-wk4t8" event={"ID":"2e87eedc-5f8a-46e2-bce1-c0361074a7f0","Type":"ContainerDied","Data":"7664d9cdefc78a93b2e51824bb9f221c2353941984db22ec91f05d94b3a7a35a"} Nov 28 07:15:28 crc kubenswrapper[4922]: I1128 07:15:28.195791 4922 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="7664d9cdefc78a93b2e51824bb9f221c2353941984db22ec91f05d94b3a7a35a" Nov 28 07:15:28 crc kubenswrapper[4922]: I1128 07:15:28.195833 4922 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-db-sync-wk4t8" Nov 28 07:15:29 crc kubenswrapper[4922]: I1128 07:15:29.007922 4922 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-bootstrap-8z2fq"] Nov 28 07:15:29 crc kubenswrapper[4922]: I1128 07:15:29.016963 4922 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-bootstrap-8z2fq"] Nov 28 07:15:29 crc kubenswrapper[4922]: I1128 07:15:29.103918 4922 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-bootstrap-fj8qk"] Nov 28 07:15:29 crc kubenswrapper[4922]: E1128 07:15:29.104290 4922 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ed81fd9d-457d-42fd-a1b3-322f8250b89c" containerName="keystone-bootstrap" Nov 28 07:15:29 crc kubenswrapper[4922]: I1128 07:15:29.104305 4922 state_mem.go:107] "Deleted CPUSet assignment" podUID="ed81fd9d-457d-42fd-a1b3-322f8250b89c" containerName="keystone-bootstrap" Nov 28 07:15:29 crc kubenswrapper[4922]: E1128 07:15:29.104319 4922 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="990834b6-2f6d-466d-aded-ce38de14641b" containerName="init" Nov 28 07:15:29 crc kubenswrapper[4922]: I1128 07:15:29.104325 4922 state_mem.go:107] "Deleted CPUSet assignment" podUID="990834b6-2f6d-466d-aded-ce38de14641b" containerName="init" Nov 28 07:15:29 crc kubenswrapper[4922]: E1128 07:15:29.104335 4922 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="990834b6-2f6d-466d-aded-ce38de14641b" containerName="dnsmasq-dns" Nov 28 07:15:29 crc kubenswrapper[4922]: I1128 07:15:29.104341 4922 state_mem.go:107] "Deleted CPUSet assignment" podUID="990834b6-2f6d-466d-aded-ce38de14641b" containerName="dnsmasq-dns" Nov 28 07:15:29 crc kubenswrapper[4922]: E1128 07:15:29.104349 4922 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="517eadfa-5fbe-4763-b9cc-03f53087dff6" containerName="init" Nov 28 07:15:29 crc kubenswrapper[4922]: I1128 07:15:29.104355 4922 state_mem.go:107] "Deleted CPUSet assignment" podUID="517eadfa-5fbe-4763-b9cc-03f53087dff6" containerName="init" Nov 28 07:15:29 crc kubenswrapper[4922]: E1128 07:15:29.104366 4922 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2e87eedc-5f8a-46e2-bce1-c0361074a7f0" containerName="glance-db-sync" Nov 28 07:15:29 crc kubenswrapper[4922]: I1128 07:15:29.104371 4922 state_mem.go:107] "Deleted CPUSet assignment" podUID="2e87eedc-5f8a-46e2-bce1-c0361074a7f0" containerName="glance-db-sync" Nov 28 07:15:29 crc kubenswrapper[4922]: I1128 07:15:29.104518 4922 memory_manager.go:354] "RemoveStaleState removing state" podUID="990834b6-2f6d-466d-aded-ce38de14641b" containerName="dnsmasq-dns" Nov 28 07:15:29 crc kubenswrapper[4922]: I1128 07:15:29.104533 4922 memory_manager.go:354] "RemoveStaleState removing state" podUID="ed81fd9d-457d-42fd-a1b3-322f8250b89c" containerName="keystone-bootstrap" Nov 28 07:15:29 crc kubenswrapper[4922]: I1128 07:15:29.104553 4922 memory_manager.go:354] "RemoveStaleState removing state" podUID="2e87eedc-5f8a-46e2-bce1-c0361074a7f0" containerName="glance-db-sync" Nov 28 07:15:29 crc kubenswrapper[4922]: I1128 07:15:29.104568 4922 memory_manager.go:354] "RemoveStaleState removing state" podUID="517eadfa-5fbe-4763-b9cc-03f53087dff6" containerName="init" Nov 28 07:15:29 crc kubenswrapper[4922]: I1128 07:15:29.105088 4922 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-bootstrap-fj8qk" Nov 28 07:15:29 crc kubenswrapper[4922]: I1128 07:15:29.107692 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"osp-secret" Nov 28 07:15:29 crc kubenswrapper[4922]: I1128 07:15:29.107941 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-config-data" Nov 28 07:15:29 crc kubenswrapper[4922]: I1128 07:15:29.108076 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone" Nov 28 07:15:29 crc kubenswrapper[4922]: I1128 07:15:29.114230 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-keystone-dockercfg-vx657" Nov 28 07:15:29 crc kubenswrapper[4922]: I1128 07:15:29.114634 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-scripts" Nov 28 07:15:29 crc kubenswrapper[4922]: I1128 07:15:29.119558 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-bootstrap-fj8qk"] Nov 28 07:15:29 crc kubenswrapper[4922]: I1128 07:15:29.158275 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/04965f12-78a7-459e-bbd6-0c716678f561-credential-keys\") pod \"keystone-bootstrap-fj8qk\" (UID: \"04965f12-78a7-459e-bbd6-0c716678f561\") " pod="openstack/keystone-bootstrap-fj8qk" Nov 28 07:15:29 crc kubenswrapper[4922]: I1128 07:15:29.158334 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/04965f12-78a7-459e-bbd6-0c716678f561-scripts\") pod \"keystone-bootstrap-fj8qk\" (UID: \"04965f12-78a7-459e-bbd6-0c716678f561\") " pod="openstack/keystone-bootstrap-fj8qk" Nov 28 07:15:29 crc kubenswrapper[4922]: I1128 07:15:29.158367 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/04965f12-78a7-459e-bbd6-0c716678f561-combined-ca-bundle\") pod \"keystone-bootstrap-fj8qk\" (UID: \"04965f12-78a7-459e-bbd6-0c716678f561\") " pod="openstack/keystone-bootstrap-fj8qk" Nov 28 07:15:29 crc kubenswrapper[4922]: I1128 07:15:29.158387 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-59l5n\" (UniqueName: \"kubernetes.io/projected/04965f12-78a7-459e-bbd6-0c716678f561-kube-api-access-59l5n\") pod \"keystone-bootstrap-fj8qk\" (UID: \"04965f12-78a7-459e-bbd6-0c716678f561\") " pod="openstack/keystone-bootstrap-fj8qk" Nov 28 07:15:29 crc kubenswrapper[4922]: I1128 07:15:29.158409 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/04965f12-78a7-459e-bbd6-0c716678f561-fernet-keys\") pod \"keystone-bootstrap-fj8qk\" (UID: \"04965f12-78a7-459e-bbd6-0c716678f561\") " pod="openstack/keystone-bootstrap-fj8qk" Nov 28 07:15:29 crc kubenswrapper[4922]: I1128 07:15:29.158438 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/04965f12-78a7-459e-bbd6-0c716678f561-config-data\") pod \"keystone-bootstrap-fj8qk\" (UID: \"04965f12-78a7-459e-bbd6-0c716678f561\") " pod="openstack/keystone-bootstrap-fj8qk" Nov 28 07:15:29 crc kubenswrapper[4922]: I1128 07:15:29.261688 4922 reconciler_common.go:218] "operationExecutor.MountVolume started 
for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/04965f12-78a7-459e-bbd6-0c716678f561-credential-keys\") pod \"keystone-bootstrap-fj8qk\" (UID: \"04965f12-78a7-459e-bbd6-0c716678f561\") " pod="openstack/keystone-bootstrap-fj8qk" Nov 28 07:15:29 crc kubenswrapper[4922]: I1128 07:15:29.261761 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/04965f12-78a7-459e-bbd6-0c716678f561-scripts\") pod \"keystone-bootstrap-fj8qk\" (UID: \"04965f12-78a7-459e-bbd6-0c716678f561\") " pod="openstack/keystone-bootstrap-fj8qk" Nov 28 07:15:29 crc kubenswrapper[4922]: I1128 07:15:29.261810 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-59l5n\" (UniqueName: \"kubernetes.io/projected/04965f12-78a7-459e-bbd6-0c716678f561-kube-api-access-59l5n\") pod \"keystone-bootstrap-fj8qk\" (UID: \"04965f12-78a7-459e-bbd6-0c716678f561\") " pod="openstack/keystone-bootstrap-fj8qk" Nov 28 07:15:29 crc kubenswrapper[4922]: I1128 07:15:29.261831 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/04965f12-78a7-459e-bbd6-0c716678f561-combined-ca-bundle\") pod \"keystone-bootstrap-fj8qk\" (UID: \"04965f12-78a7-459e-bbd6-0c716678f561\") " pod="openstack/keystone-bootstrap-fj8qk" Nov 28 07:15:29 crc kubenswrapper[4922]: I1128 07:15:29.261861 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/04965f12-78a7-459e-bbd6-0c716678f561-fernet-keys\") pod \"keystone-bootstrap-fj8qk\" (UID: \"04965f12-78a7-459e-bbd6-0c716678f561\") " pod="openstack/keystone-bootstrap-fj8qk" Nov 28 07:15:29 crc kubenswrapper[4922]: I1128 07:15:29.261900 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/04965f12-78a7-459e-bbd6-0c716678f561-config-data\") pod \"keystone-bootstrap-fj8qk\" (UID: \"04965f12-78a7-459e-bbd6-0c716678f561\") " pod="openstack/keystone-bootstrap-fj8qk" Nov 28 07:15:29 crc kubenswrapper[4922]: I1128 07:15:29.280773 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/04965f12-78a7-459e-bbd6-0c716678f561-config-data\") pod \"keystone-bootstrap-fj8qk\" (UID: \"04965f12-78a7-459e-bbd6-0c716678f561\") " pod="openstack/keystone-bootstrap-fj8qk" Nov 28 07:15:29 crc kubenswrapper[4922]: I1128 07:15:29.282878 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/04965f12-78a7-459e-bbd6-0c716678f561-combined-ca-bundle\") pod \"keystone-bootstrap-fj8qk\" (UID: \"04965f12-78a7-459e-bbd6-0c716678f561\") " pod="openstack/keystone-bootstrap-fj8qk" Nov 28 07:15:29 crc kubenswrapper[4922]: I1128 07:15:29.284056 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/04965f12-78a7-459e-bbd6-0c716678f561-fernet-keys\") pod \"keystone-bootstrap-fj8qk\" (UID: \"04965f12-78a7-459e-bbd6-0c716678f561\") " pod="openstack/keystone-bootstrap-fj8qk" Nov 28 07:15:29 crc kubenswrapper[4922]: I1128 07:15:29.287593 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/04965f12-78a7-459e-bbd6-0c716678f561-credential-keys\") pod \"keystone-bootstrap-fj8qk\" (UID: 
\"04965f12-78a7-459e-bbd6-0c716678f561\") " pod="openstack/keystone-bootstrap-fj8qk" Nov 28 07:15:29 crc kubenswrapper[4922]: I1128 07:15:29.290408 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/04965f12-78a7-459e-bbd6-0c716678f561-scripts\") pod \"keystone-bootstrap-fj8qk\" (UID: \"04965f12-78a7-459e-bbd6-0c716678f561\") " pod="openstack/keystone-bootstrap-fj8qk" Nov 28 07:15:29 crc kubenswrapper[4922]: I1128 07:15:29.305587 4922 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-6cd6764b85-lqks7"] Nov 28 07:15:29 crc kubenswrapper[4922]: I1128 07:15:29.313584 4922 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-6cd6764b85-lqks7" podUID="e07cd6b9-6f1b-40ed-97ea-f81c981b6967" containerName="dnsmasq-dns" containerID="cri-o://fc22317ea027dd4ec1ec7ee83538f7895c02a88ccc4d651ef99e596d485a42c6" gracePeriod=10 Nov 28 07:15:29 crc kubenswrapper[4922]: I1128 07:15:29.326642 4922 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-6cd6764b85-lqks7" Nov 28 07:15:29 crc kubenswrapper[4922]: I1128 07:15:29.344587 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-59l5n\" (UniqueName: \"kubernetes.io/projected/04965f12-78a7-459e-bbd6-0c716678f561-kube-api-access-59l5n\") pod \"keystone-bootstrap-fj8qk\" (UID: \"04965f12-78a7-459e-bbd6-0c716678f561\") " pod="openstack/keystone-bootstrap-fj8qk" Nov 28 07:15:29 crc kubenswrapper[4922]: I1128 07:15:29.368672 4922 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-5f5d458b55-bzzwb"] Nov 28 07:15:29 crc kubenswrapper[4922]: I1128 07:15:29.370007 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5f5d458b55-bzzwb" Nov 28 07:15:29 crc kubenswrapper[4922]: I1128 07:15:29.407437 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5f5d458b55-bzzwb"] Nov 28 07:15:29 crc kubenswrapper[4922]: I1128 07:15:29.443908 4922 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ed81fd9d-457d-42fd-a1b3-322f8250b89c" path="/var/lib/kubelet/pods/ed81fd9d-457d-42fd-a1b3-322f8250b89c/volumes" Nov 28 07:15:29 crc kubenswrapper[4922]: I1128 07:15:29.455657 4922 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-bootstrap-fj8qk" Nov 28 07:15:29 crc kubenswrapper[4922]: I1128 07:15:29.491830 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/2ee8a355-4756-4c55-89fe-0bb0bea586e8-ovsdbserver-sb\") pod \"dnsmasq-dns-5f5d458b55-bzzwb\" (UID: \"2ee8a355-4756-4c55-89fe-0bb0bea586e8\") " pod="openstack/dnsmasq-dns-5f5d458b55-bzzwb" Nov 28 07:15:29 crc kubenswrapper[4922]: I1128 07:15:29.491875 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/2ee8a355-4756-4c55-89fe-0bb0bea586e8-ovsdbserver-nb\") pod \"dnsmasq-dns-5f5d458b55-bzzwb\" (UID: \"2ee8a355-4756-4c55-89fe-0bb0bea586e8\") " pod="openstack/dnsmasq-dns-5f5d458b55-bzzwb" Nov 28 07:15:29 crc kubenswrapper[4922]: I1128 07:15:29.491941 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/2ee8a355-4756-4c55-89fe-0bb0bea586e8-dns-swift-storage-0\") pod \"dnsmasq-dns-5f5d458b55-bzzwb\" (UID: \"2ee8a355-4756-4c55-89fe-0bb0bea586e8\") " pod="openstack/dnsmasq-dns-5f5d458b55-bzzwb" Nov 28 07:15:29 crc kubenswrapper[4922]: I1128 07:15:29.491996 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/2ee8a355-4756-4c55-89fe-0bb0bea586e8-dns-svc\") pod \"dnsmasq-dns-5f5d458b55-bzzwb\" (UID: \"2ee8a355-4756-4c55-89fe-0bb0bea586e8\") " pod="openstack/dnsmasq-dns-5f5d458b55-bzzwb" Nov 28 07:15:29 crc kubenswrapper[4922]: I1128 07:15:29.492027 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gjktw\" (UniqueName: \"kubernetes.io/projected/2ee8a355-4756-4c55-89fe-0bb0bea586e8-kube-api-access-gjktw\") pod \"dnsmasq-dns-5f5d458b55-bzzwb\" (UID: \"2ee8a355-4756-4c55-89fe-0bb0bea586e8\") " pod="openstack/dnsmasq-dns-5f5d458b55-bzzwb" Nov 28 07:15:29 crc kubenswrapper[4922]: I1128 07:15:29.492054 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2ee8a355-4756-4c55-89fe-0bb0bea586e8-config\") pod \"dnsmasq-dns-5f5d458b55-bzzwb\" (UID: \"2ee8a355-4756-4c55-89fe-0bb0bea586e8\") " pod="openstack/dnsmasq-dns-5f5d458b55-bzzwb" Nov 28 07:15:29 crc kubenswrapper[4922]: I1128 07:15:29.594882 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/2ee8a355-4756-4c55-89fe-0bb0bea586e8-dns-svc\") pod \"dnsmasq-dns-5f5d458b55-bzzwb\" (UID: \"2ee8a355-4756-4c55-89fe-0bb0bea586e8\") " pod="openstack/dnsmasq-dns-5f5d458b55-bzzwb" Nov 28 07:15:29 crc kubenswrapper[4922]: I1128 07:15:29.594960 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gjktw\" (UniqueName: \"kubernetes.io/projected/2ee8a355-4756-4c55-89fe-0bb0bea586e8-kube-api-access-gjktw\") pod \"dnsmasq-dns-5f5d458b55-bzzwb\" (UID: \"2ee8a355-4756-4c55-89fe-0bb0bea586e8\") " pod="openstack/dnsmasq-dns-5f5d458b55-bzzwb" Nov 28 07:15:29 crc kubenswrapper[4922]: I1128 07:15:29.594989 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2ee8a355-4756-4c55-89fe-0bb0bea586e8-config\") pod 
\"dnsmasq-dns-5f5d458b55-bzzwb\" (UID: \"2ee8a355-4756-4c55-89fe-0bb0bea586e8\") " pod="openstack/dnsmasq-dns-5f5d458b55-bzzwb" Nov 28 07:15:29 crc kubenswrapper[4922]: I1128 07:15:29.595075 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/2ee8a355-4756-4c55-89fe-0bb0bea586e8-ovsdbserver-sb\") pod \"dnsmasq-dns-5f5d458b55-bzzwb\" (UID: \"2ee8a355-4756-4c55-89fe-0bb0bea586e8\") " pod="openstack/dnsmasq-dns-5f5d458b55-bzzwb" Nov 28 07:15:29 crc kubenswrapper[4922]: I1128 07:15:29.595096 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/2ee8a355-4756-4c55-89fe-0bb0bea586e8-ovsdbserver-nb\") pod \"dnsmasq-dns-5f5d458b55-bzzwb\" (UID: \"2ee8a355-4756-4c55-89fe-0bb0bea586e8\") " pod="openstack/dnsmasq-dns-5f5d458b55-bzzwb" Nov 28 07:15:29 crc kubenswrapper[4922]: I1128 07:15:29.595162 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/2ee8a355-4756-4c55-89fe-0bb0bea586e8-dns-swift-storage-0\") pod \"dnsmasq-dns-5f5d458b55-bzzwb\" (UID: \"2ee8a355-4756-4c55-89fe-0bb0bea586e8\") " pod="openstack/dnsmasq-dns-5f5d458b55-bzzwb" Nov 28 07:15:29 crc kubenswrapper[4922]: I1128 07:15:29.595993 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/2ee8a355-4756-4c55-89fe-0bb0bea586e8-dns-swift-storage-0\") pod \"dnsmasq-dns-5f5d458b55-bzzwb\" (UID: \"2ee8a355-4756-4c55-89fe-0bb0bea586e8\") " pod="openstack/dnsmasq-dns-5f5d458b55-bzzwb" Nov 28 07:15:29 crc kubenswrapper[4922]: I1128 07:15:29.596553 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/2ee8a355-4756-4c55-89fe-0bb0bea586e8-dns-svc\") pod \"dnsmasq-dns-5f5d458b55-bzzwb\" (UID: \"2ee8a355-4756-4c55-89fe-0bb0bea586e8\") " pod="openstack/dnsmasq-dns-5f5d458b55-bzzwb" Nov 28 07:15:29 crc kubenswrapper[4922]: I1128 07:15:29.597348 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2ee8a355-4756-4c55-89fe-0bb0bea586e8-config\") pod \"dnsmasq-dns-5f5d458b55-bzzwb\" (UID: \"2ee8a355-4756-4c55-89fe-0bb0bea586e8\") " pod="openstack/dnsmasq-dns-5f5d458b55-bzzwb" Nov 28 07:15:29 crc kubenswrapper[4922]: I1128 07:15:29.597986 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/2ee8a355-4756-4c55-89fe-0bb0bea586e8-ovsdbserver-sb\") pod \"dnsmasq-dns-5f5d458b55-bzzwb\" (UID: \"2ee8a355-4756-4c55-89fe-0bb0bea586e8\") " pod="openstack/dnsmasq-dns-5f5d458b55-bzzwb" Nov 28 07:15:29 crc kubenswrapper[4922]: I1128 07:15:29.598648 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/2ee8a355-4756-4c55-89fe-0bb0bea586e8-ovsdbserver-nb\") pod \"dnsmasq-dns-5f5d458b55-bzzwb\" (UID: \"2ee8a355-4756-4c55-89fe-0bb0bea586e8\") " pod="openstack/dnsmasq-dns-5f5d458b55-bzzwb" Nov 28 07:15:29 crc kubenswrapper[4922]: I1128 07:15:29.622883 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gjktw\" (UniqueName: \"kubernetes.io/projected/2ee8a355-4756-4c55-89fe-0bb0bea586e8-kube-api-access-gjktw\") pod \"dnsmasq-dns-5f5d458b55-bzzwb\" (UID: \"2ee8a355-4756-4c55-89fe-0bb0bea586e8\") " 
pod="openstack/dnsmasq-dns-5f5d458b55-bzzwb" Nov 28 07:15:29 crc kubenswrapper[4922]: I1128 07:15:29.791799 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5f5d458b55-bzzwb" Nov 28 07:15:30 crc kubenswrapper[4922]: I1128 07:15:30.101310 4922 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-6cd6764b85-lqks7" podUID="e07cd6b9-6f1b-40ed-97ea-f81c981b6967" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.143:5353: connect: connection refused" Nov 28 07:15:30 crc kubenswrapper[4922]: I1128 07:15:30.129528 4922 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-external-api-0"] Nov 28 07:15:30 crc kubenswrapper[4922]: I1128 07:15:30.130797 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 28 07:15:30 crc kubenswrapper[4922]: I1128 07:15:30.135881 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-glance-dockercfg-fnqzv" Nov 28 07:15:30 crc kubenswrapper[4922]: I1128 07:15:30.136033 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-external-config-data" Nov 28 07:15:30 crc kubenswrapper[4922]: I1128 07:15:30.136863 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-scripts" Nov 28 07:15:30 crc kubenswrapper[4922]: I1128 07:15:30.150780 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 28 07:15:30 crc kubenswrapper[4922]: I1128 07:15:30.206126 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/dcbb1549-6821-4dd8-b4b9-df263325b1d0-config-data\") pod \"glance-default-external-api-0\" (UID: \"dcbb1549-6821-4dd8-b4b9-df263325b1d0\") " pod="openstack/glance-default-external-api-0" Nov 28 07:15:30 crc kubenswrapper[4922]: I1128 07:15:30.206472 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4z656\" (UniqueName: \"kubernetes.io/projected/dcbb1549-6821-4dd8-b4b9-df263325b1d0-kube-api-access-4z656\") pod \"glance-default-external-api-0\" (UID: \"dcbb1549-6821-4dd8-b4b9-df263325b1d0\") " pod="openstack/glance-default-external-api-0" Nov 28 07:15:30 crc kubenswrapper[4922]: I1128 07:15:30.206495 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/dcbb1549-6821-4dd8-b4b9-df263325b1d0-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"dcbb1549-6821-4dd8-b4b9-df263325b1d0\") " pod="openstack/glance-default-external-api-0" Nov 28 07:15:30 crc kubenswrapper[4922]: I1128 07:15:30.206528 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/dcbb1549-6821-4dd8-b4b9-df263325b1d0-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"dcbb1549-6821-4dd8-b4b9-df263325b1d0\") " pod="openstack/glance-default-external-api-0" Nov 28 07:15:30 crc kubenswrapper[4922]: I1128 07:15:30.206555 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/dcbb1549-6821-4dd8-b4b9-df263325b1d0-logs\") pod \"glance-default-external-api-0\" (UID: 
\"dcbb1549-6821-4dd8-b4b9-df263325b1d0\") " pod="openstack/glance-default-external-api-0" Nov 28 07:15:30 crc kubenswrapper[4922]: I1128 07:15:30.206578 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/dcbb1549-6821-4dd8-b4b9-df263325b1d0-scripts\") pod \"glance-default-external-api-0\" (UID: \"dcbb1549-6821-4dd8-b4b9-df263325b1d0\") " pod="openstack/glance-default-external-api-0" Nov 28 07:15:30 crc kubenswrapper[4922]: I1128 07:15:30.206624 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") pod \"glance-default-external-api-0\" (UID: \"dcbb1549-6821-4dd8-b4b9-df263325b1d0\") " pod="openstack/glance-default-external-api-0" Nov 28 07:15:30 crc kubenswrapper[4922]: I1128 07:15:30.235807 4922 generic.go:334] "Generic (PLEG): container finished" podID="e07cd6b9-6f1b-40ed-97ea-f81c981b6967" containerID="fc22317ea027dd4ec1ec7ee83538f7895c02a88ccc4d651ef99e596d485a42c6" exitCode=0 Nov 28 07:15:30 crc kubenswrapper[4922]: I1128 07:15:30.235853 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6cd6764b85-lqks7" event={"ID":"e07cd6b9-6f1b-40ed-97ea-f81c981b6967","Type":"ContainerDied","Data":"fc22317ea027dd4ec1ec7ee83538f7895c02a88ccc4d651ef99e596d485a42c6"} Nov 28 07:15:30 crc kubenswrapper[4922]: I1128 07:15:30.308188 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/dcbb1549-6821-4dd8-b4b9-df263325b1d0-config-data\") pod \"glance-default-external-api-0\" (UID: \"dcbb1549-6821-4dd8-b4b9-df263325b1d0\") " pod="openstack/glance-default-external-api-0" Nov 28 07:15:30 crc kubenswrapper[4922]: I1128 07:15:30.308308 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4z656\" (UniqueName: \"kubernetes.io/projected/dcbb1549-6821-4dd8-b4b9-df263325b1d0-kube-api-access-4z656\") pod \"glance-default-external-api-0\" (UID: \"dcbb1549-6821-4dd8-b4b9-df263325b1d0\") " pod="openstack/glance-default-external-api-0" Nov 28 07:15:30 crc kubenswrapper[4922]: I1128 07:15:30.308339 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/dcbb1549-6821-4dd8-b4b9-df263325b1d0-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"dcbb1549-6821-4dd8-b4b9-df263325b1d0\") " pod="openstack/glance-default-external-api-0" Nov 28 07:15:30 crc kubenswrapper[4922]: I1128 07:15:30.308388 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/dcbb1549-6821-4dd8-b4b9-df263325b1d0-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"dcbb1549-6821-4dd8-b4b9-df263325b1d0\") " pod="openstack/glance-default-external-api-0" Nov 28 07:15:30 crc kubenswrapper[4922]: I1128 07:15:30.308422 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/dcbb1549-6821-4dd8-b4b9-df263325b1d0-logs\") pod \"glance-default-external-api-0\" (UID: \"dcbb1549-6821-4dd8-b4b9-df263325b1d0\") " pod="openstack/glance-default-external-api-0" Nov 28 07:15:30 crc kubenswrapper[4922]: I1128 07:15:30.308458 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: 
\"kubernetes.io/secret/dcbb1549-6821-4dd8-b4b9-df263325b1d0-scripts\") pod \"glance-default-external-api-0\" (UID: \"dcbb1549-6821-4dd8-b4b9-df263325b1d0\") " pod="openstack/glance-default-external-api-0" Nov 28 07:15:30 crc kubenswrapper[4922]: I1128 07:15:30.308525 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") pod \"glance-default-external-api-0\" (UID: \"dcbb1549-6821-4dd8-b4b9-df263325b1d0\") " pod="openstack/glance-default-external-api-0" Nov 28 07:15:30 crc kubenswrapper[4922]: I1128 07:15:30.308853 4922 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") pod \"glance-default-external-api-0\" (UID: \"dcbb1549-6821-4dd8-b4b9-df263325b1d0\") device mount path \"/mnt/openstack/pv11\"" pod="openstack/glance-default-external-api-0" Nov 28 07:15:30 crc kubenswrapper[4922]: I1128 07:15:30.309015 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/dcbb1549-6821-4dd8-b4b9-df263325b1d0-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"dcbb1549-6821-4dd8-b4b9-df263325b1d0\") " pod="openstack/glance-default-external-api-0" Nov 28 07:15:30 crc kubenswrapper[4922]: I1128 07:15:30.309275 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/dcbb1549-6821-4dd8-b4b9-df263325b1d0-logs\") pod \"glance-default-external-api-0\" (UID: \"dcbb1549-6821-4dd8-b4b9-df263325b1d0\") " pod="openstack/glance-default-external-api-0" Nov 28 07:15:30 crc kubenswrapper[4922]: I1128 07:15:30.313794 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/dcbb1549-6821-4dd8-b4b9-df263325b1d0-scripts\") pod \"glance-default-external-api-0\" (UID: \"dcbb1549-6821-4dd8-b4b9-df263325b1d0\") " pod="openstack/glance-default-external-api-0" Nov 28 07:15:30 crc kubenswrapper[4922]: I1128 07:15:30.316522 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/dcbb1549-6821-4dd8-b4b9-df263325b1d0-config-data\") pod \"glance-default-external-api-0\" (UID: \"dcbb1549-6821-4dd8-b4b9-df263325b1d0\") " pod="openstack/glance-default-external-api-0" Nov 28 07:15:30 crc kubenswrapper[4922]: I1128 07:15:30.322086 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/dcbb1549-6821-4dd8-b4b9-df263325b1d0-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"dcbb1549-6821-4dd8-b4b9-df263325b1d0\") " pod="openstack/glance-default-external-api-0" Nov 28 07:15:30 crc kubenswrapper[4922]: I1128 07:15:30.330076 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4z656\" (UniqueName: \"kubernetes.io/projected/dcbb1549-6821-4dd8-b4b9-df263325b1d0-kube-api-access-4z656\") pod \"glance-default-external-api-0\" (UID: \"dcbb1549-6821-4dd8-b4b9-df263325b1d0\") " pod="openstack/glance-default-external-api-0" Nov 28 07:15:30 crc kubenswrapper[4922]: I1128 07:15:30.349174 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") pod \"glance-default-external-api-0\" (UID: \"dcbb1549-6821-4dd8-b4b9-df263325b1d0\") " 
pod="openstack/glance-default-external-api-0" Nov 28 07:15:30 crc kubenswrapper[4922]: I1128 07:15:30.453165 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 28 07:15:30 crc kubenswrapper[4922]: I1128 07:15:30.652131 4922 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 28 07:15:30 crc kubenswrapper[4922]: I1128 07:15:30.653984 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Nov 28 07:15:30 crc kubenswrapper[4922]: I1128 07:15:30.656078 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-internal-config-data" Nov 28 07:15:30 crc kubenswrapper[4922]: I1128 07:15:30.663119 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 28 07:15:30 crc kubenswrapper[4922]: I1128 07:15:30.715650 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/acf55119-1851-4271-ae26-4d21ccf6552d-config-data\") pod \"glance-default-internal-api-0\" (UID: \"acf55119-1851-4271-ae26-4d21ccf6552d\") " pod="openstack/glance-default-internal-api-0" Nov 28 07:15:30 crc kubenswrapper[4922]: I1128 07:15:30.715717 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/acf55119-1851-4271-ae26-4d21ccf6552d-logs\") pod \"glance-default-internal-api-0\" (UID: \"acf55119-1851-4271-ae26-4d21ccf6552d\") " pod="openstack/glance-default-internal-api-0" Nov 28 07:15:30 crc kubenswrapper[4922]: I1128 07:15:30.715871 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/acf55119-1851-4271-ae26-4d21ccf6552d-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"acf55119-1851-4271-ae26-4d21ccf6552d\") " pod="openstack/glance-default-internal-api-0" Nov 28 07:15:30 crc kubenswrapper[4922]: I1128 07:15:30.715920 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zwbxb\" (UniqueName: \"kubernetes.io/projected/acf55119-1851-4271-ae26-4d21ccf6552d-kube-api-access-zwbxb\") pod \"glance-default-internal-api-0\" (UID: \"acf55119-1851-4271-ae26-4d21ccf6552d\") " pod="openstack/glance-default-internal-api-0" Nov 28 07:15:30 crc kubenswrapper[4922]: I1128 07:15:30.716002 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/acf55119-1851-4271-ae26-4d21ccf6552d-scripts\") pod \"glance-default-internal-api-0\" (UID: \"acf55119-1851-4271-ae26-4d21ccf6552d\") " pod="openstack/glance-default-internal-api-0" Nov 28 07:15:30 crc kubenswrapper[4922]: I1128 07:15:30.716232 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"glance-default-internal-api-0\" (UID: \"acf55119-1851-4271-ae26-4d21ccf6552d\") " pod="openstack/glance-default-internal-api-0" Nov 28 07:15:30 crc kubenswrapper[4922]: I1128 07:15:30.716266 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/acf55119-1851-4271-ae26-4d21ccf6552d-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"acf55119-1851-4271-ae26-4d21ccf6552d\") " pod="openstack/glance-default-internal-api-0" Nov 28 07:15:30 crc kubenswrapper[4922]: I1128 07:15:30.818903 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"glance-default-internal-api-0\" (UID: \"acf55119-1851-4271-ae26-4d21ccf6552d\") " pod="openstack/glance-default-internal-api-0" Nov 28 07:15:30 crc kubenswrapper[4922]: I1128 07:15:30.818968 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/acf55119-1851-4271-ae26-4d21ccf6552d-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"acf55119-1851-4271-ae26-4d21ccf6552d\") " pod="openstack/glance-default-internal-api-0" Nov 28 07:15:30 crc kubenswrapper[4922]: I1128 07:15:30.819265 4922 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"glance-default-internal-api-0\" (UID: \"acf55119-1851-4271-ae26-4d21ccf6552d\") device mount path \"/mnt/openstack/pv10\"" pod="openstack/glance-default-internal-api-0" Nov 28 07:15:30 crc kubenswrapper[4922]: I1128 07:15:30.819848 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/acf55119-1851-4271-ae26-4d21ccf6552d-config-data\") pod \"glance-default-internal-api-0\" (UID: \"acf55119-1851-4271-ae26-4d21ccf6552d\") " pod="openstack/glance-default-internal-api-0" Nov 28 07:15:30 crc kubenswrapper[4922]: I1128 07:15:30.819998 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/acf55119-1851-4271-ae26-4d21ccf6552d-logs\") pod \"glance-default-internal-api-0\" (UID: \"acf55119-1851-4271-ae26-4d21ccf6552d\") " pod="openstack/glance-default-internal-api-0" Nov 28 07:15:30 crc kubenswrapper[4922]: I1128 07:15:30.820077 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/acf55119-1851-4271-ae26-4d21ccf6552d-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"acf55119-1851-4271-ae26-4d21ccf6552d\") " pod="openstack/glance-default-internal-api-0" Nov 28 07:15:30 crc kubenswrapper[4922]: I1128 07:15:30.820113 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zwbxb\" (UniqueName: \"kubernetes.io/projected/acf55119-1851-4271-ae26-4d21ccf6552d-kube-api-access-zwbxb\") pod \"glance-default-internal-api-0\" (UID: \"acf55119-1851-4271-ae26-4d21ccf6552d\") " pod="openstack/glance-default-internal-api-0" Nov 28 07:15:30 crc kubenswrapper[4922]: I1128 07:15:30.820155 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/acf55119-1851-4271-ae26-4d21ccf6552d-scripts\") pod \"glance-default-internal-api-0\" (UID: \"acf55119-1851-4271-ae26-4d21ccf6552d\") " pod="openstack/glance-default-internal-api-0" Nov 28 07:15:30 crc kubenswrapper[4922]: I1128 07:15:30.820557 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/acf55119-1851-4271-ae26-4d21ccf6552d-logs\") pod 
\"glance-default-internal-api-0\" (UID: \"acf55119-1851-4271-ae26-4d21ccf6552d\") " pod="openstack/glance-default-internal-api-0" Nov 28 07:15:30 crc kubenswrapper[4922]: I1128 07:15:30.821253 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/acf55119-1851-4271-ae26-4d21ccf6552d-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"acf55119-1851-4271-ae26-4d21ccf6552d\") " pod="openstack/glance-default-internal-api-0" Nov 28 07:15:30 crc kubenswrapper[4922]: I1128 07:15:30.823973 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/acf55119-1851-4271-ae26-4d21ccf6552d-config-data\") pod \"glance-default-internal-api-0\" (UID: \"acf55119-1851-4271-ae26-4d21ccf6552d\") " pod="openstack/glance-default-internal-api-0" Nov 28 07:15:30 crc kubenswrapper[4922]: I1128 07:15:30.825697 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/acf55119-1851-4271-ae26-4d21ccf6552d-scripts\") pod \"glance-default-internal-api-0\" (UID: \"acf55119-1851-4271-ae26-4d21ccf6552d\") " pod="openstack/glance-default-internal-api-0" Nov 28 07:15:30 crc kubenswrapper[4922]: I1128 07:15:30.825698 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/acf55119-1851-4271-ae26-4d21ccf6552d-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"acf55119-1851-4271-ae26-4d21ccf6552d\") " pod="openstack/glance-default-internal-api-0" Nov 28 07:15:30 crc kubenswrapper[4922]: I1128 07:15:30.843029 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zwbxb\" (UniqueName: \"kubernetes.io/projected/acf55119-1851-4271-ae26-4d21ccf6552d-kube-api-access-zwbxb\") pod \"glance-default-internal-api-0\" (UID: \"acf55119-1851-4271-ae26-4d21ccf6552d\") " pod="openstack/glance-default-internal-api-0" Nov 28 07:15:30 crc kubenswrapper[4922]: I1128 07:15:30.870337 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"glance-default-internal-api-0\" (UID: \"acf55119-1851-4271-ae26-4d21ccf6552d\") " pod="openstack/glance-default-internal-api-0" Nov 28 07:15:30 crc kubenswrapper[4922]: I1128 07:15:30.991101 4922 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-internal-api-0" Nov 28 07:15:32 crc kubenswrapper[4922]: I1128 07:15:32.396398 4922 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 28 07:15:32 crc kubenswrapper[4922]: I1128 07:15:32.473963 4922 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 28 07:15:35 crc kubenswrapper[4922]: I1128 07:15:35.097852 4922 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-6cd6764b85-lqks7" podUID="e07cd6b9-6f1b-40ed-97ea-f81c981b6967" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.143:5353: connect: connection refused" Nov 28 07:15:40 crc kubenswrapper[4922]: I1128 07:15:40.098318 4922 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-6cd6764b85-lqks7" podUID="e07cd6b9-6f1b-40ed-97ea-f81c981b6967" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.143:5353: connect: connection refused" Nov 28 07:15:40 crc kubenswrapper[4922]: I1128 07:15:40.099204 4922 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-6cd6764b85-lqks7" Nov 28 07:15:42 crc kubenswrapper[4922]: E1128 07:15:42.133481 4922 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-ceilometer-central@sha256:2051e26a441f1ce22aeb8daa0137559d89bded994db8141c11dd580ae6d07a23" Nov 28 07:15:42 crc kubenswrapper[4922]: E1128 07:15:42.134030 4922 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:ceilometer-central-agent,Image:quay.io/podified-antelope-centos9/openstack-ceilometer-central@sha256:2051e26a441f1ce22aeb8daa0137559d89bded994db8141c11dd580ae6d07a23,Command:[/bin/bash],Args:[-c /usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:n568h588h677h5bbh698h6bh664h6dh654h88h94h5d7h56bh58ch54fh77h65bh676h695h587h5dbh5c7hc5h669h5c7h74h8dhb6h57bh74h596h667q,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:scripts,ReadOnly:true,MountPath:/var/lib/openstack/bin,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/openstack/config,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/config.json,SubPath:ceilometer-central-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-b8pzb,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:&ExecAction{Command:[/usr/bin/python3 
/var/lib/openstack/bin/centralhealth.py],},HTTPGet:nil,TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:300,TimeoutSeconds:5,PeriodSeconds:5,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod ceilometer-0_openstack(be4be2de-2f46-4982-8cd6-b73888d293af): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 28 07:15:43 crc kubenswrapper[4922]: E1128 07:15:43.266599 4922 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-cinder-api@sha256:b5266c9a26766fce2b92f95dff52d362a760f7baf1474cdcb33bd68570e096c0" Nov 28 07:15:43 crc kubenswrapper[4922]: E1128 07:15:43.266989 4922 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:cinder-db-sync,Image:quay.io/podified-antelope-centos9/openstack-cinder-api@sha256:b5266c9a26766fce2b92f95dff52d362a760f7baf1474cdcb33bd68570e096c0,Command:[/bin/bash],Args:[-c /usr/local/bin/kolla_set_configs && /usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:KOLLA_BOOTSTRAP,Value:TRUE,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:etc-machine-id,ReadOnly:true,MountPath:/etc/machine-id,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:scripts,ReadOnly:true,MountPath:/usr/local/bin/container-scripts,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/config-data/merged,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/etc/my.cnf,SubPath:my.cnf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:db-sync-config-data,ReadOnly:true,MountPath:/etc/cinder/cinder.conf.d,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/config.json,SubPath:db-sync-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-67j4r,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:*0,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,Al
lowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod cinder-db-sync-4p4rg_openstack(2bf381d5-4211-44a0-8fd6-4b1c05fb690d): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 28 07:15:43 crc kubenswrapper[4922]: E1128 07:15:43.268168 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cinder-db-sync\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/cinder-db-sync-4p4rg" podUID="2bf381d5-4211-44a0-8fd6-4b1c05fb690d" Nov 28 07:15:43 crc kubenswrapper[4922]: E1128 07:15:43.385974 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cinder-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-cinder-api@sha256:b5266c9a26766fce2b92f95dff52d362a760f7baf1474cdcb33bd68570e096c0\\\"\"" pod="openstack/cinder-db-sync-4p4rg" podUID="2bf381d5-4211-44a0-8fd6-4b1c05fb690d" Nov 28 07:15:43 crc kubenswrapper[4922]: E1128 07:15:43.662005 4922 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-barbican-api@sha256:3a56b50437a0c9a9a7b30c10f5e43bbdb7d9a94b723c70d36f0b01ff545e00eb" Nov 28 07:15:43 crc kubenswrapper[4922]: E1128 07:15:43.662156 4922 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:barbican-db-sync,Image:quay.io/podified-antelope-centos9/openstack-barbican-api@sha256:3a56b50437a0c9a9a7b30c10f5e43bbdb7d9a94b723c70d36f0b01ff545e00eb,Command:[/bin/bash],Args:[-c barbican-manage db upgrade],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:KOLLA_BOOTSTRAP,Value:TRUE,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:db-sync-config-data,ReadOnly:true,MountPath:/etc/barbican/barbican.conf.d,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-sxs2b,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*42403,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:*42403,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod barbican-db-sync-4q5pb_openstack(aa053525-2f00-4415-b9b5-35948c8e5038): 
ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 28 07:15:43 crc kubenswrapper[4922]: E1128 07:15:43.663338 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"barbican-db-sync\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/barbican-db-sync-4q5pb" podUID="aa053525-2f00-4415-b9b5-35948c8e5038" Nov 28 07:15:43 crc kubenswrapper[4922]: I1128 07:15:43.931302 4922 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6cd6764b85-lqks7" Nov 28 07:15:44 crc kubenswrapper[4922]: I1128 07:15:44.111041 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/e07cd6b9-6f1b-40ed-97ea-f81c981b6967-ovsdbserver-sb\") pod \"e07cd6b9-6f1b-40ed-97ea-f81c981b6967\" (UID: \"e07cd6b9-6f1b-40ed-97ea-f81c981b6967\") " Nov 28 07:15:44 crc kubenswrapper[4922]: I1128 07:15:44.111498 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e07cd6b9-6f1b-40ed-97ea-f81c981b6967-config\") pod \"e07cd6b9-6f1b-40ed-97ea-f81c981b6967\" (UID: \"e07cd6b9-6f1b-40ed-97ea-f81c981b6967\") " Nov 28 07:15:44 crc kubenswrapper[4922]: I1128 07:15:44.111564 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/e07cd6b9-6f1b-40ed-97ea-f81c981b6967-dns-svc\") pod \"e07cd6b9-6f1b-40ed-97ea-f81c981b6967\" (UID: \"e07cd6b9-6f1b-40ed-97ea-f81c981b6967\") " Nov 28 07:15:44 crc kubenswrapper[4922]: I1128 07:15:44.111586 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/e07cd6b9-6f1b-40ed-97ea-f81c981b6967-ovsdbserver-nb\") pod \"e07cd6b9-6f1b-40ed-97ea-f81c981b6967\" (UID: \"e07cd6b9-6f1b-40ed-97ea-f81c981b6967\") " Nov 28 07:15:44 crc kubenswrapper[4922]: I1128 07:15:44.111698 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/e07cd6b9-6f1b-40ed-97ea-f81c981b6967-dns-swift-storage-0\") pod \"e07cd6b9-6f1b-40ed-97ea-f81c981b6967\" (UID: \"e07cd6b9-6f1b-40ed-97ea-f81c981b6967\") " Nov 28 07:15:44 crc kubenswrapper[4922]: I1128 07:15:44.111737 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6bjqg\" (UniqueName: \"kubernetes.io/projected/e07cd6b9-6f1b-40ed-97ea-f81c981b6967-kube-api-access-6bjqg\") pod \"e07cd6b9-6f1b-40ed-97ea-f81c981b6967\" (UID: \"e07cd6b9-6f1b-40ed-97ea-f81c981b6967\") " Nov 28 07:15:44 crc kubenswrapper[4922]: I1128 07:15:44.116654 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e07cd6b9-6f1b-40ed-97ea-f81c981b6967-kube-api-access-6bjqg" (OuterVolumeSpecName: "kube-api-access-6bjqg") pod "e07cd6b9-6f1b-40ed-97ea-f81c981b6967" (UID: "e07cd6b9-6f1b-40ed-97ea-f81c981b6967"). InnerVolumeSpecName "kube-api-access-6bjqg". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 07:15:44 crc kubenswrapper[4922]: I1128 07:15:44.156000 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5f5d458b55-bzzwb"] Nov 28 07:15:44 crc kubenswrapper[4922]: I1128 07:15:44.174719 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e07cd6b9-6f1b-40ed-97ea-f81c981b6967-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "e07cd6b9-6f1b-40ed-97ea-f81c981b6967" (UID: "e07cd6b9-6f1b-40ed-97ea-f81c981b6967"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 07:15:44 crc kubenswrapper[4922]: I1128 07:15:44.175188 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e07cd6b9-6f1b-40ed-97ea-f81c981b6967-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "e07cd6b9-6f1b-40ed-97ea-f81c981b6967" (UID: "e07cd6b9-6f1b-40ed-97ea-f81c981b6967"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 07:15:44 crc kubenswrapper[4922]: I1128 07:15:44.182715 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e07cd6b9-6f1b-40ed-97ea-f81c981b6967-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "e07cd6b9-6f1b-40ed-97ea-f81c981b6967" (UID: "e07cd6b9-6f1b-40ed-97ea-f81c981b6967"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 07:15:44 crc kubenswrapper[4922]: I1128 07:15:44.188279 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e07cd6b9-6f1b-40ed-97ea-f81c981b6967-config" (OuterVolumeSpecName: "config") pod "e07cd6b9-6f1b-40ed-97ea-f81c981b6967" (UID: "e07cd6b9-6f1b-40ed-97ea-f81c981b6967"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 07:15:44 crc kubenswrapper[4922]: I1128 07:15:44.192628 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e07cd6b9-6f1b-40ed-97ea-f81c981b6967-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "e07cd6b9-6f1b-40ed-97ea-f81c981b6967" (UID: "e07cd6b9-6f1b-40ed-97ea-f81c981b6967"). InnerVolumeSpecName "dns-swift-storage-0". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 07:15:44 crc kubenswrapper[4922]: I1128 07:15:44.221193 4922 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6bjqg\" (UniqueName: \"kubernetes.io/projected/e07cd6b9-6f1b-40ed-97ea-f81c981b6967-kube-api-access-6bjqg\") on node \"crc\" DevicePath \"\"" Nov 28 07:15:44 crc kubenswrapper[4922]: I1128 07:15:44.221272 4922 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/e07cd6b9-6f1b-40ed-97ea-f81c981b6967-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 28 07:15:44 crc kubenswrapper[4922]: I1128 07:15:44.221285 4922 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e07cd6b9-6f1b-40ed-97ea-f81c981b6967-config\") on node \"crc\" DevicePath \"\"" Nov 28 07:15:44 crc kubenswrapper[4922]: I1128 07:15:44.221294 4922 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/e07cd6b9-6f1b-40ed-97ea-f81c981b6967-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 28 07:15:44 crc kubenswrapper[4922]: I1128 07:15:44.221303 4922 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/e07cd6b9-6f1b-40ed-97ea-f81c981b6967-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 28 07:15:44 crc kubenswrapper[4922]: I1128 07:15:44.221312 4922 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/e07cd6b9-6f1b-40ed-97ea-f81c981b6967-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Nov 28 07:15:44 crc kubenswrapper[4922]: I1128 07:15:44.235916 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-bootstrap-fj8qk"] Nov 28 07:15:44 crc kubenswrapper[4922]: I1128 07:15:44.317492 4922 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 28 07:15:44 crc kubenswrapper[4922]: I1128 07:15:44.409256 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-sync-gc24f" event={"ID":"086c6cef-6f21-46b9-ace7-f06ffff84fb3","Type":"ContainerStarted","Data":"6272661054f8024c9e3c8eadc23e10a3e6ff363967adc44a0dd8e60f1e941455"} Nov 28 07:15:44 crc kubenswrapper[4922]: I1128 07:15:44.411279 4922 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 28 07:15:44 crc kubenswrapper[4922]: I1128 07:15:44.413907 4922 generic.go:334] "Generic (PLEG): container finished" podID="4c7a8e1f-5f3c-451e-b407-6afb6d5443e3" containerID="3a18b37121a05838619515c25b85cf8300c82f732967d64971b59cf884f40ac9" exitCode=0 Nov 28 07:15:44 crc kubenswrapper[4922]: I1128 07:15:44.413959 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-sync-cj2fg" event={"ID":"4c7a8e1f-5f3c-451e-b407-6afb6d5443e3","Type":"ContainerDied","Data":"3a18b37121a05838619515c25b85cf8300c82f732967d64971b59cf884f40ac9"} Nov 28 07:15:44 crc kubenswrapper[4922]: I1128 07:15:44.415808 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6cd6764b85-lqks7" event={"ID":"e07cd6b9-6f1b-40ed-97ea-f81c981b6967","Type":"ContainerDied","Data":"1bef8a3551cf46621a75871ad3010254d47edd8a72aa3248e15ece94117ac02d"} Nov 28 07:15:44 crc kubenswrapper[4922]: I1128 07:15:44.415838 4922 scope.go:117] "RemoveContainer" containerID="fc22317ea027dd4ec1ec7ee83538f7895c02a88ccc4d651ef99e596d485a42c6" Nov 28 07:15:44 crc kubenswrapper[4922]: 
I1128 07:15:44.415925 4922 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6cd6764b85-lqks7" Nov 28 07:15:44 crc kubenswrapper[4922]: I1128 07:15:44.426312 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5f5d458b55-bzzwb" event={"ID":"2ee8a355-4756-4c55-89fe-0bb0bea586e8","Type":"ContainerStarted","Data":"666a40c7f897d54e54d892595dec0d8d38ae26bac8cd5458898bf6a486bd3835"} Nov 28 07:15:44 crc kubenswrapper[4922]: E1128 07:15:44.430467 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"barbican-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-barbican-api@sha256:3a56b50437a0c9a9a7b30c10f5e43bbdb7d9a94b723c70d36f0b01ff545e00eb\\\"\"" pod="openstack/barbican-db-sync-4q5pb" podUID="aa053525-2f00-4415-b9b5-35948c8e5038" Nov 28 07:15:44 crc kubenswrapper[4922]: I1128 07:15:44.431745 4922 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/placement-db-sync-gc24f" podStartSLOduration=2.624021499 podStartE2EDuration="25.431733453s" podCreationTimestamp="2025-11-28 07:15:19 +0000 UTC" firstStartedPulling="2025-11-28 07:15:20.874560668 +0000 UTC m=+1365.794956250" lastFinishedPulling="2025-11-28 07:15:43.682272622 +0000 UTC m=+1388.602668204" observedRunningTime="2025-11-28 07:15:44.43050913 +0000 UTC m=+1389.350904712" watchObservedRunningTime="2025-11-28 07:15:44.431733453 +0000 UTC m=+1389.352129035" Nov 28 07:15:44 crc kubenswrapper[4922]: I1128 07:15:44.481732 4922 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-6cd6764b85-lqks7"] Nov 28 07:15:44 crc kubenswrapper[4922]: I1128 07:15:44.487896 4922 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-6cd6764b85-lqks7"] Nov 28 07:15:44 crc kubenswrapper[4922]: W1128 07:15:44.521560 4922 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poddcbb1549_6821_4dd8_b4b9_df263325b1d0.slice/crio-8c2e60ed7c1c83a5a6517b25eee5187ea591bb8b871d8cb73dbf156a6918a724 WatchSource:0}: Error finding container 8c2e60ed7c1c83a5a6517b25eee5187ea591bb8b871d8cb73dbf156a6918a724: Status 404 returned error can't find the container with id 8c2e60ed7c1c83a5a6517b25eee5187ea591bb8b871d8cb73dbf156a6918a724 Nov 28 07:15:44 crc kubenswrapper[4922]: W1128 07:15:44.522208 4922 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podacf55119_1851_4271_ae26_4d21ccf6552d.slice/crio-15a6a342db407286fdd4bb3be5307bb2ff6dbaa6a2cee1fa3658d2960d767d52 WatchSource:0}: Error finding container 15a6a342db407286fdd4bb3be5307bb2ff6dbaa6a2cee1fa3658d2960d767d52: Status 404 returned error can't find the container with id 15a6a342db407286fdd4bb3be5307bb2ff6dbaa6a2cee1fa3658d2960d767d52 Nov 28 07:15:44 crc kubenswrapper[4922]: I1128 07:15:44.534975 4922 scope.go:117] "RemoveContainer" containerID="4c94b7d931beaa31868228e1f15ba327334a2d49d32e1bac4297f9cc03a469ec" Nov 28 07:15:44 crc kubenswrapper[4922]: I1128 07:15:44.537547 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"osp-secret" Nov 28 07:15:45 crc kubenswrapper[4922]: I1128 07:15:45.411578 4922 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e07cd6b9-6f1b-40ed-97ea-f81c981b6967" path="/var/lib/kubelet/pods/e07cd6b9-6f1b-40ed-97ea-f81c981b6967/volumes" Nov 28 07:15:45 crc 
kubenswrapper[4922]: I1128 07:15:45.446542 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"be4be2de-2f46-4982-8cd6-b73888d293af","Type":"ContainerStarted","Data":"794e738fc60e5d5c994b55691a1a29e41923ef5298bedb39dc71b6676bddf78c"} Nov 28 07:15:45 crc kubenswrapper[4922]: I1128 07:15:45.494014 4922 generic.go:334] "Generic (PLEG): container finished" podID="2ee8a355-4756-4c55-89fe-0bb0bea586e8" containerID="53d3e5a061caada09b191e3d5182b2ddbedb79361efd462f2bf21800a501e89f" exitCode=0 Nov 28 07:15:45 crc kubenswrapper[4922]: I1128 07:15:45.494071 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5f5d458b55-bzzwb" event={"ID":"2ee8a355-4756-4c55-89fe-0bb0bea586e8","Type":"ContainerDied","Data":"53d3e5a061caada09b191e3d5182b2ddbedb79361efd462f2bf21800a501e89f"} Nov 28 07:15:45 crc kubenswrapper[4922]: I1128 07:15:45.532432 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-fj8qk" event={"ID":"04965f12-78a7-459e-bbd6-0c716678f561","Type":"ContainerStarted","Data":"ef4aee34e3a95cc67545ce7f5bd5bb7c06099bf473d6fbe68964bb534c89632c"} Nov 28 07:15:45 crc kubenswrapper[4922]: I1128 07:15:45.532484 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-fj8qk" event={"ID":"04965f12-78a7-459e-bbd6-0c716678f561","Type":"ContainerStarted","Data":"bb7415145df7ae011c13c758e24fb0ce9375e5f0ac2a4b1c23ca7c100b858510"} Nov 28 07:15:45 crc kubenswrapper[4922]: I1128 07:15:45.538356 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"dcbb1549-6821-4dd8-b4b9-df263325b1d0","Type":"ContainerStarted","Data":"79fd9965bc61deb2017fc22a828a5ec23dbee684022aab9d5c8d2f0d4159ae4d"} Nov 28 07:15:45 crc kubenswrapper[4922]: I1128 07:15:45.538395 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"dcbb1549-6821-4dd8-b4b9-df263325b1d0","Type":"ContainerStarted","Data":"8c2e60ed7c1c83a5a6517b25eee5187ea591bb8b871d8cb73dbf156a6918a724"} Nov 28 07:15:45 crc kubenswrapper[4922]: I1128 07:15:45.544261 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"acf55119-1851-4271-ae26-4d21ccf6552d","Type":"ContainerStarted","Data":"4130053fe42b4ca6e08342bba09d7c25519dbb5f79b2b3e2edade0ae74182105"} Nov 28 07:15:45 crc kubenswrapper[4922]: I1128 07:15:45.544306 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"acf55119-1851-4271-ae26-4d21ccf6552d","Type":"ContainerStarted","Data":"15a6a342db407286fdd4bb3be5307bb2ff6dbaa6a2cee1fa3658d2960d767d52"} Nov 28 07:15:45 crc kubenswrapper[4922]: I1128 07:15:45.564782 4922 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-bootstrap-fj8qk" podStartSLOduration=16.564766618 podStartE2EDuration="16.564766618s" podCreationTimestamp="2025-11-28 07:15:29 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 07:15:45.549825551 +0000 UTC m=+1390.470221143" watchObservedRunningTime="2025-11-28 07:15:45.564766618 +0000 UTC m=+1390.485162190" Nov 28 07:15:45 crc kubenswrapper[4922]: I1128 07:15:45.935822 4922 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-db-sync-cj2fg" Nov 28 07:15:46 crc kubenswrapper[4922]: I1128 07:15:46.082265 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lbj4m\" (UniqueName: \"kubernetes.io/projected/4c7a8e1f-5f3c-451e-b407-6afb6d5443e3-kube-api-access-lbj4m\") pod \"4c7a8e1f-5f3c-451e-b407-6afb6d5443e3\" (UID: \"4c7a8e1f-5f3c-451e-b407-6afb6d5443e3\") " Nov 28 07:15:46 crc kubenswrapper[4922]: I1128 07:15:46.082391 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4c7a8e1f-5f3c-451e-b407-6afb6d5443e3-combined-ca-bundle\") pod \"4c7a8e1f-5f3c-451e-b407-6afb6d5443e3\" (UID: \"4c7a8e1f-5f3c-451e-b407-6afb6d5443e3\") " Nov 28 07:15:46 crc kubenswrapper[4922]: I1128 07:15:46.082455 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/4c7a8e1f-5f3c-451e-b407-6afb6d5443e3-config\") pod \"4c7a8e1f-5f3c-451e-b407-6afb6d5443e3\" (UID: \"4c7a8e1f-5f3c-451e-b407-6afb6d5443e3\") " Nov 28 07:15:46 crc kubenswrapper[4922]: I1128 07:15:46.091421 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4c7a8e1f-5f3c-451e-b407-6afb6d5443e3-kube-api-access-lbj4m" (OuterVolumeSpecName: "kube-api-access-lbj4m") pod "4c7a8e1f-5f3c-451e-b407-6afb6d5443e3" (UID: "4c7a8e1f-5f3c-451e-b407-6afb6d5443e3"). InnerVolumeSpecName "kube-api-access-lbj4m". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 07:15:46 crc kubenswrapper[4922]: I1128 07:15:46.108736 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4c7a8e1f-5f3c-451e-b407-6afb6d5443e3-config" (OuterVolumeSpecName: "config") pod "4c7a8e1f-5f3c-451e-b407-6afb6d5443e3" (UID: "4c7a8e1f-5f3c-451e-b407-6afb6d5443e3"). InnerVolumeSpecName "config". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 07:15:46 crc kubenswrapper[4922]: I1128 07:15:46.125062 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4c7a8e1f-5f3c-451e-b407-6afb6d5443e3-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "4c7a8e1f-5f3c-451e-b407-6afb6d5443e3" (UID: "4c7a8e1f-5f3c-451e-b407-6afb6d5443e3"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 07:15:46 crc kubenswrapper[4922]: I1128 07:15:46.184329 4922 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lbj4m\" (UniqueName: \"kubernetes.io/projected/4c7a8e1f-5f3c-451e-b407-6afb6d5443e3-kube-api-access-lbj4m\") on node \"crc\" DevicePath \"\"" Nov 28 07:15:46 crc kubenswrapper[4922]: I1128 07:15:46.184356 4922 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4c7a8e1f-5f3c-451e-b407-6afb6d5443e3-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 07:15:46 crc kubenswrapper[4922]: I1128 07:15:46.184365 4922 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/secret/4c7a8e1f-5f3c-451e-b407-6afb6d5443e3-config\") on node \"crc\" DevicePath \"\"" Nov 28 07:15:46 crc kubenswrapper[4922]: I1128 07:15:46.553446 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-sync-cj2fg" event={"ID":"4c7a8e1f-5f3c-451e-b407-6afb6d5443e3","Type":"ContainerDied","Data":"f0bc20c1c25aad3398a6e80cba60b7e871940515455fc975b96a00ae9502aa74"} Nov 28 07:15:46 crc kubenswrapper[4922]: I1128 07:15:46.553707 4922 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="f0bc20c1c25aad3398a6e80cba60b7e871940515455fc975b96a00ae9502aa74" Nov 28 07:15:46 crc kubenswrapper[4922]: I1128 07:15:46.553762 4922 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-sync-cj2fg" Nov 28 07:15:46 crc kubenswrapper[4922]: I1128 07:15:46.556180 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"acf55119-1851-4271-ae26-4d21ccf6552d","Type":"ContainerStarted","Data":"d5aa29ff2c40d00537a99d8e08387389cf91fbc821105ee1446caf24b221c819"} Nov 28 07:15:46 crc kubenswrapper[4922]: I1128 07:15:46.556336 4922 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-internal-api-0" podUID="acf55119-1851-4271-ae26-4d21ccf6552d" containerName="glance-log" containerID="cri-o://4130053fe42b4ca6e08342bba09d7c25519dbb5f79b2b3e2edade0ae74182105" gracePeriod=30 Nov 28 07:15:46 crc kubenswrapper[4922]: I1128 07:15:46.556411 4922 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-internal-api-0" podUID="acf55119-1851-4271-ae26-4d21ccf6552d" containerName="glance-httpd" containerID="cri-o://d5aa29ff2c40d00537a99d8e08387389cf91fbc821105ee1446caf24b221c819" gracePeriod=30 Nov 28 07:15:46 crc kubenswrapper[4922]: I1128 07:15:46.564840 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5f5d458b55-bzzwb" event={"ID":"2ee8a355-4756-4c55-89fe-0bb0bea586e8","Type":"ContainerStarted","Data":"177756b878e137b89edcfdcd94fc75813e7ec2a27193f00e0237e635f766efd8"} Nov 28 07:15:46 crc kubenswrapper[4922]: I1128 07:15:46.565798 4922 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-5f5d458b55-bzzwb" Nov 28 07:15:46 crc kubenswrapper[4922]: I1128 07:15:46.568818 4922 generic.go:334] "Generic (PLEG): container finished" podID="086c6cef-6f21-46b9-ace7-f06ffff84fb3" containerID="6272661054f8024c9e3c8eadc23e10a3e6ff363967adc44a0dd8e60f1e941455" exitCode=0 Nov 28 07:15:46 crc kubenswrapper[4922]: I1128 07:15:46.568873 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-sync-gc24f" 
event={"ID":"086c6cef-6f21-46b9-ace7-f06ffff84fb3","Type":"ContainerDied","Data":"6272661054f8024c9e3c8eadc23e10a3e6ff363967adc44a0dd8e60f1e941455"} Nov 28 07:15:46 crc kubenswrapper[4922]: I1128 07:15:46.582931 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"dcbb1549-6821-4dd8-b4b9-df263325b1d0","Type":"ContainerStarted","Data":"98ed3bffee1d607fa511d9f904ebbfa716843f1dbe2ea6cce8781a45673ea183"} Nov 28 07:15:46 crc kubenswrapper[4922]: I1128 07:15:46.582923 4922 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-external-api-0" podUID="dcbb1549-6821-4dd8-b4b9-df263325b1d0" containerName="glance-log" containerID="cri-o://79fd9965bc61deb2017fc22a828a5ec23dbee684022aab9d5c8d2f0d4159ae4d" gracePeriod=30 Nov 28 07:15:46 crc kubenswrapper[4922]: I1128 07:15:46.583085 4922 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-external-api-0" podUID="dcbb1549-6821-4dd8-b4b9-df263325b1d0" containerName="glance-httpd" containerID="cri-o://98ed3bffee1d607fa511d9f904ebbfa716843f1dbe2ea6cce8781a45673ea183" gracePeriod=30 Nov 28 07:15:46 crc kubenswrapper[4922]: I1128 07:15:46.590611 4922 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-internal-api-0" podStartSLOduration=17.59058191 podStartE2EDuration="17.59058191s" podCreationTimestamp="2025-11-28 07:15:29 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 07:15:46.584500609 +0000 UTC m=+1391.504896191" watchObservedRunningTime="2025-11-28 07:15:46.59058191 +0000 UTC m=+1391.510977522" Nov 28 07:15:46 crc kubenswrapper[4922]: I1128 07:15:46.639055 4922 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-5f5d458b55-bzzwb" podStartSLOduration=17.639034339 podStartE2EDuration="17.639034339s" podCreationTimestamp="2025-11-28 07:15:29 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 07:15:46.628921951 +0000 UTC m=+1391.549317533" watchObservedRunningTime="2025-11-28 07:15:46.639034339 +0000 UTC m=+1391.559429921" Nov 28 07:15:46 crc kubenswrapper[4922]: I1128 07:15:46.687817 4922 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-external-api-0" podStartSLOduration=17.687801277 podStartE2EDuration="17.687801277s" podCreationTimestamp="2025-11-28 07:15:29 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 07:15:46.673975339 +0000 UTC m=+1391.594370921" watchObservedRunningTime="2025-11-28 07:15:46.687801277 +0000 UTC m=+1391.608196859" Nov 28 07:15:46 crc kubenswrapper[4922]: I1128 07:15:46.696505 4922 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5f5d458b55-bzzwb"] Nov 28 07:15:46 crc kubenswrapper[4922]: E1128 07:15:46.737465 4922 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podacf55119_1851_4271_ae26_4d21ccf6552d.slice/crio-4130053fe42b4ca6e08342bba09d7c25519dbb5f79b2b3e2edade0ae74182105.scope\": RecentStats: unable to find data in memory cache], 
[\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod4c7a8e1f_5f3c_451e_b407_6afb6d5443e3.slice/crio-f0bc20c1c25aad3398a6e80cba60b7e871940515455fc975b96a00ae9502aa74\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod4c7a8e1f_5f3c_451e_b407_6afb6d5443e3.slice\": RecentStats: unable to find data in memory cache]" Nov 28 07:15:46 crc kubenswrapper[4922]: I1128 07:15:46.752743 4922 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-f8dc44d89-v7d7h"] Nov 28 07:15:46 crc kubenswrapper[4922]: E1128 07:15:46.755411 4922 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4c7a8e1f-5f3c-451e-b407-6afb6d5443e3" containerName="neutron-db-sync" Nov 28 07:15:46 crc kubenswrapper[4922]: I1128 07:15:46.755436 4922 state_mem.go:107] "Deleted CPUSet assignment" podUID="4c7a8e1f-5f3c-451e-b407-6afb6d5443e3" containerName="neutron-db-sync" Nov 28 07:15:46 crc kubenswrapper[4922]: E1128 07:15:46.755454 4922 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e07cd6b9-6f1b-40ed-97ea-f81c981b6967" containerName="init" Nov 28 07:15:46 crc kubenswrapper[4922]: I1128 07:15:46.755471 4922 state_mem.go:107] "Deleted CPUSet assignment" podUID="e07cd6b9-6f1b-40ed-97ea-f81c981b6967" containerName="init" Nov 28 07:15:46 crc kubenswrapper[4922]: E1128 07:15:46.755484 4922 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e07cd6b9-6f1b-40ed-97ea-f81c981b6967" containerName="dnsmasq-dns" Nov 28 07:15:46 crc kubenswrapper[4922]: I1128 07:15:46.755492 4922 state_mem.go:107] "Deleted CPUSet assignment" podUID="e07cd6b9-6f1b-40ed-97ea-f81c981b6967" containerName="dnsmasq-dns" Nov 28 07:15:46 crc kubenswrapper[4922]: I1128 07:15:46.756925 4922 memory_manager.go:354] "RemoveStaleState removing state" podUID="e07cd6b9-6f1b-40ed-97ea-f81c981b6967" containerName="dnsmasq-dns" Nov 28 07:15:46 crc kubenswrapper[4922]: I1128 07:15:46.756946 4922 memory_manager.go:354] "RemoveStaleState removing state" podUID="4c7a8e1f-5f3c-451e-b407-6afb6d5443e3" containerName="neutron-db-sync" Nov 28 07:15:46 crc kubenswrapper[4922]: I1128 07:15:46.757835 4922 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-f8dc44d89-v7d7h" Nov 28 07:15:46 crc kubenswrapper[4922]: I1128 07:15:46.797682 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-f8dc44d89-v7d7h"] Nov 28 07:15:46 crc kubenswrapper[4922]: I1128 07:15:46.798595 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7decb3d6-6624-42fe-9155-4b75224ca2f7-config\") pod \"dnsmasq-dns-f8dc44d89-v7d7h\" (UID: \"7decb3d6-6624-42fe-9155-4b75224ca2f7\") " pod="openstack/dnsmasq-dns-f8dc44d89-v7d7h" Nov 28 07:15:46 crc kubenswrapper[4922]: I1128 07:15:46.798675 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/7decb3d6-6624-42fe-9155-4b75224ca2f7-dns-swift-storage-0\") pod \"dnsmasq-dns-f8dc44d89-v7d7h\" (UID: \"7decb3d6-6624-42fe-9155-4b75224ca2f7\") " pod="openstack/dnsmasq-dns-f8dc44d89-v7d7h" Nov 28 07:15:46 crc kubenswrapper[4922]: I1128 07:15:46.798708 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/7decb3d6-6624-42fe-9155-4b75224ca2f7-dns-svc\") pod \"dnsmasq-dns-f8dc44d89-v7d7h\" (UID: \"7decb3d6-6624-42fe-9155-4b75224ca2f7\") " pod="openstack/dnsmasq-dns-f8dc44d89-v7d7h" Nov 28 07:15:46 crc kubenswrapper[4922]: I1128 07:15:46.798746 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rjw5n\" (UniqueName: \"kubernetes.io/projected/7decb3d6-6624-42fe-9155-4b75224ca2f7-kube-api-access-rjw5n\") pod \"dnsmasq-dns-f8dc44d89-v7d7h\" (UID: \"7decb3d6-6624-42fe-9155-4b75224ca2f7\") " pod="openstack/dnsmasq-dns-f8dc44d89-v7d7h" Nov 28 07:15:46 crc kubenswrapper[4922]: I1128 07:15:46.798775 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/7decb3d6-6624-42fe-9155-4b75224ca2f7-ovsdbserver-sb\") pod \"dnsmasq-dns-f8dc44d89-v7d7h\" (UID: \"7decb3d6-6624-42fe-9155-4b75224ca2f7\") " pod="openstack/dnsmasq-dns-f8dc44d89-v7d7h" Nov 28 07:15:46 crc kubenswrapper[4922]: I1128 07:15:46.798803 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/7decb3d6-6624-42fe-9155-4b75224ca2f7-ovsdbserver-nb\") pod \"dnsmasq-dns-f8dc44d89-v7d7h\" (UID: \"7decb3d6-6624-42fe-9155-4b75224ca2f7\") " pod="openstack/dnsmasq-dns-f8dc44d89-v7d7h" Nov 28 07:15:46 crc kubenswrapper[4922]: I1128 07:15:46.876322 4922 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-64bdbc5658-v8ngg"] Nov 28 07:15:46 crc kubenswrapper[4922]: I1128 07:15:46.878623 4922 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-64bdbc5658-v8ngg" Nov 28 07:15:46 crc kubenswrapper[4922]: I1128 07:15:46.880183 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-config" Nov 28 07:15:46 crc kubenswrapper[4922]: I1128 07:15:46.881302 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-httpd-config" Nov 28 07:15:46 crc kubenswrapper[4922]: I1128 07:15:46.881462 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-neutron-dockercfg-r6kx9" Nov 28 07:15:46 crc kubenswrapper[4922]: I1128 07:15:46.881871 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-neutron-ovndbs" Nov 28 07:15:46 crc kubenswrapper[4922]: I1128 07:15:46.890795 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-64bdbc5658-v8ngg"] Nov 28 07:15:46 crc kubenswrapper[4922]: I1128 07:15:46.904549 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/7decb3d6-6624-42fe-9155-4b75224ca2f7-dns-swift-storage-0\") pod \"dnsmasq-dns-f8dc44d89-v7d7h\" (UID: \"7decb3d6-6624-42fe-9155-4b75224ca2f7\") " pod="openstack/dnsmasq-dns-f8dc44d89-v7d7h" Nov 28 07:15:46 crc kubenswrapper[4922]: I1128 07:15:46.904636 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/7decb3d6-6624-42fe-9155-4b75224ca2f7-dns-svc\") pod \"dnsmasq-dns-f8dc44d89-v7d7h\" (UID: \"7decb3d6-6624-42fe-9155-4b75224ca2f7\") " pod="openstack/dnsmasq-dns-f8dc44d89-v7d7h" Nov 28 07:15:46 crc kubenswrapper[4922]: I1128 07:15:46.904716 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rjw5n\" (UniqueName: \"kubernetes.io/projected/7decb3d6-6624-42fe-9155-4b75224ca2f7-kube-api-access-rjw5n\") pod \"dnsmasq-dns-f8dc44d89-v7d7h\" (UID: \"7decb3d6-6624-42fe-9155-4b75224ca2f7\") " pod="openstack/dnsmasq-dns-f8dc44d89-v7d7h" Nov 28 07:15:46 crc kubenswrapper[4922]: I1128 07:15:46.904772 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/7decb3d6-6624-42fe-9155-4b75224ca2f7-ovsdbserver-sb\") pod \"dnsmasq-dns-f8dc44d89-v7d7h\" (UID: \"7decb3d6-6624-42fe-9155-4b75224ca2f7\") " pod="openstack/dnsmasq-dns-f8dc44d89-v7d7h" Nov 28 07:15:46 crc kubenswrapper[4922]: I1128 07:15:46.904827 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/7decb3d6-6624-42fe-9155-4b75224ca2f7-ovsdbserver-nb\") pod \"dnsmasq-dns-f8dc44d89-v7d7h\" (UID: \"7decb3d6-6624-42fe-9155-4b75224ca2f7\") " pod="openstack/dnsmasq-dns-f8dc44d89-v7d7h" Nov 28 07:15:46 crc kubenswrapper[4922]: I1128 07:15:46.905000 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7decb3d6-6624-42fe-9155-4b75224ca2f7-config\") pod \"dnsmasq-dns-f8dc44d89-v7d7h\" (UID: \"7decb3d6-6624-42fe-9155-4b75224ca2f7\") " pod="openstack/dnsmasq-dns-f8dc44d89-v7d7h" Nov 28 07:15:46 crc kubenswrapper[4922]: I1128 07:15:46.906145 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7decb3d6-6624-42fe-9155-4b75224ca2f7-config\") pod \"dnsmasq-dns-f8dc44d89-v7d7h\" (UID: \"7decb3d6-6624-42fe-9155-4b75224ca2f7\") " 
pod="openstack/dnsmasq-dns-f8dc44d89-v7d7h" Nov 28 07:15:46 crc kubenswrapper[4922]: I1128 07:15:46.907281 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/7decb3d6-6624-42fe-9155-4b75224ca2f7-dns-swift-storage-0\") pod \"dnsmasq-dns-f8dc44d89-v7d7h\" (UID: \"7decb3d6-6624-42fe-9155-4b75224ca2f7\") " pod="openstack/dnsmasq-dns-f8dc44d89-v7d7h" Nov 28 07:15:46 crc kubenswrapper[4922]: I1128 07:15:46.907580 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/7decb3d6-6624-42fe-9155-4b75224ca2f7-ovsdbserver-sb\") pod \"dnsmasq-dns-f8dc44d89-v7d7h\" (UID: \"7decb3d6-6624-42fe-9155-4b75224ca2f7\") " pod="openstack/dnsmasq-dns-f8dc44d89-v7d7h" Nov 28 07:15:46 crc kubenswrapper[4922]: I1128 07:15:46.907854 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/7decb3d6-6624-42fe-9155-4b75224ca2f7-dns-svc\") pod \"dnsmasq-dns-f8dc44d89-v7d7h\" (UID: \"7decb3d6-6624-42fe-9155-4b75224ca2f7\") " pod="openstack/dnsmasq-dns-f8dc44d89-v7d7h" Nov 28 07:15:46 crc kubenswrapper[4922]: I1128 07:15:46.908134 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/7decb3d6-6624-42fe-9155-4b75224ca2f7-ovsdbserver-nb\") pod \"dnsmasq-dns-f8dc44d89-v7d7h\" (UID: \"7decb3d6-6624-42fe-9155-4b75224ca2f7\") " pod="openstack/dnsmasq-dns-f8dc44d89-v7d7h" Nov 28 07:15:46 crc kubenswrapper[4922]: I1128 07:15:46.939415 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rjw5n\" (UniqueName: \"kubernetes.io/projected/7decb3d6-6624-42fe-9155-4b75224ca2f7-kube-api-access-rjw5n\") pod \"dnsmasq-dns-f8dc44d89-v7d7h\" (UID: \"7decb3d6-6624-42fe-9155-4b75224ca2f7\") " pod="openstack/dnsmasq-dns-f8dc44d89-v7d7h" Nov 28 07:15:47 crc kubenswrapper[4922]: I1128 07:15:47.008847 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/0a307c19-4be2-44f2-8034-00ebfa265aac-httpd-config\") pod \"neutron-64bdbc5658-v8ngg\" (UID: \"0a307c19-4be2-44f2-8034-00ebfa265aac\") " pod="openstack/neutron-64bdbc5658-v8ngg" Nov 28 07:15:47 crc kubenswrapper[4922]: I1128 07:15:47.009064 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/0a307c19-4be2-44f2-8034-00ebfa265aac-ovndb-tls-certs\") pod \"neutron-64bdbc5658-v8ngg\" (UID: \"0a307c19-4be2-44f2-8034-00ebfa265aac\") " pod="openstack/neutron-64bdbc5658-v8ngg" Nov 28 07:15:47 crc kubenswrapper[4922]: I1128 07:15:47.009087 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9h4zp\" (UniqueName: \"kubernetes.io/projected/0a307c19-4be2-44f2-8034-00ebfa265aac-kube-api-access-9h4zp\") pod \"neutron-64bdbc5658-v8ngg\" (UID: \"0a307c19-4be2-44f2-8034-00ebfa265aac\") " pod="openstack/neutron-64bdbc5658-v8ngg" Nov 28 07:15:47 crc kubenswrapper[4922]: I1128 07:15:47.009136 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/0a307c19-4be2-44f2-8034-00ebfa265aac-config\") pod \"neutron-64bdbc5658-v8ngg\" (UID: \"0a307c19-4be2-44f2-8034-00ebfa265aac\") " pod="openstack/neutron-64bdbc5658-v8ngg" Nov 28 07:15:47 
crc kubenswrapper[4922]: I1128 07:15:47.009164 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0a307c19-4be2-44f2-8034-00ebfa265aac-combined-ca-bundle\") pod \"neutron-64bdbc5658-v8ngg\" (UID: \"0a307c19-4be2-44f2-8034-00ebfa265aac\") " pod="openstack/neutron-64bdbc5658-v8ngg" Nov 28 07:15:47 crc kubenswrapper[4922]: I1128 07:15:47.110975 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/0a307c19-4be2-44f2-8034-00ebfa265aac-ovndb-tls-certs\") pod \"neutron-64bdbc5658-v8ngg\" (UID: \"0a307c19-4be2-44f2-8034-00ebfa265aac\") " pod="openstack/neutron-64bdbc5658-v8ngg" Nov 28 07:15:47 crc kubenswrapper[4922]: I1128 07:15:47.111047 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9h4zp\" (UniqueName: \"kubernetes.io/projected/0a307c19-4be2-44f2-8034-00ebfa265aac-kube-api-access-9h4zp\") pod \"neutron-64bdbc5658-v8ngg\" (UID: \"0a307c19-4be2-44f2-8034-00ebfa265aac\") " pod="openstack/neutron-64bdbc5658-v8ngg" Nov 28 07:15:47 crc kubenswrapper[4922]: I1128 07:15:47.111077 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/0a307c19-4be2-44f2-8034-00ebfa265aac-config\") pod \"neutron-64bdbc5658-v8ngg\" (UID: \"0a307c19-4be2-44f2-8034-00ebfa265aac\") " pod="openstack/neutron-64bdbc5658-v8ngg" Nov 28 07:15:47 crc kubenswrapper[4922]: I1128 07:15:47.111108 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0a307c19-4be2-44f2-8034-00ebfa265aac-combined-ca-bundle\") pod \"neutron-64bdbc5658-v8ngg\" (UID: \"0a307c19-4be2-44f2-8034-00ebfa265aac\") " pod="openstack/neutron-64bdbc5658-v8ngg" Nov 28 07:15:47 crc kubenswrapper[4922]: I1128 07:15:47.111161 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/0a307c19-4be2-44f2-8034-00ebfa265aac-httpd-config\") pod \"neutron-64bdbc5658-v8ngg\" (UID: \"0a307c19-4be2-44f2-8034-00ebfa265aac\") " pod="openstack/neutron-64bdbc5658-v8ngg" Nov 28 07:15:47 crc kubenswrapper[4922]: I1128 07:15:47.116053 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0a307c19-4be2-44f2-8034-00ebfa265aac-combined-ca-bundle\") pod \"neutron-64bdbc5658-v8ngg\" (UID: \"0a307c19-4be2-44f2-8034-00ebfa265aac\") " pod="openstack/neutron-64bdbc5658-v8ngg" Nov 28 07:15:47 crc kubenswrapper[4922]: I1128 07:15:47.119445 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/0a307c19-4be2-44f2-8034-00ebfa265aac-httpd-config\") pod \"neutron-64bdbc5658-v8ngg\" (UID: \"0a307c19-4be2-44f2-8034-00ebfa265aac\") " pod="openstack/neutron-64bdbc5658-v8ngg" Nov 28 07:15:47 crc kubenswrapper[4922]: I1128 07:15:47.120009 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/secret/0a307c19-4be2-44f2-8034-00ebfa265aac-config\") pod \"neutron-64bdbc5658-v8ngg\" (UID: \"0a307c19-4be2-44f2-8034-00ebfa265aac\") " pod="openstack/neutron-64bdbc5658-v8ngg" Nov 28 07:15:47 crc kubenswrapper[4922]: I1128 07:15:47.120204 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovndb-tls-certs\" (UniqueName: 
\"kubernetes.io/secret/0a307c19-4be2-44f2-8034-00ebfa265aac-ovndb-tls-certs\") pod \"neutron-64bdbc5658-v8ngg\" (UID: \"0a307c19-4be2-44f2-8034-00ebfa265aac\") " pod="openstack/neutron-64bdbc5658-v8ngg" Nov 28 07:15:47 crc kubenswrapper[4922]: I1128 07:15:47.130995 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9h4zp\" (UniqueName: \"kubernetes.io/projected/0a307c19-4be2-44f2-8034-00ebfa265aac-kube-api-access-9h4zp\") pod \"neutron-64bdbc5658-v8ngg\" (UID: \"0a307c19-4be2-44f2-8034-00ebfa265aac\") " pod="openstack/neutron-64bdbc5658-v8ngg" Nov 28 07:15:47 crc kubenswrapper[4922]: I1128 07:15:47.144624 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-f8dc44d89-v7d7h" Nov 28 07:15:47 crc kubenswrapper[4922]: I1128 07:15:47.391390 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-64bdbc5658-v8ngg" Nov 28 07:15:47 crc kubenswrapper[4922]: I1128 07:15:47.594517 4922 generic.go:334] "Generic (PLEG): container finished" podID="dcbb1549-6821-4dd8-b4b9-df263325b1d0" containerID="98ed3bffee1d607fa511d9f904ebbfa716843f1dbe2ea6cce8781a45673ea183" exitCode=0 Nov 28 07:15:47 crc kubenswrapper[4922]: I1128 07:15:47.594557 4922 generic.go:334] "Generic (PLEG): container finished" podID="dcbb1549-6821-4dd8-b4b9-df263325b1d0" containerID="79fd9965bc61deb2017fc22a828a5ec23dbee684022aab9d5c8d2f0d4159ae4d" exitCode=143 Nov 28 07:15:47 crc kubenswrapper[4922]: I1128 07:15:47.594601 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"dcbb1549-6821-4dd8-b4b9-df263325b1d0","Type":"ContainerDied","Data":"98ed3bffee1d607fa511d9f904ebbfa716843f1dbe2ea6cce8781a45673ea183"} Nov 28 07:15:47 crc kubenswrapper[4922]: I1128 07:15:47.594626 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"dcbb1549-6821-4dd8-b4b9-df263325b1d0","Type":"ContainerDied","Data":"79fd9965bc61deb2017fc22a828a5ec23dbee684022aab9d5c8d2f0d4159ae4d"} Nov 28 07:15:47 crc kubenswrapper[4922]: I1128 07:15:47.599568 4922 generic.go:334] "Generic (PLEG): container finished" podID="acf55119-1851-4271-ae26-4d21ccf6552d" containerID="d5aa29ff2c40d00537a99d8e08387389cf91fbc821105ee1446caf24b221c819" exitCode=0 Nov 28 07:15:47 crc kubenswrapper[4922]: I1128 07:15:47.599610 4922 generic.go:334] "Generic (PLEG): container finished" podID="acf55119-1851-4271-ae26-4d21ccf6552d" containerID="4130053fe42b4ca6e08342bba09d7c25519dbb5f79b2b3e2edade0ae74182105" exitCode=143 Nov 28 07:15:47 crc kubenswrapper[4922]: I1128 07:15:47.600240 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"acf55119-1851-4271-ae26-4d21ccf6552d","Type":"ContainerDied","Data":"d5aa29ff2c40d00537a99d8e08387389cf91fbc821105ee1446caf24b221c819"} Nov 28 07:15:47 crc kubenswrapper[4922]: I1128 07:15:47.600288 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"acf55119-1851-4271-ae26-4d21ccf6552d","Type":"ContainerDied","Data":"4130053fe42b4ca6e08342bba09d7c25519dbb5f79b2b3e2edade0ae74182105"} Nov 28 07:15:47 crc kubenswrapper[4922]: I1128 07:15:47.683309 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-f8dc44d89-v7d7h"] Nov 28 07:15:48 crc kubenswrapper[4922]: I1128 07:15:48.011289 4922 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-db-sync-gc24f" Nov 28 07:15:48 crc kubenswrapper[4922]: I1128 07:15:48.135836 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/086c6cef-6f21-46b9-ace7-f06ffff84fb3-config-data\") pod \"086c6cef-6f21-46b9-ace7-f06ffff84fb3\" (UID: \"086c6cef-6f21-46b9-ace7-f06ffff84fb3\") " Nov 28 07:15:48 crc kubenswrapper[4922]: I1128 07:15:48.136029 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/086c6cef-6f21-46b9-ace7-f06ffff84fb3-logs\") pod \"086c6cef-6f21-46b9-ace7-f06ffff84fb3\" (UID: \"086c6cef-6f21-46b9-ace7-f06ffff84fb3\") " Nov 28 07:15:48 crc kubenswrapper[4922]: I1128 07:15:48.136094 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2bfrw\" (UniqueName: \"kubernetes.io/projected/086c6cef-6f21-46b9-ace7-f06ffff84fb3-kube-api-access-2bfrw\") pod \"086c6cef-6f21-46b9-ace7-f06ffff84fb3\" (UID: \"086c6cef-6f21-46b9-ace7-f06ffff84fb3\") " Nov 28 07:15:48 crc kubenswrapper[4922]: I1128 07:15:48.136136 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/086c6cef-6f21-46b9-ace7-f06ffff84fb3-scripts\") pod \"086c6cef-6f21-46b9-ace7-f06ffff84fb3\" (UID: \"086c6cef-6f21-46b9-ace7-f06ffff84fb3\") " Nov 28 07:15:48 crc kubenswrapper[4922]: I1128 07:15:48.136180 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/086c6cef-6f21-46b9-ace7-f06ffff84fb3-combined-ca-bundle\") pod \"086c6cef-6f21-46b9-ace7-f06ffff84fb3\" (UID: \"086c6cef-6f21-46b9-ace7-f06ffff84fb3\") " Nov 28 07:15:48 crc kubenswrapper[4922]: I1128 07:15:48.149514 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/086c6cef-6f21-46b9-ace7-f06ffff84fb3-logs" (OuterVolumeSpecName: "logs") pod "086c6cef-6f21-46b9-ace7-f06ffff84fb3" (UID: "086c6cef-6f21-46b9-ace7-f06ffff84fb3"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 07:15:48 crc kubenswrapper[4922]: I1128 07:15:48.157527 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/086c6cef-6f21-46b9-ace7-f06ffff84fb3-kube-api-access-2bfrw" (OuterVolumeSpecName: "kube-api-access-2bfrw") pod "086c6cef-6f21-46b9-ace7-f06ffff84fb3" (UID: "086c6cef-6f21-46b9-ace7-f06ffff84fb3"). InnerVolumeSpecName "kube-api-access-2bfrw". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 07:15:48 crc kubenswrapper[4922]: I1128 07:15:48.165462 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/086c6cef-6f21-46b9-ace7-f06ffff84fb3-scripts" (OuterVolumeSpecName: "scripts") pod "086c6cef-6f21-46b9-ace7-f06ffff84fb3" (UID: "086c6cef-6f21-46b9-ace7-f06ffff84fb3"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 07:15:48 crc kubenswrapper[4922]: I1128 07:15:48.175201 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/086c6cef-6f21-46b9-ace7-f06ffff84fb3-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "086c6cef-6f21-46b9-ace7-f06ffff84fb3" (UID: "086c6cef-6f21-46b9-ace7-f06ffff84fb3"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 07:15:48 crc kubenswrapper[4922]: I1128 07:15:48.201130 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/086c6cef-6f21-46b9-ace7-f06ffff84fb3-config-data" (OuterVolumeSpecName: "config-data") pod "086c6cef-6f21-46b9-ace7-f06ffff84fb3" (UID: "086c6cef-6f21-46b9-ace7-f06ffff84fb3"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 07:15:48 crc kubenswrapper[4922]: I1128 07:15:48.242663 4922 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/086c6cef-6f21-46b9-ace7-f06ffff84fb3-logs\") on node \"crc\" DevicePath \"\"" Nov 28 07:15:48 crc kubenswrapper[4922]: I1128 07:15:48.242701 4922 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2bfrw\" (UniqueName: \"kubernetes.io/projected/086c6cef-6f21-46b9-ace7-f06ffff84fb3-kube-api-access-2bfrw\") on node \"crc\" DevicePath \"\"" Nov 28 07:15:48 crc kubenswrapper[4922]: I1128 07:15:48.242716 4922 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/086c6cef-6f21-46b9-ace7-f06ffff84fb3-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 07:15:48 crc kubenswrapper[4922]: I1128 07:15:48.242728 4922 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/086c6cef-6f21-46b9-ace7-f06ffff84fb3-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 07:15:48 crc kubenswrapper[4922]: I1128 07:15:48.242741 4922 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/086c6cef-6f21-46b9-ace7-f06ffff84fb3-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 07:15:48 crc kubenswrapper[4922]: I1128 07:15:48.544799 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-64bdbc5658-v8ngg"] Nov 28 07:15:48 crc kubenswrapper[4922]: I1128 07:15:48.614056 4922 generic.go:334] "Generic (PLEG): container finished" podID="7decb3d6-6624-42fe-9155-4b75224ca2f7" containerID="3fcad4ad664b96b8b0d86552e09f9a89fbbaceb658e95144e93f0e722e8345a9" exitCode=0 Nov 28 07:15:48 crc kubenswrapper[4922]: I1128 07:15:48.614118 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-f8dc44d89-v7d7h" event={"ID":"7decb3d6-6624-42fe-9155-4b75224ca2f7","Type":"ContainerDied","Data":"3fcad4ad664b96b8b0d86552e09f9a89fbbaceb658e95144e93f0e722e8345a9"} Nov 28 07:15:48 crc kubenswrapper[4922]: I1128 07:15:48.614143 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-f8dc44d89-v7d7h" event={"ID":"7decb3d6-6624-42fe-9155-4b75224ca2f7","Type":"ContainerStarted","Data":"ea82a31598be35630309439df235e312a7df5736861f78d331eb78effec5cded"} Nov 28 07:15:48 crc kubenswrapper[4922]: I1128 07:15:48.616430 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-sync-gc24f" event={"ID":"086c6cef-6f21-46b9-ace7-f06ffff84fb3","Type":"ContainerDied","Data":"35f62c0a38a6d29c6420d875471fe7250ac35210090479b2898d208c97208c2e"} Nov 28 07:15:48 crc kubenswrapper[4922]: I1128 07:15:48.616459 4922 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="35f62c0a38a6d29c6420d875471fe7250ac35210090479b2898d208c97208c2e" Nov 28 07:15:48 crc kubenswrapper[4922]: I1128 07:15:48.616502 4922 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-db-sync-gc24f" Nov 28 07:15:48 crc kubenswrapper[4922]: I1128 07:15:48.616505 4922 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-5f5d458b55-bzzwb" podUID="2ee8a355-4756-4c55-89fe-0bb0bea586e8" containerName="dnsmasq-dns" containerID="cri-o://177756b878e137b89edcfdcd94fc75813e7ec2a27193f00e0237e635f766efd8" gracePeriod=10 Nov 28 07:15:48 crc kubenswrapper[4922]: I1128 07:15:48.742599 4922 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/placement-5f78c88b9d-zp4nm"] Nov 28 07:15:48 crc kubenswrapper[4922]: E1128 07:15:48.743096 4922 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="086c6cef-6f21-46b9-ace7-f06ffff84fb3" containerName="placement-db-sync" Nov 28 07:15:48 crc kubenswrapper[4922]: I1128 07:15:48.743175 4922 state_mem.go:107] "Deleted CPUSet assignment" podUID="086c6cef-6f21-46b9-ace7-f06ffff84fb3" containerName="placement-db-sync" Nov 28 07:15:48 crc kubenswrapper[4922]: I1128 07:15:48.743516 4922 memory_manager.go:354] "RemoveStaleState removing state" podUID="086c6cef-6f21-46b9-ace7-f06ffff84fb3" containerName="placement-db-sync" Nov 28 07:15:48 crc kubenswrapper[4922]: I1128 07:15:48.744478 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-5f78c88b9d-zp4nm" Nov 28 07:15:48 crc kubenswrapper[4922]: I1128 07:15:48.753275 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-placement-internal-svc" Nov 28 07:15:48 crc kubenswrapper[4922]: I1128 07:15:48.753419 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-scripts" Nov 28 07:15:48 crc kubenswrapper[4922]: I1128 07:15:48.753500 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-placement-public-svc" Nov 28 07:15:48 crc kubenswrapper[4922]: I1128 07:15:48.753610 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-placement-dockercfg-9rnmt" Nov 28 07:15:48 crc kubenswrapper[4922]: I1128 07:15:48.753797 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-config-data" Nov 28 07:15:48 crc kubenswrapper[4922]: I1128 07:15:48.773595 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-5f78c88b9d-zp4nm"] Nov 28 07:15:48 crc kubenswrapper[4922]: I1128 07:15:48.849674 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/02d26a97-d447-4f76-90ed-9357e343cd91-scripts\") pod \"placement-5f78c88b9d-zp4nm\" (UID: \"02d26a97-d447-4f76-90ed-9357e343cd91\") " pod="openstack/placement-5f78c88b9d-zp4nm" Nov 28 07:15:48 crc kubenswrapper[4922]: I1128 07:15:48.849733 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/02d26a97-d447-4f76-90ed-9357e343cd91-logs\") pod \"placement-5f78c88b9d-zp4nm\" (UID: \"02d26a97-d447-4f76-90ed-9357e343cd91\") " pod="openstack/placement-5f78c88b9d-zp4nm" Nov 28 07:15:48 crc kubenswrapper[4922]: I1128 07:15:48.849753 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/02d26a97-d447-4f76-90ed-9357e343cd91-public-tls-certs\") pod \"placement-5f78c88b9d-zp4nm\" (UID: \"02d26a97-d447-4f76-90ed-9357e343cd91\") " 
pod="openstack/placement-5f78c88b9d-zp4nm" Nov 28 07:15:48 crc kubenswrapper[4922]: I1128 07:15:48.849816 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/02d26a97-d447-4f76-90ed-9357e343cd91-combined-ca-bundle\") pod \"placement-5f78c88b9d-zp4nm\" (UID: \"02d26a97-d447-4f76-90ed-9357e343cd91\") " pod="openstack/placement-5f78c88b9d-zp4nm" Nov 28 07:15:48 crc kubenswrapper[4922]: I1128 07:15:48.849876 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pcgtm\" (UniqueName: \"kubernetes.io/projected/02d26a97-d447-4f76-90ed-9357e343cd91-kube-api-access-pcgtm\") pod \"placement-5f78c88b9d-zp4nm\" (UID: \"02d26a97-d447-4f76-90ed-9357e343cd91\") " pod="openstack/placement-5f78c88b9d-zp4nm" Nov 28 07:15:48 crc kubenswrapper[4922]: I1128 07:15:48.849896 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/02d26a97-d447-4f76-90ed-9357e343cd91-config-data\") pod \"placement-5f78c88b9d-zp4nm\" (UID: \"02d26a97-d447-4f76-90ed-9357e343cd91\") " pod="openstack/placement-5f78c88b9d-zp4nm" Nov 28 07:15:48 crc kubenswrapper[4922]: I1128 07:15:48.849953 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/02d26a97-d447-4f76-90ed-9357e343cd91-internal-tls-certs\") pod \"placement-5f78c88b9d-zp4nm\" (UID: \"02d26a97-d447-4f76-90ed-9357e343cd91\") " pod="openstack/placement-5f78c88b9d-zp4nm" Nov 28 07:15:48 crc kubenswrapper[4922]: I1128 07:15:48.951927 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/02d26a97-d447-4f76-90ed-9357e343cd91-combined-ca-bundle\") pod \"placement-5f78c88b9d-zp4nm\" (UID: \"02d26a97-d447-4f76-90ed-9357e343cd91\") " pod="openstack/placement-5f78c88b9d-zp4nm" Nov 28 07:15:48 crc kubenswrapper[4922]: I1128 07:15:48.951995 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pcgtm\" (UniqueName: \"kubernetes.io/projected/02d26a97-d447-4f76-90ed-9357e343cd91-kube-api-access-pcgtm\") pod \"placement-5f78c88b9d-zp4nm\" (UID: \"02d26a97-d447-4f76-90ed-9357e343cd91\") " pod="openstack/placement-5f78c88b9d-zp4nm" Nov 28 07:15:48 crc kubenswrapper[4922]: I1128 07:15:48.952017 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/02d26a97-d447-4f76-90ed-9357e343cd91-config-data\") pod \"placement-5f78c88b9d-zp4nm\" (UID: \"02d26a97-d447-4f76-90ed-9357e343cd91\") " pod="openstack/placement-5f78c88b9d-zp4nm" Nov 28 07:15:48 crc kubenswrapper[4922]: I1128 07:15:48.952061 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/02d26a97-d447-4f76-90ed-9357e343cd91-internal-tls-certs\") pod \"placement-5f78c88b9d-zp4nm\" (UID: \"02d26a97-d447-4f76-90ed-9357e343cd91\") " pod="openstack/placement-5f78c88b9d-zp4nm" Nov 28 07:15:48 crc kubenswrapper[4922]: I1128 07:15:48.952104 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/02d26a97-d447-4f76-90ed-9357e343cd91-scripts\") pod \"placement-5f78c88b9d-zp4nm\" (UID: \"02d26a97-d447-4f76-90ed-9357e343cd91\") " 
pod="openstack/placement-5f78c88b9d-zp4nm" Nov 28 07:15:48 crc kubenswrapper[4922]: I1128 07:15:48.952120 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/02d26a97-d447-4f76-90ed-9357e343cd91-logs\") pod \"placement-5f78c88b9d-zp4nm\" (UID: \"02d26a97-d447-4f76-90ed-9357e343cd91\") " pod="openstack/placement-5f78c88b9d-zp4nm" Nov 28 07:15:48 crc kubenswrapper[4922]: I1128 07:15:48.952138 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/02d26a97-d447-4f76-90ed-9357e343cd91-public-tls-certs\") pod \"placement-5f78c88b9d-zp4nm\" (UID: \"02d26a97-d447-4f76-90ed-9357e343cd91\") " pod="openstack/placement-5f78c88b9d-zp4nm" Nov 28 07:15:48 crc kubenswrapper[4922]: I1128 07:15:48.953863 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/02d26a97-d447-4f76-90ed-9357e343cd91-logs\") pod \"placement-5f78c88b9d-zp4nm\" (UID: \"02d26a97-d447-4f76-90ed-9357e343cd91\") " pod="openstack/placement-5f78c88b9d-zp4nm" Nov 28 07:15:48 crc kubenswrapper[4922]: I1128 07:15:48.958163 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/02d26a97-d447-4f76-90ed-9357e343cd91-internal-tls-certs\") pod \"placement-5f78c88b9d-zp4nm\" (UID: \"02d26a97-d447-4f76-90ed-9357e343cd91\") " pod="openstack/placement-5f78c88b9d-zp4nm" Nov 28 07:15:48 crc kubenswrapper[4922]: I1128 07:15:48.961913 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/02d26a97-d447-4f76-90ed-9357e343cd91-public-tls-certs\") pod \"placement-5f78c88b9d-zp4nm\" (UID: \"02d26a97-d447-4f76-90ed-9357e343cd91\") " pod="openstack/placement-5f78c88b9d-zp4nm" Nov 28 07:15:48 crc kubenswrapper[4922]: I1128 07:15:48.962034 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/02d26a97-d447-4f76-90ed-9357e343cd91-combined-ca-bundle\") pod \"placement-5f78c88b9d-zp4nm\" (UID: \"02d26a97-d447-4f76-90ed-9357e343cd91\") " pod="openstack/placement-5f78c88b9d-zp4nm" Nov 28 07:15:48 crc kubenswrapper[4922]: I1128 07:15:48.962459 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/02d26a97-d447-4f76-90ed-9357e343cd91-config-data\") pod \"placement-5f78c88b9d-zp4nm\" (UID: \"02d26a97-d447-4f76-90ed-9357e343cd91\") " pod="openstack/placement-5f78c88b9d-zp4nm" Nov 28 07:15:48 crc kubenswrapper[4922]: I1128 07:15:48.967331 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/02d26a97-d447-4f76-90ed-9357e343cd91-scripts\") pod \"placement-5f78c88b9d-zp4nm\" (UID: \"02d26a97-d447-4f76-90ed-9357e343cd91\") " pod="openstack/placement-5f78c88b9d-zp4nm" Nov 28 07:15:48 crc kubenswrapper[4922]: I1128 07:15:48.975713 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pcgtm\" (UniqueName: \"kubernetes.io/projected/02d26a97-d447-4f76-90ed-9357e343cd91-kube-api-access-pcgtm\") pod \"placement-5f78c88b9d-zp4nm\" (UID: \"02d26a97-d447-4f76-90ed-9357e343cd91\") " pod="openstack/placement-5f78c88b9d-zp4nm" Nov 28 07:15:49 crc kubenswrapper[4922]: I1128 07:15:49.138367 4922 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-5f78c88b9d-zp4nm" Nov 28 07:15:49 crc kubenswrapper[4922]: I1128 07:15:49.641680 4922 generic.go:334] "Generic (PLEG): container finished" podID="2ee8a355-4756-4c55-89fe-0bb0bea586e8" containerID="177756b878e137b89edcfdcd94fc75813e7ec2a27193f00e0237e635f766efd8" exitCode=0 Nov 28 07:15:49 crc kubenswrapper[4922]: I1128 07:15:49.641755 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5f5d458b55-bzzwb" event={"ID":"2ee8a355-4756-4c55-89fe-0bb0bea586e8","Type":"ContainerDied","Data":"177756b878e137b89edcfdcd94fc75813e7ec2a27193f00e0237e635f766efd8"} Nov 28 07:15:49 crc kubenswrapper[4922]: I1128 07:15:49.643181 4922 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-85487d674f-dfq9s"] Nov 28 07:15:49 crc kubenswrapper[4922]: I1128 07:15:49.645802 4922 generic.go:334] "Generic (PLEG): container finished" podID="04965f12-78a7-459e-bbd6-0c716678f561" containerID="ef4aee34e3a95cc67545ce7f5bd5bb7c06099bf473d6fbe68964bb534c89632c" exitCode=0 Nov 28 07:15:49 crc kubenswrapper[4922]: I1128 07:15:49.647525 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-fj8qk" event={"ID":"04965f12-78a7-459e-bbd6-0c716678f561","Type":"ContainerDied","Data":"ef4aee34e3a95cc67545ce7f5bd5bb7c06099bf473d6fbe68964bb534c89632c"} Nov 28 07:15:49 crc kubenswrapper[4922]: I1128 07:15:49.648003 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-85487d674f-dfq9s" Nov 28 07:15:49 crc kubenswrapper[4922]: I1128 07:15:49.650964 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-neutron-internal-svc" Nov 28 07:15:49 crc kubenswrapper[4922]: I1128 07:15:49.651131 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-neutron-public-svc" Nov 28 07:15:49 crc kubenswrapper[4922]: I1128 07:15:49.658601 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-85487d674f-dfq9s"] Nov 28 07:15:49 crc kubenswrapper[4922]: I1128 07:15:49.764577 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2pp9n\" (UniqueName: \"kubernetes.io/projected/cf191164-20d6-4d60-b111-6373616d9622-kube-api-access-2pp9n\") pod \"neutron-85487d674f-dfq9s\" (UID: \"cf191164-20d6-4d60-b111-6373616d9622\") " pod="openstack/neutron-85487d674f-dfq9s" Nov 28 07:15:49 crc kubenswrapper[4922]: I1128 07:15:49.764665 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/cf191164-20d6-4d60-b111-6373616d9622-ovndb-tls-certs\") pod \"neutron-85487d674f-dfq9s\" (UID: \"cf191164-20d6-4d60-b111-6373616d9622\") " pod="openstack/neutron-85487d674f-dfq9s" Nov 28 07:15:49 crc kubenswrapper[4922]: I1128 07:15:49.764760 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/cf191164-20d6-4d60-b111-6373616d9622-httpd-config\") pod \"neutron-85487d674f-dfq9s\" (UID: \"cf191164-20d6-4d60-b111-6373616d9622\") " pod="openstack/neutron-85487d674f-dfq9s" Nov 28 07:15:49 crc kubenswrapper[4922]: I1128 07:15:49.765038 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cf191164-20d6-4d60-b111-6373616d9622-combined-ca-bundle\") pod \"neutron-85487d674f-dfq9s\" 
(UID: \"cf191164-20d6-4d60-b111-6373616d9622\") " pod="openstack/neutron-85487d674f-dfq9s" Nov 28 07:15:49 crc kubenswrapper[4922]: I1128 07:15:49.765120 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/cf191164-20d6-4d60-b111-6373616d9622-public-tls-certs\") pod \"neutron-85487d674f-dfq9s\" (UID: \"cf191164-20d6-4d60-b111-6373616d9622\") " pod="openstack/neutron-85487d674f-dfq9s" Nov 28 07:15:49 crc kubenswrapper[4922]: I1128 07:15:49.765152 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/cf191164-20d6-4d60-b111-6373616d9622-config\") pod \"neutron-85487d674f-dfq9s\" (UID: \"cf191164-20d6-4d60-b111-6373616d9622\") " pod="openstack/neutron-85487d674f-dfq9s" Nov 28 07:15:49 crc kubenswrapper[4922]: I1128 07:15:49.765189 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/cf191164-20d6-4d60-b111-6373616d9622-internal-tls-certs\") pod \"neutron-85487d674f-dfq9s\" (UID: \"cf191164-20d6-4d60-b111-6373616d9622\") " pod="openstack/neutron-85487d674f-dfq9s" Nov 28 07:15:49 crc kubenswrapper[4922]: I1128 07:15:49.866256 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2pp9n\" (UniqueName: \"kubernetes.io/projected/cf191164-20d6-4d60-b111-6373616d9622-kube-api-access-2pp9n\") pod \"neutron-85487d674f-dfq9s\" (UID: \"cf191164-20d6-4d60-b111-6373616d9622\") " pod="openstack/neutron-85487d674f-dfq9s" Nov 28 07:15:49 crc kubenswrapper[4922]: I1128 07:15:49.866329 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/cf191164-20d6-4d60-b111-6373616d9622-ovndb-tls-certs\") pod \"neutron-85487d674f-dfq9s\" (UID: \"cf191164-20d6-4d60-b111-6373616d9622\") " pod="openstack/neutron-85487d674f-dfq9s" Nov 28 07:15:49 crc kubenswrapper[4922]: I1128 07:15:49.866352 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/cf191164-20d6-4d60-b111-6373616d9622-httpd-config\") pod \"neutron-85487d674f-dfq9s\" (UID: \"cf191164-20d6-4d60-b111-6373616d9622\") " pod="openstack/neutron-85487d674f-dfq9s" Nov 28 07:15:49 crc kubenswrapper[4922]: I1128 07:15:49.866414 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cf191164-20d6-4d60-b111-6373616d9622-combined-ca-bundle\") pod \"neutron-85487d674f-dfq9s\" (UID: \"cf191164-20d6-4d60-b111-6373616d9622\") " pod="openstack/neutron-85487d674f-dfq9s" Nov 28 07:15:49 crc kubenswrapper[4922]: I1128 07:15:49.866454 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/cf191164-20d6-4d60-b111-6373616d9622-public-tls-certs\") pod \"neutron-85487d674f-dfq9s\" (UID: \"cf191164-20d6-4d60-b111-6373616d9622\") " pod="openstack/neutron-85487d674f-dfq9s" Nov 28 07:15:49 crc kubenswrapper[4922]: I1128 07:15:49.866477 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/cf191164-20d6-4d60-b111-6373616d9622-config\") pod \"neutron-85487d674f-dfq9s\" (UID: \"cf191164-20d6-4d60-b111-6373616d9622\") " 
pod="openstack/neutron-85487d674f-dfq9s" Nov 28 07:15:49 crc kubenswrapper[4922]: I1128 07:15:49.866501 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/cf191164-20d6-4d60-b111-6373616d9622-internal-tls-certs\") pod \"neutron-85487d674f-dfq9s\" (UID: \"cf191164-20d6-4d60-b111-6373616d9622\") " pod="openstack/neutron-85487d674f-dfq9s" Nov 28 07:15:49 crc kubenswrapper[4922]: I1128 07:15:49.871290 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/cf191164-20d6-4d60-b111-6373616d9622-internal-tls-certs\") pod \"neutron-85487d674f-dfq9s\" (UID: \"cf191164-20d6-4d60-b111-6373616d9622\") " pod="openstack/neutron-85487d674f-dfq9s" Nov 28 07:15:49 crc kubenswrapper[4922]: I1128 07:15:49.877754 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/cf191164-20d6-4d60-b111-6373616d9622-ovndb-tls-certs\") pod \"neutron-85487d674f-dfq9s\" (UID: \"cf191164-20d6-4d60-b111-6373616d9622\") " pod="openstack/neutron-85487d674f-dfq9s" Nov 28 07:15:49 crc kubenswrapper[4922]: I1128 07:15:49.889811 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cf191164-20d6-4d60-b111-6373616d9622-combined-ca-bundle\") pod \"neutron-85487d674f-dfq9s\" (UID: \"cf191164-20d6-4d60-b111-6373616d9622\") " pod="openstack/neutron-85487d674f-dfq9s" Nov 28 07:15:49 crc kubenswrapper[4922]: I1128 07:15:49.890925 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/cf191164-20d6-4d60-b111-6373616d9622-public-tls-certs\") pod \"neutron-85487d674f-dfq9s\" (UID: \"cf191164-20d6-4d60-b111-6373616d9622\") " pod="openstack/neutron-85487d674f-dfq9s" Nov 28 07:15:49 crc kubenswrapper[4922]: I1128 07:15:49.891273 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/secret/cf191164-20d6-4d60-b111-6373616d9622-config\") pod \"neutron-85487d674f-dfq9s\" (UID: \"cf191164-20d6-4d60-b111-6373616d9622\") " pod="openstack/neutron-85487d674f-dfq9s" Nov 28 07:15:49 crc kubenswrapper[4922]: I1128 07:15:49.893680 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/cf191164-20d6-4d60-b111-6373616d9622-httpd-config\") pod \"neutron-85487d674f-dfq9s\" (UID: \"cf191164-20d6-4d60-b111-6373616d9622\") " pod="openstack/neutron-85487d674f-dfq9s" Nov 28 07:15:49 crc kubenswrapper[4922]: I1128 07:15:49.899491 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2pp9n\" (UniqueName: \"kubernetes.io/projected/cf191164-20d6-4d60-b111-6373616d9622-kube-api-access-2pp9n\") pod \"neutron-85487d674f-dfq9s\" (UID: \"cf191164-20d6-4d60-b111-6373616d9622\") " pod="openstack/neutron-85487d674f-dfq9s" Nov 28 07:15:49 crc kubenswrapper[4922]: I1128 07:15:49.973427 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-85487d674f-dfq9s" Nov 28 07:15:50 crc kubenswrapper[4922]: I1128 07:15:50.090965 4922 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-internal-api-0" Nov 28 07:15:50 crc kubenswrapper[4922]: I1128 07:15:50.170817 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/acf55119-1851-4271-ae26-4d21ccf6552d-logs\") pod \"acf55119-1851-4271-ae26-4d21ccf6552d\" (UID: \"acf55119-1851-4271-ae26-4d21ccf6552d\") " Nov 28 07:15:50 crc kubenswrapper[4922]: I1128 07:15:50.170892 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/acf55119-1851-4271-ae26-4d21ccf6552d-scripts\") pod \"acf55119-1851-4271-ae26-4d21ccf6552d\" (UID: \"acf55119-1851-4271-ae26-4d21ccf6552d\") " Nov 28 07:15:50 crc kubenswrapper[4922]: I1128 07:15:50.170965 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zwbxb\" (UniqueName: \"kubernetes.io/projected/acf55119-1851-4271-ae26-4d21ccf6552d-kube-api-access-zwbxb\") pod \"acf55119-1851-4271-ae26-4d21ccf6552d\" (UID: \"acf55119-1851-4271-ae26-4d21ccf6552d\") " Nov 28 07:15:50 crc kubenswrapper[4922]: I1128 07:15:50.171027 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/acf55119-1851-4271-ae26-4d21ccf6552d-config-data\") pod \"acf55119-1851-4271-ae26-4d21ccf6552d\" (UID: \"acf55119-1851-4271-ae26-4d21ccf6552d\") " Nov 28 07:15:50 crc kubenswrapper[4922]: I1128 07:15:50.171065 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/acf55119-1851-4271-ae26-4d21ccf6552d-httpd-run\") pod \"acf55119-1851-4271-ae26-4d21ccf6552d\" (UID: \"acf55119-1851-4271-ae26-4d21ccf6552d\") " Nov 28 07:15:50 crc kubenswrapper[4922]: I1128 07:15:50.171122 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"acf55119-1851-4271-ae26-4d21ccf6552d\" (UID: \"acf55119-1851-4271-ae26-4d21ccf6552d\") " Nov 28 07:15:50 crc kubenswrapper[4922]: I1128 07:15:50.171164 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/acf55119-1851-4271-ae26-4d21ccf6552d-combined-ca-bundle\") pod \"acf55119-1851-4271-ae26-4d21ccf6552d\" (UID: \"acf55119-1851-4271-ae26-4d21ccf6552d\") " Nov 28 07:15:50 crc kubenswrapper[4922]: I1128 07:15:50.171275 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/acf55119-1851-4271-ae26-4d21ccf6552d-logs" (OuterVolumeSpecName: "logs") pod "acf55119-1851-4271-ae26-4d21ccf6552d" (UID: "acf55119-1851-4271-ae26-4d21ccf6552d"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 07:15:50 crc kubenswrapper[4922]: I1128 07:15:50.171443 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/acf55119-1851-4271-ae26-4d21ccf6552d-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "acf55119-1851-4271-ae26-4d21ccf6552d" (UID: "acf55119-1851-4271-ae26-4d21ccf6552d"). InnerVolumeSpecName "httpd-run". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 07:15:50 crc kubenswrapper[4922]: I1128 07:15:50.171553 4922 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/acf55119-1851-4271-ae26-4d21ccf6552d-logs\") on node \"crc\" DevicePath \"\"" Nov 28 07:15:50 crc kubenswrapper[4922]: I1128 07:15:50.171565 4922 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/acf55119-1851-4271-ae26-4d21ccf6552d-httpd-run\") on node \"crc\" DevicePath \"\"" Nov 28 07:15:50 crc kubenswrapper[4922]: I1128 07:15:50.174742 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/acf55119-1851-4271-ae26-4d21ccf6552d-scripts" (OuterVolumeSpecName: "scripts") pod "acf55119-1851-4271-ae26-4d21ccf6552d" (UID: "acf55119-1851-4271-ae26-4d21ccf6552d"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 07:15:50 crc kubenswrapper[4922]: I1128 07:15:50.176383 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage10-crc" (OuterVolumeSpecName: "glance") pod "acf55119-1851-4271-ae26-4d21ccf6552d" (UID: "acf55119-1851-4271-ae26-4d21ccf6552d"). InnerVolumeSpecName "local-storage10-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Nov 28 07:15:50 crc kubenswrapper[4922]: I1128 07:15:50.182413 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/acf55119-1851-4271-ae26-4d21ccf6552d-kube-api-access-zwbxb" (OuterVolumeSpecName: "kube-api-access-zwbxb") pod "acf55119-1851-4271-ae26-4d21ccf6552d" (UID: "acf55119-1851-4271-ae26-4d21ccf6552d"). InnerVolumeSpecName "kube-api-access-zwbxb". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 07:15:50 crc kubenswrapper[4922]: I1128 07:15:50.200587 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/acf55119-1851-4271-ae26-4d21ccf6552d-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "acf55119-1851-4271-ae26-4d21ccf6552d" (UID: "acf55119-1851-4271-ae26-4d21ccf6552d"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 07:15:50 crc kubenswrapper[4922]: I1128 07:15:50.223853 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/acf55119-1851-4271-ae26-4d21ccf6552d-config-data" (OuterVolumeSpecName: "config-data") pod "acf55119-1851-4271-ae26-4d21ccf6552d" (UID: "acf55119-1851-4271-ae26-4d21ccf6552d"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 07:15:50 crc kubenswrapper[4922]: I1128 07:15:50.273267 4922 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/acf55119-1851-4271-ae26-4d21ccf6552d-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 07:15:50 crc kubenswrapper[4922]: I1128 07:15:50.273307 4922 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zwbxb\" (UniqueName: \"kubernetes.io/projected/acf55119-1851-4271-ae26-4d21ccf6552d-kube-api-access-zwbxb\") on node \"crc\" DevicePath \"\"" Nov 28 07:15:50 crc kubenswrapper[4922]: I1128 07:15:50.273324 4922 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/acf55119-1851-4271-ae26-4d21ccf6552d-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 07:15:50 crc kubenswrapper[4922]: I1128 07:15:50.273372 4922 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") on node \"crc\" " Nov 28 07:15:50 crc kubenswrapper[4922]: I1128 07:15:50.273385 4922 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/acf55119-1851-4271-ae26-4d21ccf6552d-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 07:15:50 crc kubenswrapper[4922]: I1128 07:15:50.296512 4922 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage10-crc" (UniqueName: "kubernetes.io/local-volume/local-storage10-crc") on node "crc" Nov 28 07:15:50 crc kubenswrapper[4922]: I1128 07:15:50.374650 4922 reconciler_common.go:293] "Volume detached for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") on node \"crc\" DevicePath \"\"" Nov 28 07:15:50 crc kubenswrapper[4922]: I1128 07:15:50.658969 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"acf55119-1851-4271-ae26-4d21ccf6552d","Type":"ContainerDied","Data":"15a6a342db407286fdd4bb3be5307bb2ff6dbaa6a2cee1fa3658d2960d767d52"} Nov 28 07:15:50 crc kubenswrapper[4922]: I1128 07:15:50.659050 4922 scope.go:117] "RemoveContainer" containerID="d5aa29ff2c40d00537a99d8e08387389cf91fbc821105ee1446caf24b221c819" Nov 28 07:15:50 crc kubenswrapper[4922]: I1128 07:15:50.658992 4922 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-internal-api-0" Nov 28 07:15:50 crc kubenswrapper[4922]: I1128 07:15:50.714553 4922 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 28 07:15:50 crc kubenswrapper[4922]: I1128 07:15:50.727655 4922 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 28 07:15:50 crc kubenswrapper[4922]: I1128 07:15:50.743310 4922 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 28 07:15:50 crc kubenswrapper[4922]: E1128 07:15:50.743847 4922 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="acf55119-1851-4271-ae26-4d21ccf6552d" containerName="glance-httpd" Nov 28 07:15:50 crc kubenswrapper[4922]: I1128 07:15:50.743863 4922 state_mem.go:107] "Deleted CPUSet assignment" podUID="acf55119-1851-4271-ae26-4d21ccf6552d" containerName="glance-httpd" Nov 28 07:15:50 crc kubenswrapper[4922]: E1128 07:15:50.743886 4922 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="acf55119-1851-4271-ae26-4d21ccf6552d" containerName="glance-log" Nov 28 07:15:50 crc kubenswrapper[4922]: I1128 07:15:50.743897 4922 state_mem.go:107] "Deleted CPUSet assignment" podUID="acf55119-1851-4271-ae26-4d21ccf6552d" containerName="glance-log" Nov 28 07:15:50 crc kubenswrapper[4922]: I1128 07:15:50.744165 4922 memory_manager.go:354] "RemoveStaleState removing state" podUID="acf55119-1851-4271-ae26-4d21ccf6552d" containerName="glance-log" Nov 28 07:15:50 crc kubenswrapper[4922]: I1128 07:15:50.744181 4922 memory_manager.go:354] "RemoveStaleState removing state" podUID="acf55119-1851-4271-ae26-4d21ccf6552d" containerName="glance-httpd" Nov 28 07:15:50 crc kubenswrapper[4922]: I1128 07:15:50.745651 4922 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-internal-api-0" Nov 28 07:15:50 crc kubenswrapper[4922]: I1128 07:15:50.751114 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 28 07:15:50 crc kubenswrapper[4922]: I1128 07:15:50.751767 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-internal-config-data" Nov 28 07:15:50 crc kubenswrapper[4922]: I1128 07:15:50.752379 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-glance-default-internal-svc" Nov 28 07:15:50 crc kubenswrapper[4922]: I1128 07:15:50.884479 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/6b683f38-454d-454f-8e2f-66270d8b6ad4-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"6b683f38-454d-454f-8e2f-66270d8b6ad4\") " pod="openstack/glance-default-internal-api-0" Nov 28 07:15:50 crc kubenswrapper[4922]: I1128 07:15:50.884536 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/6b683f38-454d-454f-8e2f-66270d8b6ad4-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"6b683f38-454d-454f-8e2f-66270d8b6ad4\") " pod="openstack/glance-default-internal-api-0" Nov 28 07:15:50 crc kubenswrapper[4922]: I1128 07:15:50.884593 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6b683f38-454d-454f-8e2f-66270d8b6ad4-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"6b683f38-454d-454f-8e2f-66270d8b6ad4\") " pod="openstack/glance-default-internal-api-0" Nov 28 07:15:50 crc kubenswrapper[4922]: I1128 07:15:50.884675 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6b683f38-454d-454f-8e2f-66270d8b6ad4-config-data\") pod \"glance-default-internal-api-0\" (UID: \"6b683f38-454d-454f-8e2f-66270d8b6ad4\") " pod="openstack/glance-default-internal-api-0" Nov 28 07:15:50 crc kubenswrapper[4922]: I1128 07:15:50.884737 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-scqvh\" (UniqueName: \"kubernetes.io/projected/6b683f38-454d-454f-8e2f-66270d8b6ad4-kube-api-access-scqvh\") pod \"glance-default-internal-api-0\" (UID: \"6b683f38-454d-454f-8e2f-66270d8b6ad4\") " pod="openstack/glance-default-internal-api-0" Nov 28 07:15:50 crc kubenswrapper[4922]: I1128 07:15:50.884761 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/6b683f38-454d-454f-8e2f-66270d8b6ad4-logs\") pod \"glance-default-internal-api-0\" (UID: \"6b683f38-454d-454f-8e2f-66270d8b6ad4\") " pod="openstack/glance-default-internal-api-0" Nov 28 07:15:50 crc kubenswrapper[4922]: I1128 07:15:50.884790 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"glance-default-internal-api-0\" (UID: \"6b683f38-454d-454f-8e2f-66270d8b6ad4\") " pod="openstack/glance-default-internal-api-0" Nov 28 07:15:50 crc kubenswrapper[4922]: I1128 07:15:50.884818 4922 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6b683f38-454d-454f-8e2f-66270d8b6ad4-scripts\") pod \"glance-default-internal-api-0\" (UID: \"6b683f38-454d-454f-8e2f-66270d8b6ad4\") " pod="openstack/glance-default-internal-api-0" Nov 28 07:15:50 crc kubenswrapper[4922]: I1128 07:15:50.986357 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6b683f38-454d-454f-8e2f-66270d8b6ad4-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"6b683f38-454d-454f-8e2f-66270d8b6ad4\") " pod="openstack/glance-default-internal-api-0" Nov 28 07:15:50 crc kubenswrapper[4922]: I1128 07:15:50.986456 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6b683f38-454d-454f-8e2f-66270d8b6ad4-config-data\") pod \"glance-default-internal-api-0\" (UID: \"6b683f38-454d-454f-8e2f-66270d8b6ad4\") " pod="openstack/glance-default-internal-api-0" Nov 28 07:15:50 crc kubenswrapper[4922]: I1128 07:15:50.986504 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-scqvh\" (UniqueName: \"kubernetes.io/projected/6b683f38-454d-454f-8e2f-66270d8b6ad4-kube-api-access-scqvh\") pod \"glance-default-internal-api-0\" (UID: \"6b683f38-454d-454f-8e2f-66270d8b6ad4\") " pod="openstack/glance-default-internal-api-0" Nov 28 07:15:50 crc kubenswrapper[4922]: I1128 07:15:50.986520 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/6b683f38-454d-454f-8e2f-66270d8b6ad4-logs\") pod \"glance-default-internal-api-0\" (UID: \"6b683f38-454d-454f-8e2f-66270d8b6ad4\") " pod="openstack/glance-default-internal-api-0" Nov 28 07:15:50 crc kubenswrapper[4922]: I1128 07:15:50.986544 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"glance-default-internal-api-0\" (UID: \"6b683f38-454d-454f-8e2f-66270d8b6ad4\") " pod="openstack/glance-default-internal-api-0" Nov 28 07:15:50 crc kubenswrapper[4922]: I1128 07:15:50.986564 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6b683f38-454d-454f-8e2f-66270d8b6ad4-scripts\") pod \"glance-default-internal-api-0\" (UID: \"6b683f38-454d-454f-8e2f-66270d8b6ad4\") " pod="openstack/glance-default-internal-api-0" Nov 28 07:15:50 crc kubenswrapper[4922]: I1128 07:15:50.986596 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/6b683f38-454d-454f-8e2f-66270d8b6ad4-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"6b683f38-454d-454f-8e2f-66270d8b6ad4\") " pod="openstack/glance-default-internal-api-0" Nov 28 07:15:50 crc kubenswrapper[4922]: I1128 07:15:50.986620 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/6b683f38-454d-454f-8e2f-66270d8b6ad4-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"6b683f38-454d-454f-8e2f-66270d8b6ad4\") " pod="openstack/glance-default-internal-api-0" Nov 28 07:15:50 crc kubenswrapper[4922]: I1128 07:15:50.987266 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: 
\"kubernetes.io/empty-dir/6b683f38-454d-454f-8e2f-66270d8b6ad4-logs\") pod \"glance-default-internal-api-0\" (UID: \"6b683f38-454d-454f-8e2f-66270d8b6ad4\") " pod="openstack/glance-default-internal-api-0" Nov 28 07:15:50 crc kubenswrapper[4922]: I1128 07:15:50.991992 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/6b683f38-454d-454f-8e2f-66270d8b6ad4-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"6b683f38-454d-454f-8e2f-66270d8b6ad4\") " pod="openstack/glance-default-internal-api-0" Nov 28 07:15:50 crc kubenswrapper[4922]: I1128 07:15:50.992244 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/6b683f38-454d-454f-8e2f-66270d8b6ad4-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"6b683f38-454d-454f-8e2f-66270d8b6ad4\") " pod="openstack/glance-default-internal-api-0" Nov 28 07:15:50 crc kubenswrapper[4922]: I1128 07:15:50.992462 4922 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"glance-default-internal-api-0\" (UID: \"6b683f38-454d-454f-8e2f-66270d8b6ad4\") device mount path \"/mnt/openstack/pv10\"" pod="openstack/glance-default-internal-api-0" Nov 28 07:15:50 crc kubenswrapper[4922]: I1128 07:15:50.994605 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6b683f38-454d-454f-8e2f-66270d8b6ad4-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"6b683f38-454d-454f-8e2f-66270d8b6ad4\") " pod="openstack/glance-default-internal-api-0" Nov 28 07:15:51 crc kubenswrapper[4922]: I1128 07:15:51.005461 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6b683f38-454d-454f-8e2f-66270d8b6ad4-scripts\") pod \"glance-default-internal-api-0\" (UID: \"6b683f38-454d-454f-8e2f-66270d8b6ad4\") " pod="openstack/glance-default-internal-api-0" Nov 28 07:15:51 crc kubenswrapper[4922]: I1128 07:15:51.006835 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6b683f38-454d-454f-8e2f-66270d8b6ad4-config-data\") pod \"glance-default-internal-api-0\" (UID: \"6b683f38-454d-454f-8e2f-66270d8b6ad4\") " pod="openstack/glance-default-internal-api-0" Nov 28 07:15:51 crc kubenswrapper[4922]: I1128 07:15:51.011650 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-scqvh\" (UniqueName: \"kubernetes.io/projected/6b683f38-454d-454f-8e2f-66270d8b6ad4-kube-api-access-scqvh\") pod \"glance-default-internal-api-0\" (UID: \"6b683f38-454d-454f-8e2f-66270d8b6ad4\") " pod="openstack/glance-default-internal-api-0" Nov 28 07:15:51 crc kubenswrapper[4922]: I1128 07:15:51.027835 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"glance-default-internal-api-0\" (UID: \"6b683f38-454d-454f-8e2f-66270d8b6ad4\") " pod="openstack/glance-default-internal-api-0" Nov 28 07:15:51 crc kubenswrapper[4922]: I1128 07:15:51.084430 4922 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-internal-api-0" Nov 28 07:15:51 crc kubenswrapper[4922]: I1128 07:15:51.409131 4922 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="acf55119-1851-4271-ae26-4d21ccf6552d" path="/var/lib/kubelet/pods/acf55119-1851-4271-ae26-4d21ccf6552d/volumes" Nov 28 07:15:52 crc kubenswrapper[4922]: I1128 07:15:52.115475 4922 scope.go:117] "RemoveContainer" containerID="4130053fe42b4ca6e08342bba09d7c25519dbb5f79b2b3e2edade0ae74182105" Nov 28 07:15:52 crc kubenswrapper[4922]: I1128 07:15:52.138713 4922 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 28 07:15:52 crc kubenswrapper[4922]: I1128 07:15:52.311351 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/dcbb1549-6821-4dd8-b4b9-df263325b1d0-combined-ca-bundle\") pod \"dcbb1549-6821-4dd8-b4b9-df263325b1d0\" (UID: \"dcbb1549-6821-4dd8-b4b9-df263325b1d0\") " Nov 28 07:15:52 crc kubenswrapper[4922]: I1128 07:15:52.311464 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/dcbb1549-6821-4dd8-b4b9-df263325b1d0-logs\") pod \"dcbb1549-6821-4dd8-b4b9-df263325b1d0\" (UID: \"dcbb1549-6821-4dd8-b4b9-df263325b1d0\") " Nov 28 07:15:52 crc kubenswrapper[4922]: I1128 07:15:52.311485 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4z656\" (UniqueName: \"kubernetes.io/projected/dcbb1549-6821-4dd8-b4b9-df263325b1d0-kube-api-access-4z656\") pod \"dcbb1549-6821-4dd8-b4b9-df263325b1d0\" (UID: \"dcbb1549-6821-4dd8-b4b9-df263325b1d0\") " Nov 28 07:15:52 crc kubenswrapper[4922]: I1128 07:15:52.311514 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/dcbb1549-6821-4dd8-b4b9-df263325b1d0-config-data\") pod \"dcbb1549-6821-4dd8-b4b9-df263325b1d0\" (UID: \"dcbb1549-6821-4dd8-b4b9-df263325b1d0\") " Nov 28 07:15:52 crc kubenswrapper[4922]: I1128 07:15:52.311605 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") pod \"dcbb1549-6821-4dd8-b4b9-df263325b1d0\" (UID: \"dcbb1549-6821-4dd8-b4b9-df263325b1d0\") " Nov 28 07:15:52 crc kubenswrapper[4922]: I1128 07:15:52.311664 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/dcbb1549-6821-4dd8-b4b9-df263325b1d0-httpd-run\") pod \"dcbb1549-6821-4dd8-b4b9-df263325b1d0\" (UID: \"dcbb1549-6821-4dd8-b4b9-df263325b1d0\") " Nov 28 07:15:52 crc kubenswrapper[4922]: I1128 07:15:52.311690 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/dcbb1549-6821-4dd8-b4b9-df263325b1d0-scripts\") pod \"dcbb1549-6821-4dd8-b4b9-df263325b1d0\" (UID: \"dcbb1549-6821-4dd8-b4b9-df263325b1d0\") " Nov 28 07:15:52 crc kubenswrapper[4922]: I1128 07:15:52.313553 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/dcbb1549-6821-4dd8-b4b9-df263325b1d0-logs" (OuterVolumeSpecName: "logs") pod "dcbb1549-6821-4dd8-b4b9-df263325b1d0" (UID: "dcbb1549-6821-4dd8-b4b9-df263325b1d0"). InnerVolumeSpecName "logs". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 07:15:52 crc kubenswrapper[4922]: I1128 07:15:52.313788 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/dcbb1549-6821-4dd8-b4b9-df263325b1d0-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "dcbb1549-6821-4dd8-b4b9-df263325b1d0" (UID: "dcbb1549-6821-4dd8-b4b9-df263325b1d0"). InnerVolumeSpecName "httpd-run". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 07:15:52 crc kubenswrapper[4922]: I1128 07:15:52.376460 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage11-crc" (OuterVolumeSpecName: "glance") pod "dcbb1549-6821-4dd8-b4b9-df263325b1d0" (UID: "dcbb1549-6821-4dd8-b4b9-df263325b1d0"). InnerVolumeSpecName "local-storage11-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Nov 28 07:15:52 crc kubenswrapper[4922]: I1128 07:15:52.383949 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/dcbb1549-6821-4dd8-b4b9-df263325b1d0-scripts" (OuterVolumeSpecName: "scripts") pod "dcbb1549-6821-4dd8-b4b9-df263325b1d0" (UID: "dcbb1549-6821-4dd8-b4b9-df263325b1d0"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 07:15:52 crc kubenswrapper[4922]: I1128 07:15:52.392750 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/dcbb1549-6821-4dd8-b4b9-df263325b1d0-kube-api-access-4z656" (OuterVolumeSpecName: "kube-api-access-4z656") pod "dcbb1549-6821-4dd8-b4b9-df263325b1d0" (UID: "dcbb1549-6821-4dd8-b4b9-df263325b1d0"). InnerVolumeSpecName "kube-api-access-4z656". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 07:15:52 crc kubenswrapper[4922]: I1128 07:15:52.413669 4922 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/dcbb1549-6821-4dd8-b4b9-df263325b1d0-httpd-run\") on node \"crc\" DevicePath \"\"" Nov 28 07:15:52 crc kubenswrapper[4922]: I1128 07:15:52.413696 4922 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/dcbb1549-6821-4dd8-b4b9-df263325b1d0-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 07:15:52 crc kubenswrapper[4922]: I1128 07:15:52.413705 4922 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/dcbb1549-6821-4dd8-b4b9-df263325b1d0-logs\") on node \"crc\" DevicePath \"\"" Nov 28 07:15:52 crc kubenswrapper[4922]: I1128 07:15:52.413714 4922 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4z656\" (UniqueName: \"kubernetes.io/projected/dcbb1549-6821-4dd8-b4b9-df263325b1d0-kube-api-access-4z656\") on node \"crc\" DevicePath \"\"" Nov 28 07:15:52 crc kubenswrapper[4922]: I1128 07:15:52.413736 4922 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") on node \"crc\" " Nov 28 07:15:52 crc kubenswrapper[4922]: I1128 07:15:52.418920 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/dcbb1549-6821-4dd8-b4b9-df263325b1d0-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "dcbb1549-6821-4dd8-b4b9-df263325b1d0" (UID: "dcbb1549-6821-4dd8-b4b9-df263325b1d0"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 07:15:52 crc kubenswrapper[4922]: I1128 07:15:52.478326 4922 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-fj8qk" Nov 28 07:15:52 crc kubenswrapper[4922]: I1128 07:15:52.483955 4922 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage11-crc" (UniqueName: "kubernetes.io/local-volume/local-storage11-crc") on node "crc" Nov 28 07:15:52 crc kubenswrapper[4922]: I1128 07:15:52.513066 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/dcbb1549-6821-4dd8-b4b9-df263325b1d0-config-data" (OuterVolumeSpecName: "config-data") pod "dcbb1549-6821-4dd8-b4b9-df263325b1d0" (UID: "dcbb1549-6821-4dd8-b4b9-df263325b1d0"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 07:15:52 crc kubenswrapper[4922]: I1128 07:15:52.515945 4922 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/dcbb1549-6821-4dd8-b4b9-df263325b1d0-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 07:15:52 crc kubenswrapper[4922]: I1128 07:15:52.515963 4922 reconciler_common.go:293] "Volume detached for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") on node \"crc\" DevicePath \"\"" Nov 28 07:15:52 crc kubenswrapper[4922]: I1128 07:15:52.515975 4922 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/dcbb1549-6821-4dd8-b4b9-df263325b1d0-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 07:15:52 crc kubenswrapper[4922]: I1128 07:15:52.521166 4922 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-5f5d458b55-bzzwb" Nov 28 07:15:52 crc kubenswrapper[4922]: I1128 07:15:52.618892 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/04965f12-78a7-459e-bbd6-0c716678f561-scripts\") pod \"04965f12-78a7-459e-bbd6-0c716678f561\" (UID: \"04965f12-78a7-459e-bbd6-0c716678f561\") " Nov 28 07:15:52 crc kubenswrapper[4922]: I1128 07:15:52.618988 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-59l5n\" (UniqueName: \"kubernetes.io/projected/04965f12-78a7-459e-bbd6-0c716678f561-kube-api-access-59l5n\") pod \"04965f12-78a7-459e-bbd6-0c716678f561\" (UID: \"04965f12-78a7-459e-bbd6-0c716678f561\") " Nov 28 07:15:52 crc kubenswrapper[4922]: I1128 07:15:52.619026 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/2ee8a355-4756-4c55-89fe-0bb0bea586e8-dns-swift-storage-0\") pod \"2ee8a355-4756-4c55-89fe-0bb0bea586e8\" (UID: \"2ee8a355-4756-4c55-89fe-0bb0bea586e8\") " Nov 28 07:15:52 crc kubenswrapper[4922]: I1128 07:15:52.619049 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/2ee8a355-4756-4c55-89fe-0bb0bea586e8-ovsdbserver-nb\") pod \"2ee8a355-4756-4c55-89fe-0bb0bea586e8\" (UID: \"2ee8a355-4756-4c55-89fe-0bb0bea586e8\") " Nov 28 07:15:52 crc kubenswrapper[4922]: I1128 07:15:52.619085 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/04965f12-78a7-459e-bbd6-0c716678f561-combined-ca-bundle\") pod \"04965f12-78a7-459e-bbd6-0c716678f561\" (UID: \"04965f12-78a7-459e-bbd6-0c716678f561\") " Nov 28 07:15:52 crc kubenswrapper[4922]: I1128 07:15:52.619111 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/04965f12-78a7-459e-bbd6-0c716678f561-config-data\") pod \"04965f12-78a7-459e-bbd6-0c716678f561\" (UID: \"04965f12-78a7-459e-bbd6-0c716678f561\") " Nov 28 07:15:52 crc kubenswrapper[4922]: I1128 07:15:52.619165 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2ee8a355-4756-4c55-89fe-0bb0bea586e8-config\") pod \"2ee8a355-4756-4c55-89fe-0bb0bea586e8\" (UID: \"2ee8a355-4756-4c55-89fe-0bb0bea586e8\") " Nov 28 07:15:52 crc kubenswrapper[4922]: I1128 07:15:52.619244 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gjktw\" (UniqueName: \"kubernetes.io/projected/2ee8a355-4756-4c55-89fe-0bb0bea586e8-kube-api-access-gjktw\") pod \"2ee8a355-4756-4c55-89fe-0bb0bea586e8\" (UID: \"2ee8a355-4756-4c55-89fe-0bb0bea586e8\") " Nov 28 07:15:52 crc kubenswrapper[4922]: I1128 07:15:52.619267 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/2ee8a355-4756-4c55-89fe-0bb0bea586e8-dns-svc\") pod \"2ee8a355-4756-4c55-89fe-0bb0bea586e8\" (UID: \"2ee8a355-4756-4c55-89fe-0bb0bea586e8\") " Nov 28 07:15:52 crc kubenswrapper[4922]: I1128 07:15:52.619294 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/2ee8a355-4756-4c55-89fe-0bb0bea586e8-ovsdbserver-sb\") pod \"2ee8a355-4756-4c55-89fe-0bb0bea586e8\" (UID: 
\"2ee8a355-4756-4c55-89fe-0bb0bea586e8\") " Nov 28 07:15:52 crc kubenswrapper[4922]: I1128 07:15:52.619328 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/04965f12-78a7-459e-bbd6-0c716678f561-fernet-keys\") pod \"04965f12-78a7-459e-bbd6-0c716678f561\" (UID: \"04965f12-78a7-459e-bbd6-0c716678f561\") " Nov 28 07:15:52 crc kubenswrapper[4922]: I1128 07:15:52.619346 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/04965f12-78a7-459e-bbd6-0c716678f561-credential-keys\") pod \"04965f12-78a7-459e-bbd6-0c716678f561\" (UID: \"04965f12-78a7-459e-bbd6-0c716678f561\") " Nov 28 07:15:52 crc kubenswrapper[4922]: I1128 07:15:52.623368 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/04965f12-78a7-459e-bbd6-0c716678f561-credential-keys" (OuterVolumeSpecName: "credential-keys") pod "04965f12-78a7-459e-bbd6-0c716678f561" (UID: "04965f12-78a7-459e-bbd6-0c716678f561"). InnerVolumeSpecName "credential-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 07:15:52 crc kubenswrapper[4922]: I1128 07:15:52.629181 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/04965f12-78a7-459e-bbd6-0c716678f561-fernet-keys" (OuterVolumeSpecName: "fernet-keys") pod "04965f12-78a7-459e-bbd6-0c716678f561" (UID: "04965f12-78a7-459e-bbd6-0c716678f561"). InnerVolumeSpecName "fernet-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 07:15:52 crc kubenswrapper[4922]: I1128 07:15:52.629794 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/04965f12-78a7-459e-bbd6-0c716678f561-scripts" (OuterVolumeSpecName: "scripts") pod "04965f12-78a7-459e-bbd6-0c716678f561" (UID: "04965f12-78a7-459e-bbd6-0c716678f561"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 07:15:52 crc kubenswrapper[4922]: I1128 07:15:52.633206 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/04965f12-78a7-459e-bbd6-0c716678f561-kube-api-access-59l5n" (OuterVolumeSpecName: "kube-api-access-59l5n") pod "04965f12-78a7-459e-bbd6-0c716678f561" (UID: "04965f12-78a7-459e-bbd6-0c716678f561"). InnerVolumeSpecName "kube-api-access-59l5n". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 07:15:52 crc kubenswrapper[4922]: I1128 07:15:52.635525 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2ee8a355-4756-4c55-89fe-0bb0bea586e8-kube-api-access-gjktw" (OuterVolumeSpecName: "kube-api-access-gjktw") pod "2ee8a355-4756-4c55-89fe-0bb0bea586e8" (UID: "2ee8a355-4756-4c55-89fe-0bb0bea586e8"). InnerVolumeSpecName "kube-api-access-gjktw". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 07:15:52 crc kubenswrapper[4922]: I1128 07:15:52.662909 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/04965f12-78a7-459e-bbd6-0c716678f561-config-data" (OuterVolumeSpecName: "config-data") pod "04965f12-78a7-459e-bbd6-0c716678f561" (UID: "04965f12-78a7-459e-bbd6-0c716678f561"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 07:15:52 crc kubenswrapper[4922]: I1128 07:15:52.693113 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2ee8a355-4756-4c55-89fe-0bb0bea586e8-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "2ee8a355-4756-4c55-89fe-0bb0bea586e8" (UID: "2ee8a355-4756-4c55-89fe-0bb0bea586e8"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 07:15:52 crc kubenswrapper[4922]: I1128 07:15:52.706918 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-64bdbc5658-v8ngg" event={"ID":"0a307c19-4be2-44f2-8034-00ebfa265aac","Type":"ContainerStarted","Data":"0b160c83632765e7deb15535e5fa5a2be7c7cf5b8017005d33b2b358704f3627"} Nov 28 07:15:52 crc kubenswrapper[4922]: I1128 07:15:52.707246 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-64bdbc5658-v8ngg" event={"ID":"0a307c19-4be2-44f2-8034-00ebfa265aac","Type":"ContainerStarted","Data":"845fe27f408879e3d827a81fb1edb1197e5d46171263a5d932dba685bf13577b"} Nov 28 07:15:52 crc kubenswrapper[4922]: I1128 07:15:52.709425 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"dcbb1549-6821-4dd8-b4b9-df263325b1d0","Type":"ContainerDied","Data":"8c2e60ed7c1c83a5a6517b25eee5187ea591bb8b871d8cb73dbf156a6918a724"} Nov 28 07:15:52 crc kubenswrapper[4922]: I1128 07:15:52.709433 4922 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 28 07:15:52 crc kubenswrapper[4922]: I1128 07:15:52.709705 4922 scope.go:117] "RemoveContainer" containerID="98ed3bffee1d607fa511d9f904ebbfa716843f1dbe2ea6cce8781a45673ea183" Nov 28 07:15:52 crc kubenswrapper[4922]: I1128 07:15:52.714528 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-f8dc44d89-v7d7h" event={"ID":"7decb3d6-6624-42fe-9155-4b75224ca2f7","Type":"ContainerStarted","Data":"e7a8226902f6423b5db458b6518ebb22657ad05d0256b51c6da3053ebf656920"} Nov 28 07:15:52 crc kubenswrapper[4922]: I1128 07:15:52.714883 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/04965f12-78a7-459e-bbd6-0c716678f561-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "04965f12-78a7-459e-bbd6-0c716678f561" (UID: "04965f12-78a7-459e-bbd6-0c716678f561"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 07:15:52 crc kubenswrapper[4922]: I1128 07:15:52.715157 4922 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-f8dc44d89-v7d7h" Nov 28 07:15:52 crc kubenswrapper[4922]: I1128 07:15:52.715791 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2ee8a355-4756-4c55-89fe-0bb0bea586e8-config" (OuterVolumeSpecName: "config") pod "2ee8a355-4756-4c55-89fe-0bb0bea586e8" (UID: "2ee8a355-4756-4c55-89fe-0bb0bea586e8"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 07:15:52 crc kubenswrapper[4922]: I1128 07:15:52.720933 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"be4be2de-2f46-4982-8cd6-b73888d293af","Type":"ContainerStarted","Data":"78fe5a4b19803c14f56518cac6656d53e08b75375e0c763c79911b31d05d3987"} Nov 28 07:15:52 crc kubenswrapper[4922]: I1128 07:15:52.721378 4922 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/04965f12-78a7-459e-bbd6-0c716678f561-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 07:15:52 crc kubenswrapper[4922]: I1128 07:15:52.721403 4922 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/04965f12-78a7-459e-bbd6-0c716678f561-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 07:15:52 crc kubenswrapper[4922]: I1128 07:15:52.721412 4922 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2ee8a355-4756-4c55-89fe-0bb0bea586e8-config\") on node \"crc\" DevicePath \"\"" Nov 28 07:15:52 crc kubenswrapper[4922]: I1128 07:15:52.721423 4922 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gjktw\" (UniqueName: \"kubernetes.io/projected/2ee8a355-4756-4c55-89fe-0bb0bea586e8-kube-api-access-gjktw\") on node \"crc\" DevicePath \"\"" Nov 28 07:15:52 crc kubenswrapper[4922]: I1128 07:15:52.721433 4922 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/2ee8a355-4756-4c55-89fe-0bb0bea586e8-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 28 07:15:52 crc kubenswrapper[4922]: I1128 07:15:52.721441 4922 reconciler_common.go:293] "Volume detached for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/04965f12-78a7-459e-bbd6-0c716678f561-fernet-keys\") on node \"crc\" DevicePath \"\"" Nov 28 07:15:52 crc kubenswrapper[4922]: I1128 07:15:52.721449 4922 reconciler_common.go:293] "Volume detached for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/04965f12-78a7-459e-bbd6-0c716678f561-credential-keys\") on node \"crc\" DevicePath \"\"" Nov 28 07:15:52 crc kubenswrapper[4922]: I1128 07:15:52.721457 4922 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/04965f12-78a7-459e-bbd6-0c716678f561-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 07:15:52 crc kubenswrapper[4922]: I1128 07:15:52.721491 4922 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-59l5n\" (UniqueName: \"kubernetes.io/projected/04965f12-78a7-459e-bbd6-0c716678f561-kube-api-access-59l5n\") on node \"crc\" DevicePath \"\"" Nov 28 07:15:52 crc kubenswrapper[4922]: I1128 07:15:52.723292 4922 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-5f5d458b55-bzzwb" Nov 28 07:15:52 crc kubenswrapper[4922]: I1128 07:15:52.723300 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5f5d458b55-bzzwb" event={"ID":"2ee8a355-4756-4c55-89fe-0bb0bea586e8","Type":"ContainerDied","Data":"666a40c7f897d54e54d892595dec0d8d38ae26bac8cd5458898bf6a486bd3835"} Nov 28 07:15:52 crc kubenswrapper[4922]: I1128 07:15:52.725421 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-fj8qk" event={"ID":"04965f12-78a7-459e-bbd6-0c716678f561","Type":"ContainerDied","Data":"bb7415145df7ae011c13c758e24fb0ce9375e5f0ac2a4b1c23ca7c100b858510"} Nov 28 07:15:52 crc kubenswrapper[4922]: I1128 07:15:52.725445 4922 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="bb7415145df7ae011c13c758e24fb0ce9375e5f0ac2a4b1c23ca7c100b858510" Nov 28 07:15:52 crc kubenswrapper[4922]: I1128 07:15:52.725486 4922 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-fj8qk" Nov 28 07:15:52 crc kubenswrapper[4922]: I1128 07:15:52.725945 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2ee8a355-4756-4c55-89fe-0bb0bea586e8-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "2ee8a355-4756-4c55-89fe-0bb0bea586e8" (UID: "2ee8a355-4756-4c55-89fe-0bb0bea586e8"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 07:15:52 crc kubenswrapper[4922]: I1128 07:15:52.727888 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2ee8a355-4756-4c55-89fe-0bb0bea586e8-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "2ee8a355-4756-4c55-89fe-0bb0bea586e8" (UID: "2ee8a355-4756-4c55-89fe-0bb0bea586e8"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 07:15:52 crc kubenswrapper[4922]: I1128 07:15:52.739030 4922 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-f8dc44d89-v7d7h" podStartSLOduration=6.739013526 podStartE2EDuration="6.739013526s" podCreationTimestamp="2025-11-28 07:15:46 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 07:15:52.729485363 +0000 UTC m=+1397.649880955" watchObservedRunningTime="2025-11-28 07:15:52.739013526 +0000 UTC m=+1397.659409108" Nov 28 07:15:52 crc kubenswrapper[4922]: I1128 07:15:52.744502 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2ee8a355-4756-4c55-89fe-0bb0bea586e8-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "2ee8a355-4756-4c55-89fe-0bb0bea586e8" (UID: "2ee8a355-4756-4c55-89fe-0bb0bea586e8"). InnerVolumeSpecName "ovsdbserver-sb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 07:15:52 crc kubenswrapper[4922]: I1128 07:15:52.756696 4922 scope.go:117] "RemoveContainer" containerID="79fd9965bc61deb2017fc22a828a5ec23dbee684022aab9d5c8d2f0d4159ae4d" Nov 28 07:15:52 crc kubenswrapper[4922]: I1128 07:15:52.777818 4922 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 28 07:15:52 crc kubenswrapper[4922]: I1128 07:15:52.827407 4922 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/2ee8a355-4756-4c55-89fe-0bb0bea586e8-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 28 07:15:52 crc kubenswrapper[4922]: I1128 07:15:52.827446 4922 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/2ee8a355-4756-4c55-89fe-0bb0bea586e8-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Nov 28 07:15:52 crc kubenswrapper[4922]: I1128 07:15:52.827461 4922 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/2ee8a355-4756-4c55-89fe-0bb0bea586e8-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 28 07:15:52 crc kubenswrapper[4922]: I1128 07:15:52.830303 4922 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 28 07:15:52 crc kubenswrapper[4922]: I1128 07:15:52.850480 4922 scope.go:117] "RemoveContainer" containerID="177756b878e137b89edcfdcd94fc75813e7ec2a27193f00e0237e635f766efd8" Nov 28 07:15:52 crc kubenswrapper[4922]: I1128 07:15:52.857263 4922 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-external-api-0"] Nov 28 07:15:52 crc kubenswrapper[4922]: E1128 07:15:52.857736 4922 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2ee8a355-4756-4c55-89fe-0bb0bea586e8" containerName="init" Nov 28 07:15:52 crc kubenswrapper[4922]: I1128 07:15:52.857761 4922 state_mem.go:107] "Deleted CPUSet assignment" podUID="2ee8a355-4756-4c55-89fe-0bb0bea586e8" containerName="init" Nov 28 07:15:52 crc kubenswrapper[4922]: E1128 07:15:52.857798 4922 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2ee8a355-4756-4c55-89fe-0bb0bea586e8" containerName="dnsmasq-dns" Nov 28 07:15:52 crc kubenswrapper[4922]: I1128 07:15:52.857809 4922 state_mem.go:107] "Deleted CPUSet assignment" podUID="2ee8a355-4756-4c55-89fe-0bb0bea586e8" containerName="dnsmasq-dns" Nov 28 07:15:52 crc kubenswrapper[4922]: E1128 07:15:52.857827 4922 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="04965f12-78a7-459e-bbd6-0c716678f561" containerName="keystone-bootstrap" Nov 28 07:15:52 crc kubenswrapper[4922]: I1128 07:15:52.857835 4922 state_mem.go:107] "Deleted CPUSet assignment" podUID="04965f12-78a7-459e-bbd6-0c716678f561" containerName="keystone-bootstrap" Nov 28 07:15:52 crc kubenswrapper[4922]: E1128 07:15:52.857854 4922 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="dcbb1549-6821-4dd8-b4b9-df263325b1d0" containerName="glance-httpd" Nov 28 07:15:52 crc kubenswrapper[4922]: I1128 07:15:52.857863 4922 state_mem.go:107] "Deleted CPUSet assignment" podUID="dcbb1549-6821-4dd8-b4b9-df263325b1d0" containerName="glance-httpd" Nov 28 07:15:52 crc kubenswrapper[4922]: E1128 07:15:52.857876 4922 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="dcbb1549-6821-4dd8-b4b9-df263325b1d0" containerName="glance-log" Nov 28 07:15:52 crc kubenswrapper[4922]: I1128 07:15:52.857884 4922 
state_mem.go:107] "Deleted CPUSet assignment" podUID="dcbb1549-6821-4dd8-b4b9-df263325b1d0" containerName="glance-log" Nov 28 07:15:52 crc kubenswrapper[4922]: I1128 07:15:52.858103 4922 memory_manager.go:354] "RemoveStaleState removing state" podUID="04965f12-78a7-459e-bbd6-0c716678f561" containerName="keystone-bootstrap" Nov 28 07:15:52 crc kubenswrapper[4922]: I1128 07:15:52.858124 4922 memory_manager.go:354] "RemoveStaleState removing state" podUID="dcbb1549-6821-4dd8-b4b9-df263325b1d0" containerName="glance-log" Nov 28 07:15:52 crc kubenswrapper[4922]: I1128 07:15:52.858146 4922 memory_manager.go:354] "RemoveStaleState removing state" podUID="2ee8a355-4756-4c55-89fe-0bb0bea586e8" containerName="dnsmasq-dns" Nov 28 07:15:52 crc kubenswrapper[4922]: I1128 07:15:52.858164 4922 memory_manager.go:354] "RemoveStaleState removing state" podUID="dcbb1549-6821-4dd8-b4b9-df263325b1d0" containerName="glance-httpd" Nov 28 07:15:52 crc kubenswrapper[4922]: I1128 07:15:52.859476 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 28 07:15:52 crc kubenswrapper[4922]: I1128 07:15:52.864863 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-glance-default-public-svc" Nov 28 07:15:52 crc kubenswrapper[4922]: I1128 07:15:52.865239 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-external-config-data" Nov 28 07:15:52 crc kubenswrapper[4922]: I1128 07:15:52.866370 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 28 07:15:52 crc kubenswrapper[4922]: I1128 07:15:52.875473 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-5f78c88b9d-zp4nm"] Nov 28 07:15:52 crc kubenswrapper[4922]: I1128 07:15:52.879953 4922 scope.go:117] "RemoveContainer" containerID="53d3e5a061caada09b191e3d5182b2ddbedb79361efd462f2bf21800a501e89f" Nov 28 07:15:52 crc kubenswrapper[4922]: I1128 07:15:52.929066 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/adb38287-ea0b-4e02-8bda-8022f86b0d81-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"adb38287-ea0b-4e02-8bda-8022f86b0d81\") " pod="openstack/glance-default-external-api-0" Nov 28 07:15:52 crc kubenswrapper[4922]: I1128 07:15:52.929113 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/adb38287-ea0b-4e02-8bda-8022f86b0d81-scripts\") pod \"glance-default-external-api-0\" (UID: \"adb38287-ea0b-4e02-8bda-8022f86b0d81\") " pod="openstack/glance-default-external-api-0" Nov 28 07:15:52 crc kubenswrapper[4922]: I1128 07:15:52.929177 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/adb38287-ea0b-4e02-8bda-8022f86b0d81-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"adb38287-ea0b-4e02-8bda-8022f86b0d81\") " pod="openstack/glance-default-external-api-0" Nov 28 07:15:52 crc kubenswrapper[4922]: I1128 07:15:52.929213 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/adb38287-ea0b-4e02-8bda-8022f86b0d81-config-data\") pod \"glance-default-external-api-0\" (UID: \"adb38287-ea0b-4e02-8bda-8022f86b0d81\") " 
pod="openstack/glance-default-external-api-0" Nov 28 07:15:52 crc kubenswrapper[4922]: I1128 07:15:52.929336 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/adb38287-ea0b-4e02-8bda-8022f86b0d81-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"adb38287-ea0b-4e02-8bda-8022f86b0d81\") " pod="openstack/glance-default-external-api-0" Nov 28 07:15:52 crc kubenswrapper[4922]: I1128 07:15:52.929364 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/adb38287-ea0b-4e02-8bda-8022f86b0d81-logs\") pod \"glance-default-external-api-0\" (UID: \"adb38287-ea0b-4e02-8bda-8022f86b0d81\") " pod="openstack/glance-default-external-api-0" Nov 28 07:15:52 crc kubenswrapper[4922]: I1128 07:15:52.929383 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") pod \"glance-default-external-api-0\" (UID: \"adb38287-ea0b-4e02-8bda-8022f86b0d81\") " pod="openstack/glance-default-external-api-0" Nov 28 07:15:52 crc kubenswrapper[4922]: I1128 07:15:52.929410 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4w7t9\" (UniqueName: \"kubernetes.io/projected/adb38287-ea0b-4e02-8bda-8022f86b0d81-kube-api-access-4w7t9\") pod \"glance-default-external-api-0\" (UID: \"adb38287-ea0b-4e02-8bda-8022f86b0d81\") " pod="openstack/glance-default-external-api-0" Nov 28 07:15:52 crc kubenswrapper[4922]: I1128 07:15:52.946031 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 28 07:15:53 crc kubenswrapper[4922]: I1128 07:15:53.031244 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/adb38287-ea0b-4e02-8bda-8022f86b0d81-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"adb38287-ea0b-4e02-8bda-8022f86b0d81\") " pod="openstack/glance-default-external-api-0" Nov 28 07:15:53 crc kubenswrapper[4922]: I1128 07:15:53.031317 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/adb38287-ea0b-4e02-8bda-8022f86b0d81-scripts\") pod \"glance-default-external-api-0\" (UID: \"adb38287-ea0b-4e02-8bda-8022f86b0d81\") " pod="openstack/glance-default-external-api-0" Nov 28 07:15:53 crc kubenswrapper[4922]: I1128 07:15:53.031377 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/adb38287-ea0b-4e02-8bda-8022f86b0d81-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"adb38287-ea0b-4e02-8bda-8022f86b0d81\") " pod="openstack/glance-default-external-api-0" Nov 28 07:15:53 crc kubenswrapper[4922]: I1128 07:15:53.031428 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/adb38287-ea0b-4e02-8bda-8022f86b0d81-config-data\") pod \"glance-default-external-api-0\" (UID: \"adb38287-ea0b-4e02-8bda-8022f86b0d81\") " pod="openstack/glance-default-external-api-0" Nov 28 07:15:53 crc kubenswrapper[4922]: I1128 07:15:53.031454 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: 
\"kubernetes.io/secret/adb38287-ea0b-4e02-8bda-8022f86b0d81-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"adb38287-ea0b-4e02-8bda-8022f86b0d81\") " pod="openstack/glance-default-external-api-0" Nov 28 07:15:53 crc kubenswrapper[4922]: I1128 07:15:53.031498 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/adb38287-ea0b-4e02-8bda-8022f86b0d81-logs\") pod \"glance-default-external-api-0\" (UID: \"adb38287-ea0b-4e02-8bda-8022f86b0d81\") " pod="openstack/glance-default-external-api-0" Nov 28 07:15:53 crc kubenswrapper[4922]: I1128 07:15:53.031524 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") pod \"glance-default-external-api-0\" (UID: \"adb38287-ea0b-4e02-8bda-8022f86b0d81\") " pod="openstack/glance-default-external-api-0" Nov 28 07:15:53 crc kubenswrapper[4922]: I1128 07:15:53.031564 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4w7t9\" (UniqueName: \"kubernetes.io/projected/adb38287-ea0b-4e02-8bda-8022f86b0d81-kube-api-access-4w7t9\") pod \"glance-default-external-api-0\" (UID: \"adb38287-ea0b-4e02-8bda-8022f86b0d81\") " pod="openstack/glance-default-external-api-0" Nov 28 07:15:53 crc kubenswrapper[4922]: I1128 07:15:53.032690 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/adb38287-ea0b-4e02-8bda-8022f86b0d81-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"adb38287-ea0b-4e02-8bda-8022f86b0d81\") " pod="openstack/glance-default-external-api-0" Nov 28 07:15:53 crc kubenswrapper[4922]: I1128 07:15:53.033706 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/adb38287-ea0b-4e02-8bda-8022f86b0d81-logs\") pod \"glance-default-external-api-0\" (UID: \"adb38287-ea0b-4e02-8bda-8022f86b0d81\") " pod="openstack/glance-default-external-api-0" Nov 28 07:15:53 crc kubenswrapper[4922]: I1128 07:15:53.033873 4922 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") pod \"glance-default-external-api-0\" (UID: \"adb38287-ea0b-4e02-8bda-8022f86b0d81\") device mount path \"/mnt/openstack/pv11\"" pod="openstack/glance-default-external-api-0" Nov 28 07:15:53 crc kubenswrapper[4922]: I1128 07:15:53.037858 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/adb38287-ea0b-4e02-8bda-8022f86b0d81-config-data\") pod \"glance-default-external-api-0\" (UID: \"adb38287-ea0b-4e02-8bda-8022f86b0d81\") " pod="openstack/glance-default-external-api-0" Nov 28 07:15:53 crc kubenswrapper[4922]: I1128 07:15:53.040253 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-85487d674f-dfq9s"] Nov 28 07:15:53 crc kubenswrapper[4922]: I1128 07:15:53.041343 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/adb38287-ea0b-4e02-8bda-8022f86b0d81-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"adb38287-ea0b-4e02-8bda-8022f86b0d81\") " pod="openstack/glance-default-external-api-0" Nov 28 07:15:53 crc kubenswrapper[4922]: I1128 07:15:53.042749 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"scripts\" (UniqueName: \"kubernetes.io/secret/adb38287-ea0b-4e02-8bda-8022f86b0d81-scripts\") pod \"glance-default-external-api-0\" (UID: \"adb38287-ea0b-4e02-8bda-8022f86b0d81\") " pod="openstack/glance-default-external-api-0" Nov 28 07:15:53 crc kubenswrapper[4922]: I1128 07:15:53.044378 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/adb38287-ea0b-4e02-8bda-8022f86b0d81-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"adb38287-ea0b-4e02-8bda-8022f86b0d81\") " pod="openstack/glance-default-external-api-0" Nov 28 07:15:53 crc kubenswrapper[4922]: I1128 07:15:53.056132 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4w7t9\" (UniqueName: \"kubernetes.io/projected/adb38287-ea0b-4e02-8bda-8022f86b0d81-kube-api-access-4w7t9\") pod \"glance-default-external-api-0\" (UID: \"adb38287-ea0b-4e02-8bda-8022f86b0d81\") " pod="openstack/glance-default-external-api-0" Nov 28 07:15:53 crc kubenswrapper[4922]: I1128 07:15:53.081058 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") pod \"glance-default-external-api-0\" (UID: \"adb38287-ea0b-4e02-8bda-8022f86b0d81\") " pod="openstack/glance-default-external-api-0" Nov 28 07:15:53 crc kubenswrapper[4922]: I1128 07:15:53.179581 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 28 07:15:53 crc kubenswrapper[4922]: I1128 07:15:53.363762 4922 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5f5d458b55-bzzwb"] Nov 28 07:15:53 crc kubenswrapper[4922]: I1128 07:15:53.375278 4922 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-5f5d458b55-bzzwb"] Nov 28 07:15:53 crc kubenswrapper[4922]: I1128 07:15:53.419125 4922 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2ee8a355-4756-4c55-89fe-0bb0bea586e8" path="/var/lib/kubelet/pods/2ee8a355-4756-4c55-89fe-0bb0bea586e8/volumes" Nov 28 07:15:53 crc kubenswrapper[4922]: I1128 07:15:53.420636 4922 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="dcbb1549-6821-4dd8-b4b9-df263325b1d0" path="/var/lib/kubelet/pods/dcbb1549-6821-4dd8-b4b9-df263325b1d0/volumes" Nov 28 07:15:53 crc kubenswrapper[4922]: I1128 07:15:53.586377 4922 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-c6c69b978-txpld"] Nov 28 07:15:53 crc kubenswrapper[4922]: I1128 07:15:53.598705 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-c6c69b978-txpld"] Nov 28 07:15:53 crc kubenswrapper[4922]: I1128 07:15:53.598877 4922 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-c6c69b978-txpld" Nov 28 07:15:53 crc kubenswrapper[4922]: I1128 07:15:53.606714 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-config-data" Nov 28 07:15:53 crc kubenswrapper[4922]: I1128 07:15:53.606902 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone" Nov 28 07:15:53 crc kubenswrapper[4922]: I1128 07:15:53.607050 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-keystone-public-svc" Nov 28 07:15:53 crc kubenswrapper[4922]: I1128 07:15:53.607159 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-scripts" Nov 28 07:15:53 crc kubenswrapper[4922]: I1128 07:15:53.609045 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-keystone-internal-svc" Nov 28 07:15:53 crc kubenswrapper[4922]: I1128 07:15:53.611683 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-keystone-dockercfg-vx657" Nov 28 07:15:53 crc kubenswrapper[4922]: I1128 07:15:53.645051 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/81469087-f8d4-4499-a1e3-9fe103758289-config-data\") pod \"keystone-c6c69b978-txpld\" (UID: \"81469087-f8d4-4499-a1e3-9fe103758289\") " pod="openstack/keystone-c6c69b978-txpld" Nov 28 07:15:53 crc kubenswrapper[4922]: I1128 07:15:53.645318 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/81469087-f8d4-4499-a1e3-9fe103758289-internal-tls-certs\") pod \"keystone-c6c69b978-txpld\" (UID: \"81469087-f8d4-4499-a1e3-9fe103758289\") " pod="openstack/keystone-c6c69b978-txpld" Nov 28 07:15:53 crc kubenswrapper[4922]: I1128 07:15:53.645406 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hzxc4\" (UniqueName: \"kubernetes.io/projected/81469087-f8d4-4499-a1e3-9fe103758289-kube-api-access-hzxc4\") pod \"keystone-c6c69b978-txpld\" (UID: \"81469087-f8d4-4499-a1e3-9fe103758289\") " pod="openstack/keystone-c6c69b978-txpld" Nov 28 07:15:53 crc kubenswrapper[4922]: I1128 07:15:53.645495 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/81469087-f8d4-4499-a1e3-9fe103758289-credential-keys\") pod \"keystone-c6c69b978-txpld\" (UID: \"81469087-f8d4-4499-a1e3-9fe103758289\") " pod="openstack/keystone-c6c69b978-txpld" Nov 28 07:15:53 crc kubenswrapper[4922]: I1128 07:15:53.645573 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/81469087-f8d4-4499-a1e3-9fe103758289-fernet-keys\") pod \"keystone-c6c69b978-txpld\" (UID: \"81469087-f8d4-4499-a1e3-9fe103758289\") " pod="openstack/keystone-c6c69b978-txpld" Nov 28 07:15:53 crc kubenswrapper[4922]: I1128 07:15:53.645655 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/81469087-f8d4-4499-a1e3-9fe103758289-scripts\") pod \"keystone-c6c69b978-txpld\" (UID: \"81469087-f8d4-4499-a1e3-9fe103758289\") " pod="openstack/keystone-c6c69b978-txpld" Nov 28 07:15:53 crc kubenswrapper[4922]: I1128 07:15:53.645721 4922 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/81469087-f8d4-4499-a1e3-9fe103758289-combined-ca-bundle\") pod \"keystone-c6c69b978-txpld\" (UID: \"81469087-f8d4-4499-a1e3-9fe103758289\") " pod="openstack/keystone-c6c69b978-txpld" Nov 28 07:15:53 crc kubenswrapper[4922]: I1128 07:15:53.645795 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/81469087-f8d4-4499-a1e3-9fe103758289-public-tls-certs\") pod \"keystone-c6c69b978-txpld\" (UID: \"81469087-f8d4-4499-a1e3-9fe103758289\") " pod="openstack/keystone-c6c69b978-txpld" Nov 28 07:15:53 crc kubenswrapper[4922]: I1128 07:15:53.751376 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/81469087-f8d4-4499-a1e3-9fe103758289-internal-tls-certs\") pod \"keystone-c6c69b978-txpld\" (UID: \"81469087-f8d4-4499-a1e3-9fe103758289\") " pod="openstack/keystone-c6c69b978-txpld" Nov 28 07:15:53 crc kubenswrapper[4922]: I1128 07:15:53.752904 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hzxc4\" (UniqueName: \"kubernetes.io/projected/81469087-f8d4-4499-a1e3-9fe103758289-kube-api-access-hzxc4\") pod \"keystone-c6c69b978-txpld\" (UID: \"81469087-f8d4-4499-a1e3-9fe103758289\") " pod="openstack/keystone-c6c69b978-txpld" Nov 28 07:15:53 crc kubenswrapper[4922]: I1128 07:15:53.752971 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/81469087-f8d4-4499-a1e3-9fe103758289-credential-keys\") pod \"keystone-c6c69b978-txpld\" (UID: \"81469087-f8d4-4499-a1e3-9fe103758289\") " pod="openstack/keystone-c6c69b978-txpld" Nov 28 07:15:53 crc kubenswrapper[4922]: I1128 07:15:53.753034 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/81469087-f8d4-4499-a1e3-9fe103758289-fernet-keys\") pod \"keystone-c6c69b978-txpld\" (UID: \"81469087-f8d4-4499-a1e3-9fe103758289\") " pod="openstack/keystone-c6c69b978-txpld" Nov 28 07:15:53 crc kubenswrapper[4922]: I1128 07:15:53.753120 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/81469087-f8d4-4499-a1e3-9fe103758289-scripts\") pod \"keystone-c6c69b978-txpld\" (UID: \"81469087-f8d4-4499-a1e3-9fe103758289\") " pod="openstack/keystone-c6c69b978-txpld" Nov 28 07:15:53 crc kubenswrapper[4922]: I1128 07:15:53.753143 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/81469087-f8d4-4499-a1e3-9fe103758289-combined-ca-bundle\") pod \"keystone-c6c69b978-txpld\" (UID: \"81469087-f8d4-4499-a1e3-9fe103758289\") " pod="openstack/keystone-c6c69b978-txpld" Nov 28 07:15:53 crc kubenswrapper[4922]: I1128 07:15:53.753184 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/81469087-f8d4-4499-a1e3-9fe103758289-public-tls-certs\") pod \"keystone-c6c69b978-txpld\" (UID: \"81469087-f8d4-4499-a1e3-9fe103758289\") " pod="openstack/keystone-c6c69b978-txpld" Nov 28 07:15:53 crc kubenswrapper[4922]: I1128 07:15:53.753372 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: 
\"kubernetes.io/secret/81469087-f8d4-4499-a1e3-9fe103758289-config-data\") pod \"keystone-c6c69b978-txpld\" (UID: \"81469087-f8d4-4499-a1e3-9fe103758289\") " pod="openstack/keystone-c6c69b978-txpld" Nov 28 07:15:53 crc kubenswrapper[4922]: I1128 07:15:53.760697 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/81469087-f8d4-4499-a1e3-9fe103758289-combined-ca-bundle\") pod \"keystone-c6c69b978-txpld\" (UID: \"81469087-f8d4-4499-a1e3-9fe103758289\") " pod="openstack/keystone-c6c69b978-txpld" Nov 28 07:15:53 crc kubenswrapper[4922]: I1128 07:15:53.760827 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/81469087-f8d4-4499-a1e3-9fe103758289-config-data\") pod \"keystone-c6c69b978-txpld\" (UID: \"81469087-f8d4-4499-a1e3-9fe103758289\") " pod="openstack/keystone-c6c69b978-txpld" Nov 28 07:15:53 crc kubenswrapper[4922]: I1128 07:15:53.762531 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/81469087-f8d4-4499-a1e3-9fe103758289-internal-tls-certs\") pod \"keystone-c6c69b978-txpld\" (UID: \"81469087-f8d4-4499-a1e3-9fe103758289\") " pod="openstack/keystone-c6c69b978-txpld" Nov 28 07:15:53 crc kubenswrapper[4922]: I1128 07:15:53.762889 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/81469087-f8d4-4499-a1e3-9fe103758289-credential-keys\") pod \"keystone-c6c69b978-txpld\" (UID: \"81469087-f8d4-4499-a1e3-9fe103758289\") " pod="openstack/keystone-c6c69b978-txpld" Nov 28 07:15:53 crc kubenswrapper[4922]: I1128 07:15:53.762970 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/81469087-f8d4-4499-a1e3-9fe103758289-public-tls-certs\") pod \"keystone-c6c69b978-txpld\" (UID: \"81469087-f8d4-4499-a1e3-9fe103758289\") " pod="openstack/keystone-c6c69b978-txpld" Nov 28 07:15:53 crc kubenswrapper[4922]: I1128 07:15:53.763186 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-85487d674f-dfq9s" event={"ID":"cf191164-20d6-4d60-b111-6373616d9622","Type":"ContainerStarted","Data":"e8df143a346e32c48d17729c45b8767a2ed24995c7a3feb7c92a535d6b69654e"} Nov 28 07:15:53 crc kubenswrapper[4922]: I1128 07:15:53.763336 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-85487d674f-dfq9s" event={"ID":"cf191164-20d6-4d60-b111-6373616d9622","Type":"ContainerStarted","Data":"3f27de830fdbd6a4998b2e5549f7f0e951fce01690637c616637dc454dfa92fa"} Nov 28 07:15:53 crc kubenswrapper[4922]: I1128 07:15:53.763541 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/81469087-f8d4-4499-a1e3-9fe103758289-scripts\") pod \"keystone-c6c69b978-txpld\" (UID: \"81469087-f8d4-4499-a1e3-9fe103758289\") " pod="openstack/keystone-c6c69b978-txpld" Nov 28 07:15:53 crc kubenswrapper[4922]: I1128 07:15:53.764027 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/81469087-f8d4-4499-a1e3-9fe103758289-fernet-keys\") pod \"keystone-c6c69b978-txpld\" (UID: \"81469087-f8d4-4499-a1e3-9fe103758289\") " pod="openstack/keystone-c6c69b978-txpld" Nov 28 07:15:53 crc kubenswrapper[4922]: I1128 07:15:53.770721 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack/glance-default-internal-api-0" event={"ID":"6b683f38-454d-454f-8e2f-66270d8b6ad4","Type":"ContainerStarted","Data":"55bb3843894dfea8aca4a8c22ee1094563c3f3bcd3ef7a4d98e73b43e555cf5a"} Nov 28 07:15:53 crc kubenswrapper[4922]: I1128 07:15:53.774804 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hzxc4\" (UniqueName: \"kubernetes.io/projected/81469087-f8d4-4499-a1e3-9fe103758289-kube-api-access-hzxc4\") pod \"keystone-c6c69b978-txpld\" (UID: \"81469087-f8d4-4499-a1e3-9fe103758289\") " pod="openstack/keystone-c6c69b978-txpld" Nov 28 07:15:53 crc kubenswrapper[4922]: I1128 07:15:53.779208 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-64bdbc5658-v8ngg" event={"ID":"0a307c19-4be2-44f2-8034-00ebfa265aac","Type":"ContainerStarted","Data":"ee87d6baf2278863606c76a6db01053ad9f2bec4808b7a1028c675346742e113"} Nov 28 07:15:53 crc kubenswrapper[4922]: I1128 07:15:53.779566 4922 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/neutron-64bdbc5658-v8ngg" Nov 28 07:15:53 crc kubenswrapper[4922]: I1128 07:15:53.784766 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-5f78c88b9d-zp4nm" event={"ID":"02d26a97-d447-4f76-90ed-9357e343cd91","Type":"ContainerStarted","Data":"ca51681097b612d4dfe7b851461b5269b74ed8bfc135acb38f4eda642ba424ca"} Nov 28 07:15:53 crc kubenswrapper[4922]: I1128 07:15:53.784817 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-5f78c88b9d-zp4nm" event={"ID":"02d26a97-d447-4f76-90ed-9357e343cd91","Type":"ContainerStarted","Data":"2007239de0a02e5d422915e862a8620464730ddc29632ad08e99aaf25724d88b"} Nov 28 07:15:53 crc kubenswrapper[4922]: I1128 07:15:53.784830 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-5f78c88b9d-zp4nm" event={"ID":"02d26a97-d447-4f76-90ed-9357e343cd91","Type":"ContainerStarted","Data":"e011d58685304fd3c603589c2619a6c538d8044a6ab6cc02422272391c262951"} Nov 28 07:15:53 crc kubenswrapper[4922]: I1128 07:15:53.784904 4922 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/placement-5f78c88b9d-zp4nm" Nov 28 07:15:53 crc kubenswrapper[4922]: I1128 07:15:53.784956 4922 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/placement-5f78c88b9d-zp4nm" Nov 28 07:15:53 crc kubenswrapper[4922]: W1128 07:15:53.817705 4922 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podadb38287_ea0b_4e02_8bda_8022f86b0d81.slice/crio-4a6bcc8bbc7babd809e734ad55eef302113a1ac8d71f673ed0f5c39689263abd WatchSource:0}: Error finding container 4a6bcc8bbc7babd809e734ad55eef302113a1ac8d71f673ed0f5c39689263abd: Status 404 returned error can't find the container with id 4a6bcc8bbc7babd809e734ad55eef302113a1ac8d71f673ed0f5c39689263abd Nov 28 07:15:53 crc kubenswrapper[4922]: I1128 07:15:53.830624 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 28 07:15:53 crc kubenswrapper[4922]: I1128 07:15:53.838119 4922 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-64bdbc5658-v8ngg" podStartSLOduration=7.838098798 podStartE2EDuration="7.838098798s" podCreationTimestamp="2025-11-28 07:15:46 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 07:15:53.818663201 +0000 UTC m=+1398.739058783" 
watchObservedRunningTime="2025-11-28 07:15:53.838098798 +0000 UTC m=+1398.758494380" Nov 28 07:15:53 crc kubenswrapper[4922]: I1128 07:15:53.847969 4922 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/placement-5f78c88b9d-zp4nm" podStartSLOduration=5.847948651 podStartE2EDuration="5.847948651s" podCreationTimestamp="2025-11-28 07:15:48 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 07:15:53.844850028 +0000 UTC m=+1398.765245610" watchObservedRunningTime="2025-11-28 07:15:53.847948651 +0000 UTC m=+1398.768344233" Nov 28 07:15:53 crc kubenswrapper[4922]: I1128 07:15:53.927128 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-c6c69b978-txpld" Nov 28 07:15:54 crc kubenswrapper[4922]: I1128 07:15:54.461918 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-c6c69b978-txpld"] Nov 28 07:15:54 crc kubenswrapper[4922]: W1128 07:15:54.487424 4922 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod81469087_f8d4_4499_a1e3_9fe103758289.slice/crio-b2ce01c49b5e4a1a63faa18486a3af0a31159a3792bfda73d076aad0478560a8 WatchSource:0}: Error finding container b2ce01c49b5e4a1a63faa18486a3af0a31159a3792bfda73d076aad0478560a8: Status 404 returned error can't find the container with id b2ce01c49b5e4a1a63faa18486a3af0a31159a3792bfda73d076aad0478560a8 Nov 28 07:15:54 crc kubenswrapper[4922]: I1128 07:15:54.799759 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-85487d674f-dfq9s" event={"ID":"cf191164-20d6-4d60-b111-6373616d9622","Type":"ContainerStarted","Data":"f58fb8e38c1acd1bb650c86ad30a27f2c7340fa1dc53bc9b6f0f13802001dd49"} Nov 28 07:15:54 crc kubenswrapper[4922]: I1128 07:15:54.800148 4922 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/neutron-85487d674f-dfq9s" Nov 28 07:15:54 crc kubenswrapper[4922]: I1128 07:15:54.803205 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"6b683f38-454d-454f-8e2f-66270d8b6ad4","Type":"ContainerStarted","Data":"f9ae73c6e5820c1e8b8a1b39cbbe3b03d442968a285603ed7dda6482782aad40"} Nov 28 07:15:54 crc kubenswrapper[4922]: I1128 07:15:54.805421 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"adb38287-ea0b-4e02-8bda-8022f86b0d81","Type":"ContainerStarted","Data":"4a6bcc8bbc7babd809e734ad55eef302113a1ac8d71f673ed0f5c39689263abd"} Nov 28 07:15:54 crc kubenswrapper[4922]: I1128 07:15:54.806610 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-c6c69b978-txpld" event={"ID":"81469087-f8d4-4499-a1e3-9fe103758289","Type":"ContainerStarted","Data":"b2ce01c49b5e4a1a63faa18486a3af0a31159a3792bfda73d076aad0478560a8"} Nov 28 07:15:54 crc kubenswrapper[4922]: I1128 07:15:54.827397 4922 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-85487d674f-dfq9s" podStartSLOduration=5.827379029 podStartE2EDuration="5.827379029s" podCreationTimestamp="2025-11-28 07:15:49 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 07:15:54.817002933 +0000 UTC m=+1399.737398515" watchObservedRunningTime="2025-11-28 07:15:54.827379029 +0000 UTC m=+1399.747774611" Nov 28 07:15:55 crc 
kubenswrapper[4922]: I1128 07:15:55.832545 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"adb38287-ea0b-4e02-8bda-8022f86b0d81","Type":"ContainerStarted","Data":"b5801fe62bdc725903ec844459b175013f18223c1fb6f02b97215330875f416c"} Nov 28 07:15:55 crc kubenswrapper[4922]: I1128 07:15:55.833830 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-c6c69b978-txpld" event={"ID":"81469087-f8d4-4499-a1e3-9fe103758289","Type":"ContainerStarted","Data":"e14162c5f538b87ed64ebb17f2d96cca074bc89ff83ab9d46b7b609216c66fb3"} Nov 28 07:15:55 crc kubenswrapper[4922]: I1128 07:15:55.833887 4922 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/keystone-c6c69b978-txpld" Nov 28 07:15:55 crc kubenswrapper[4922]: I1128 07:15:55.836166 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"6b683f38-454d-454f-8e2f-66270d8b6ad4","Type":"ContainerStarted","Data":"e8aebc7852e9b795522004738cf16b3d1f57f80a6770d3e3607cc890203e8877"} Nov 28 07:15:55 crc kubenswrapper[4922]: I1128 07:15:55.873067 4922 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-c6c69b978-txpld" podStartSLOduration=2.8730438 podStartE2EDuration="2.8730438s" podCreationTimestamp="2025-11-28 07:15:53 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 07:15:55.851100937 +0000 UTC m=+1400.771496519" watchObservedRunningTime="2025-11-28 07:15:55.8730438 +0000 UTC m=+1400.793439382" Nov 28 07:15:56 crc kubenswrapper[4922]: I1128 07:15:56.868643 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"adb38287-ea0b-4e02-8bda-8022f86b0d81","Type":"ContainerStarted","Data":"fa33872a8c402ef10af3d2c2d8f905a05dc2def6cc7ed70358f37b02f79a3052"} Nov 28 07:15:56 crc kubenswrapper[4922]: I1128 07:15:56.896702 4922 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-external-api-0" podStartSLOduration=4.896686315 podStartE2EDuration="4.896686315s" podCreationTimestamp="2025-11-28 07:15:52 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 07:15:56.891545918 +0000 UTC m=+1401.811941500" watchObservedRunningTime="2025-11-28 07:15:56.896686315 +0000 UTC m=+1401.817081897" Nov 28 07:15:56 crc kubenswrapper[4922]: I1128 07:15:56.917101 4922 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-internal-api-0" podStartSLOduration=6.917084488 podStartE2EDuration="6.917084488s" podCreationTimestamp="2025-11-28 07:15:50 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 07:15:56.908379196 +0000 UTC m=+1401.828774788" watchObservedRunningTime="2025-11-28 07:15:56.917084488 +0000 UTC m=+1401.837480070" Nov 28 07:15:57 crc kubenswrapper[4922]: I1128 07:15:57.153365 4922 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-f8dc44d89-v7d7h" Nov 28 07:15:57 crc kubenswrapper[4922]: I1128 07:15:57.215963 4922 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-7c8cb8df65-x8pfz"] Nov 28 07:15:57 crc kubenswrapper[4922]: I1128 07:15:57.216176 4922 
kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-7c8cb8df65-x8pfz" podUID="ebf19eb8-c582-4e2e-9a1c-e661e24bcae1" containerName="dnsmasq-dns" containerID="cri-o://9912e5cfa24f6de7d2138276e2fb9effacb013246f74b63fe9501ed863a988d1" gracePeriod=10 Nov 28 07:15:57 crc kubenswrapper[4922]: I1128 07:15:57.312324 4922 patch_prober.go:28] interesting pod/machine-config-daemon-h8wk6 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 07:15:57 crc kubenswrapper[4922]: I1128 07:15:57.312380 4922 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-h8wk6" podUID="0498340a-5e95-42bf-a0a6-8ac89a6b8858" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 07:15:57 crc kubenswrapper[4922]: I1128 07:15:57.727173 4922 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-7c8cb8df65-x8pfz" Nov 28 07:15:57 crc kubenswrapper[4922]: I1128 07:15:57.742095 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/ebf19eb8-c582-4e2e-9a1c-e661e24bcae1-ovsdbserver-sb\") pod \"ebf19eb8-c582-4e2e-9a1c-e661e24bcae1\" (UID: \"ebf19eb8-c582-4e2e-9a1c-e661e24bcae1\") " Nov 28 07:15:57 crc kubenswrapper[4922]: I1128 07:15:57.742259 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/ebf19eb8-c582-4e2e-9a1c-e661e24bcae1-dns-svc\") pod \"ebf19eb8-c582-4e2e-9a1c-e661e24bcae1\" (UID: \"ebf19eb8-c582-4e2e-9a1c-e661e24bcae1\") " Nov 28 07:15:57 crc kubenswrapper[4922]: I1128 07:15:57.742307 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ebf19eb8-c582-4e2e-9a1c-e661e24bcae1-config\") pod \"ebf19eb8-c582-4e2e-9a1c-e661e24bcae1\" (UID: \"ebf19eb8-c582-4e2e-9a1c-e661e24bcae1\") " Nov 28 07:15:57 crc kubenswrapper[4922]: I1128 07:15:57.742430 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/ebf19eb8-c582-4e2e-9a1c-e661e24bcae1-ovsdbserver-nb\") pod \"ebf19eb8-c582-4e2e-9a1c-e661e24bcae1\" (UID: \"ebf19eb8-c582-4e2e-9a1c-e661e24bcae1\") " Nov 28 07:15:57 crc kubenswrapper[4922]: I1128 07:15:57.742454 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5524w\" (UniqueName: \"kubernetes.io/projected/ebf19eb8-c582-4e2e-9a1c-e661e24bcae1-kube-api-access-5524w\") pod \"ebf19eb8-c582-4e2e-9a1c-e661e24bcae1\" (UID: \"ebf19eb8-c582-4e2e-9a1c-e661e24bcae1\") " Nov 28 07:15:57 crc kubenswrapper[4922]: I1128 07:15:57.747430 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ebf19eb8-c582-4e2e-9a1c-e661e24bcae1-kube-api-access-5524w" (OuterVolumeSpecName: "kube-api-access-5524w") pod "ebf19eb8-c582-4e2e-9a1c-e661e24bcae1" (UID: "ebf19eb8-c582-4e2e-9a1c-e661e24bcae1"). InnerVolumeSpecName "kube-api-access-5524w". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 07:15:57 crc kubenswrapper[4922]: I1128 07:15:57.812158 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ebf19eb8-c582-4e2e-9a1c-e661e24bcae1-config" (OuterVolumeSpecName: "config") pod "ebf19eb8-c582-4e2e-9a1c-e661e24bcae1" (UID: "ebf19eb8-c582-4e2e-9a1c-e661e24bcae1"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 07:15:57 crc kubenswrapper[4922]: I1128 07:15:57.848851 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ebf19eb8-c582-4e2e-9a1c-e661e24bcae1-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "ebf19eb8-c582-4e2e-9a1c-e661e24bcae1" (UID: "ebf19eb8-c582-4e2e-9a1c-e661e24bcae1"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 07:15:57 crc kubenswrapper[4922]: I1128 07:15:57.848931 4922 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5524w\" (UniqueName: \"kubernetes.io/projected/ebf19eb8-c582-4e2e-9a1c-e661e24bcae1-kube-api-access-5524w\") on node \"crc\" DevicePath \"\"" Nov 28 07:15:57 crc kubenswrapper[4922]: I1128 07:15:57.848957 4922 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ebf19eb8-c582-4e2e-9a1c-e661e24bcae1-config\") on node \"crc\" DevicePath \"\"" Nov 28 07:15:57 crc kubenswrapper[4922]: I1128 07:15:57.885765 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ebf19eb8-c582-4e2e-9a1c-e661e24bcae1-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "ebf19eb8-c582-4e2e-9a1c-e661e24bcae1" (UID: "ebf19eb8-c582-4e2e-9a1c-e661e24bcae1"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 07:15:57 crc kubenswrapper[4922]: I1128 07:15:57.898838 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ebf19eb8-c582-4e2e-9a1c-e661e24bcae1-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "ebf19eb8-c582-4e2e-9a1c-e661e24bcae1" (UID: "ebf19eb8-c582-4e2e-9a1c-e661e24bcae1"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 07:15:57 crc kubenswrapper[4922]: I1128 07:15:57.928142 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-sync-4p4rg" event={"ID":"2bf381d5-4211-44a0-8fd6-4b1c05fb690d","Type":"ContainerStarted","Data":"602b2fe4aa584b9689b82571f4d574d08a11fdfc974904fc9c89c5481f00ee22"} Nov 28 07:15:57 crc kubenswrapper[4922]: I1128 07:15:57.948983 4922 generic.go:334] "Generic (PLEG): container finished" podID="ebf19eb8-c582-4e2e-9a1c-e661e24bcae1" containerID="9912e5cfa24f6de7d2138276e2fb9effacb013246f74b63fe9501ed863a988d1" exitCode=0 Nov 28 07:15:57 crc kubenswrapper[4922]: I1128 07:15:57.949315 4922 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-7c8cb8df65-x8pfz" Nov 28 07:15:57 crc kubenswrapper[4922]: I1128 07:15:57.950607 4922 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/ebf19eb8-c582-4e2e-9a1c-e661e24bcae1-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 28 07:15:57 crc kubenswrapper[4922]: I1128 07:15:57.950638 4922 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/ebf19eb8-c582-4e2e-9a1c-e661e24bcae1-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 28 07:15:57 crc kubenswrapper[4922]: I1128 07:15:57.950651 4922 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/ebf19eb8-c582-4e2e-9a1c-e661e24bcae1-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 28 07:15:57 crc kubenswrapper[4922]: I1128 07:15:57.949344 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7c8cb8df65-x8pfz" event={"ID":"ebf19eb8-c582-4e2e-9a1c-e661e24bcae1","Type":"ContainerDied","Data":"9912e5cfa24f6de7d2138276e2fb9effacb013246f74b63fe9501ed863a988d1"} Nov 28 07:15:57 crc kubenswrapper[4922]: I1128 07:15:57.951285 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7c8cb8df65-x8pfz" event={"ID":"ebf19eb8-c582-4e2e-9a1c-e661e24bcae1","Type":"ContainerDied","Data":"d7e825265081179f3512c1ec8f2eda2ccfaff80fc5afdc872bde7bef7294bc1d"} Nov 28 07:15:57 crc kubenswrapper[4922]: I1128 07:15:57.951310 4922 scope.go:117] "RemoveContainer" containerID="9912e5cfa24f6de7d2138276e2fb9effacb013246f74b63fe9501ed863a988d1" Nov 28 07:15:57 crc kubenswrapper[4922]: I1128 07:15:57.953550 4922 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-db-sync-4p4rg" podStartSLOduration=2.110464216 podStartE2EDuration="38.953540144s" podCreationTimestamp="2025-11-28 07:15:19 +0000 UTC" firstStartedPulling="2025-11-28 07:15:20.124240706 +0000 UTC m=+1365.044636288" lastFinishedPulling="2025-11-28 07:15:56.967316634 +0000 UTC m=+1401.887712216" observedRunningTime="2025-11-28 07:15:57.952303231 +0000 UTC m=+1402.872698813" watchObservedRunningTime="2025-11-28 07:15:57.953540144 +0000 UTC m=+1402.873935716" Nov 28 07:15:58 crc kubenswrapper[4922]: I1128 07:15:58.042081 4922 scope.go:117] "RemoveContainer" containerID="eb98327ae0a3ebf903f1cd3e265b3034a52c0419eb42fa5c9717c59a88af4259" Nov 28 07:15:58 crc kubenswrapper[4922]: I1128 07:15:58.055204 4922 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-7c8cb8df65-x8pfz"] Nov 28 07:15:58 crc kubenswrapper[4922]: I1128 07:15:58.063460 4922 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-7c8cb8df65-x8pfz"] Nov 28 07:15:58 crc kubenswrapper[4922]: I1128 07:15:58.088720 4922 scope.go:117] "RemoveContainer" containerID="9912e5cfa24f6de7d2138276e2fb9effacb013246f74b63fe9501ed863a988d1" Nov 28 07:15:58 crc kubenswrapper[4922]: E1128 07:15:58.089259 4922 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9912e5cfa24f6de7d2138276e2fb9effacb013246f74b63fe9501ed863a988d1\": container with ID starting with 9912e5cfa24f6de7d2138276e2fb9effacb013246f74b63fe9501ed863a988d1 not found: ID does not exist" containerID="9912e5cfa24f6de7d2138276e2fb9effacb013246f74b63fe9501ed863a988d1" Nov 28 07:15:58 crc kubenswrapper[4922]: I1128 07:15:58.089297 4922 pod_container_deletor.go:53] "DeleteContainer 
returned error" containerID={"Type":"cri-o","ID":"9912e5cfa24f6de7d2138276e2fb9effacb013246f74b63fe9501ed863a988d1"} err="failed to get container status \"9912e5cfa24f6de7d2138276e2fb9effacb013246f74b63fe9501ed863a988d1\": rpc error: code = NotFound desc = could not find container \"9912e5cfa24f6de7d2138276e2fb9effacb013246f74b63fe9501ed863a988d1\": container with ID starting with 9912e5cfa24f6de7d2138276e2fb9effacb013246f74b63fe9501ed863a988d1 not found: ID does not exist" Nov 28 07:15:58 crc kubenswrapper[4922]: I1128 07:15:58.089343 4922 scope.go:117] "RemoveContainer" containerID="eb98327ae0a3ebf903f1cd3e265b3034a52c0419eb42fa5c9717c59a88af4259" Nov 28 07:15:58 crc kubenswrapper[4922]: E1128 07:15:58.089714 4922 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"eb98327ae0a3ebf903f1cd3e265b3034a52c0419eb42fa5c9717c59a88af4259\": container with ID starting with eb98327ae0a3ebf903f1cd3e265b3034a52c0419eb42fa5c9717c59a88af4259 not found: ID does not exist" containerID="eb98327ae0a3ebf903f1cd3e265b3034a52c0419eb42fa5c9717c59a88af4259" Nov 28 07:15:58 crc kubenswrapper[4922]: I1128 07:15:58.089760 4922 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"eb98327ae0a3ebf903f1cd3e265b3034a52c0419eb42fa5c9717c59a88af4259"} err="failed to get container status \"eb98327ae0a3ebf903f1cd3e265b3034a52c0419eb42fa5c9717c59a88af4259\": rpc error: code = NotFound desc = could not find container \"eb98327ae0a3ebf903f1cd3e265b3034a52c0419eb42fa5c9717c59a88af4259\": container with ID starting with eb98327ae0a3ebf903f1cd3e265b3034a52c0419eb42fa5c9717c59a88af4259 not found: ID does not exist" Nov 28 07:15:58 crc kubenswrapper[4922]: I1128 07:15:58.973211 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-sync-4q5pb" event={"ID":"aa053525-2f00-4415-b9b5-35948c8e5038","Type":"ContainerStarted","Data":"08b04a1731e257cb827e2fa7f859061cea2670c54000e2d9698e3c70be4917ea"} Nov 28 07:15:58 crc kubenswrapper[4922]: I1128 07:15:58.990672 4922 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-db-sync-4q5pb" podStartSLOduration=2.745461239 podStartE2EDuration="39.990653438s" podCreationTimestamp="2025-11-28 07:15:19 +0000 UTC" firstStartedPulling="2025-11-28 07:15:20.860201006 +0000 UTC m=+1365.780596588" lastFinishedPulling="2025-11-28 07:15:58.105393215 +0000 UTC m=+1403.025788787" observedRunningTime="2025-11-28 07:15:58.986766364 +0000 UTC m=+1403.907161946" watchObservedRunningTime="2025-11-28 07:15:58.990653438 +0000 UTC m=+1403.911049030" Nov 28 07:15:59 crc kubenswrapper[4922]: I1128 07:15:59.407679 4922 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ebf19eb8-c582-4e2e-9a1c-e661e24bcae1" path="/var/lib/kubelet/pods/ebf19eb8-c582-4e2e-9a1c-e661e24bcae1/volumes" Nov 28 07:16:01 crc kubenswrapper[4922]: I1128 07:16:01.085590 4922 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-internal-api-0" Nov 28 07:16:01 crc kubenswrapper[4922]: I1128 07:16:01.085904 4922 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-internal-api-0" Nov 28 07:16:01 crc kubenswrapper[4922]: I1128 07:16:01.115494 4922 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-internal-api-0" Nov 28 07:16:01 crc kubenswrapper[4922]: I1128 07:16:01.147614 4922 kubelet.go:2542] "SyncLoop (probe)" 
probe="startup" status="started" pod="openstack/glance-default-internal-api-0" Nov 28 07:16:02 crc kubenswrapper[4922]: I1128 07:16:02.040570 4922 generic.go:334] "Generic (PLEG): container finished" podID="aa053525-2f00-4415-b9b5-35948c8e5038" containerID="08b04a1731e257cb827e2fa7f859061cea2670c54000e2d9698e3c70be4917ea" exitCode=0 Nov 28 07:16:02 crc kubenswrapper[4922]: I1128 07:16:02.041852 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-sync-4q5pb" event={"ID":"aa053525-2f00-4415-b9b5-35948c8e5038","Type":"ContainerDied","Data":"08b04a1731e257cb827e2fa7f859061cea2670c54000e2d9698e3c70be4917ea"} Nov 28 07:16:02 crc kubenswrapper[4922]: I1128 07:16:02.041887 4922 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-internal-api-0" Nov 28 07:16:02 crc kubenswrapper[4922]: I1128 07:16:02.042321 4922 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-internal-api-0" Nov 28 07:16:03 crc kubenswrapper[4922]: I1128 07:16:03.070315 4922 generic.go:334] "Generic (PLEG): container finished" podID="2bf381d5-4211-44a0-8fd6-4b1c05fb690d" containerID="602b2fe4aa584b9689b82571f4d574d08a11fdfc974904fc9c89c5481f00ee22" exitCode=0 Nov 28 07:16:03 crc kubenswrapper[4922]: I1128 07:16:03.070450 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-sync-4p4rg" event={"ID":"2bf381d5-4211-44a0-8fd6-4b1c05fb690d","Type":"ContainerDied","Data":"602b2fe4aa584b9689b82571f4d574d08a11fdfc974904fc9c89c5481f00ee22"} Nov 28 07:16:03 crc kubenswrapper[4922]: I1128 07:16:03.194633 4922 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-external-api-0" Nov 28 07:16:03 crc kubenswrapper[4922]: I1128 07:16:03.194681 4922 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-external-api-0" Nov 28 07:16:03 crc kubenswrapper[4922]: I1128 07:16:03.259149 4922 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-external-api-0" Nov 28 07:16:03 crc kubenswrapper[4922]: I1128 07:16:03.291002 4922 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-external-api-0" Nov 28 07:16:03 crc kubenswrapper[4922]: I1128 07:16:03.749329 4922 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-db-sync-4q5pb" Nov 28 07:16:03 crc kubenswrapper[4922]: I1128 07:16:03.867977 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-sxs2b\" (UniqueName: \"kubernetes.io/projected/aa053525-2f00-4415-b9b5-35948c8e5038-kube-api-access-sxs2b\") pod \"aa053525-2f00-4415-b9b5-35948c8e5038\" (UID: \"aa053525-2f00-4415-b9b5-35948c8e5038\") " Nov 28 07:16:03 crc kubenswrapper[4922]: I1128 07:16:03.868601 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/aa053525-2f00-4415-b9b5-35948c8e5038-db-sync-config-data\") pod \"aa053525-2f00-4415-b9b5-35948c8e5038\" (UID: \"aa053525-2f00-4415-b9b5-35948c8e5038\") " Nov 28 07:16:03 crc kubenswrapper[4922]: I1128 07:16:03.868926 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/aa053525-2f00-4415-b9b5-35948c8e5038-combined-ca-bundle\") pod \"aa053525-2f00-4415-b9b5-35948c8e5038\" (UID: \"aa053525-2f00-4415-b9b5-35948c8e5038\") " Nov 28 07:16:03 crc kubenswrapper[4922]: I1128 07:16:03.876173 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/aa053525-2f00-4415-b9b5-35948c8e5038-db-sync-config-data" (OuterVolumeSpecName: "db-sync-config-data") pod "aa053525-2f00-4415-b9b5-35948c8e5038" (UID: "aa053525-2f00-4415-b9b5-35948c8e5038"). InnerVolumeSpecName "db-sync-config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 07:16:03 crc kubenswrapper[4922]: I1128 07:16:03.877076 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/aa053525-2f00-4415-b9b5-35948c8e5038-kube-api-access-sxs2b" (OuterVolumeSpecName: "kube-api-access-sxs2b") pod "aa053525-2f00-4415-b9b5-35948c8e5038" (UID: "aa053525-2f00-4415-b9b5-35948c8e5038"). InnerVolumeSpecName "kube-api-access-sxs2b". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 07:16:03 crc kubenswrapper[4922]: I1128 07:16:03.910441 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/aa053525-2f00-4415-b9b5-35948c8e5038-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "aa053525-2f00-4415-b9b5-35948c8e5038" (UID: "aa053525-2f00-4415-b9b5-35948c8e5038"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 07:16:03 crc kubenswrapper[4922]: I1128 07:16:03.927240 4922 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-internal-api-0" Nov 28 07:16:03 crc kubenswrapper[4922]: I1128 07:16:03.940743 4922 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-internal-api-0" Nov 28 07:16:03 crc kubenswrapper[4922]: I1128 07:16:03.985073 4922 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-sxs2b\" (UniqueName: \"kubernetes.io/projected/aa053525-2f00-4415-b9b5-35948c8e5038-kube-api-access-sxs2b\") on node \"crc\" DevicePath \"\"" Nov 28 07:16:03 crc kubenswrapper[4922]: I1128 07:16:03.985117 4922 reconciler_common.go:293] "Volume detached for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/aa053525-2f00-4415-b9b5-35948c8e5038-db-sync-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 07:16:03 crc kubenswrapper[4922]: I1128 07:16:03.985133 4922 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/aa053525-2f00-4415-b9b5-35948c8e5038-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 07:16:04 crc kubenswrapper[4922]: E1128 07:16:04.098547 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/ceilometer-0" podUID="be4be2de-2f46-4982-8cd6-b73888d293af" Nov 28 07:16:04 crc kubenswrapper[4922]: I1128 07:16:04.098798 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-sync-4q5pb" event={"ID":"aa053525-2f00-4415-b9b5-35948c8e5038","Type":"ContainerDied","Data":"3dc318bcfbd9f9a58f81f07d6f76a79bd21d4e8980189c900ee6d271fa7b4d98"} Nov 28 07:16:04 crc kubenswrapper[4922]: I1128 07:16:04.098820 4922 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="3dc318bcfbd9f9a58f81f07d6f76a79bd21d4e8980189c900ee6d271fa7b4d98" Nov 28 07:16:04 crc kubenswrapper[4922]: I1128 07:16:04.098886 4922 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-db-sync-4q5pb" Nov 28 07:16:04 crc kubenswrapper[4922]: I1128 07:16:04.100263 4922 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-external-api-0" Nov 28 07:16:04 crc kubenswrapper[4922]: I1128 07:16:04.100296 4922 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-external-api-0" Nov 28 07:16:04 crc kubenswrapper[4922]: I1128 07:16:04.335289 4922 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-worker-5b7cbd995c-rwlzz"] Nov 28 07:16:04 crc kubenswrapper[4922]: E1128 07:16:04.336016 4922 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="aa053525-2f00-4415-b9b5-35948c8e5038" containerName="barbican-db-sync" Nov 28 07:16:04 crc kubenswrapper[4922]: I1128 07:16:04.336032 4922 state_mem.go:107] "Deleted CPUSet assignment" podUID="aa053525-2f00-4415-b9b5-35948c8e5038" containerName="barbican-db-sync" Nov 28 07:16:04 crc kubenswrapper[4922]: E1128 07:16:04.336048 4922 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ebf19eb8-c582-4e2e-9a1c-e661e24bcae1" containerName="init" Nov 28 07:16:04 crc kubenswrapper[4922]: I1128 07:16:04.336056 4922 state_mem.go:107] "Deleted CPUSet assignment" podUID="ebf19eb8-c582-4e2e-9a1c-e661e24bcae1" containerName="init" Nov 28 07:16:04 crc kubenswrapper[4922]: E1128 07:16:04.336084 4922 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ebf19eb8-c582-4e2e-9a1c-e661e24bcae1" containerName="dnsmasq-dns" Nov 28 07:16:04 crc kubenswrapper[4922]: I1128 07:16:04.336092 4922 state_mem.go:107] "Deleted CPUSet assignment" podUID="ebf19eb8-c582-4e2e-9a1c-e661e24bcae1" containerName="dnsmasq-dns" Nov 28 07:16:04 crc kubenswrapper[4922]: I1128 07:16:04.336292 4922 memory_manager.go:354] "RemoveStaleState removing state" podUID="ebf19eb8-c582-4e2e-9a1c-e661e24bcae1" containerName="dnsmasq-dns" Nov 28 07:16:04 crc kubenswrapper[4922]: I1128 07:16:04.336305 4922 memory_manager.go:354] "RemoveStaleState removing state" podUID="aa053525-2f00-4415-b9b5-35948c8e5038" containerName="barbican-db-sync" Nov 28 07:16:04 crc kubenswrapper[4922]: I1128 07:16:04.337178 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-worker-5b7cbd995c-rwlzz" Nov 28 07:16:04 crc kubenswrapper[4922]: I1128 07:16:04.341807 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-config-data" Nov 28 07:16:04 crc kubenswrapper[4922]: I1128 07:16:04.341997 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-barbican-dockercfg-6g2s9" Nov 28 07:16:04 crc kubenswrapper[4922]: I1128 07:16:04.342089 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-worker-5b7cbd995c-rwlzz"] Nov 28 07:16:04 crc kubenswrapper[4922]: I1128 07:16:04.342154 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-worker-config-data" Nov 28 07:16:04 crc kubenswrapper[4922]: I1128 07:16:04.351443 4922 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-keystone-listener-84495d76c8-mkvcb"] Nov 28 07:16:04 crc kubenswrapper[4922]: I1128 07:16:04.352910 4922 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-keystone-listener-84495d76c8-mkvcb" Nov 28 07:16:04 crc kubenswrapper[4922]: I1128 07:16:04.355104 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-keystone-listener-config-data" Nov 28 07:16:04 crc kubenswrapper[4922]: I1128 07:16:04.356915 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-keystone-listener-84495d76c8-mkvcb"] Nov 28 07:16:04 crc kubenswrapper[4922]: I1128 07:16:04.391495 4922 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-cf96b7dc5-lfwbc"] Nov 28 07:16:04 crc kubenswrapper[4922]: I1128 07:16:04.392879 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-cf96b7dc5-lfwbc" Nov 28 07:16:04 crc kubenswrapper[4922]: I1128 07:16:04.408897 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/f8685dc8-7577-4076-8a5a-beba52e9bae7-logs\") pod \"barbican-worker-5b7cbd995c-rwlzz\" (UID: \"f8685dc8-7577-4076-8a5a-beba52e9bae7\") " pod="openstack/barbican-worker-5b7cbd995c-rwlzz" Nov 28 07:16:04 crc kubenswrapper[4922]: I1128 07:16:04.408967 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f8685dc8-7577-4076-8a5a-beba52e9bae7-combined-ca-bundle\") pod \"barbican-worker-5b7cbd995c-rwlzz\" (UID: \"f8685dc8-7577-4076-8a5a-beba52e9bae7\") " pod="openstack/barbican-worker-5b7cbd995c-rwlzz" Nov 28 07:16:04 crc kubenswrapper[4922]: I1128 07:16:04.409003 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4g7dd\" (UniqueName: \"kubernetes.io/projected/f8685dc8-7577-4076-8a5a-beba52e9bae7-kube-api-access-4g7dd\") pod \"barbican-worker-5b7cbd995c-rwlzz\" (UID: \"f8685dc8-7577-4076-8a5a-beba52e9bae7\") " pod="openstack/barbican-worker-5b7cbd995c-rwlzz" Nov 28 07:16:04 crc kubenswrapper[4922]: I1128 07:16:04.409018 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/f8685dc8-7577-4076-8a5a-beba52e9bae7-config-data-custom\") pod \"barbican-worker-5b7cbd995c-rwlzz\" (UID: \"f8685dc8-7577-4076-8a5a-beba52e9bae7\") " pod="openstack/barbican-worker-5b7cbd995c-rwlzz" Nov 28 07:16:04 crc kubenswrapper[4922]: I1128 07:16:04.409045 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f8685dc8-7577-4076-8a5a-beba52e9bae7-config-data\") pod \"barbican-worker-5b7cbd995c-rwlzz\" (UID: \"f8685dc8-7577-4076-8a5a-beba52e9bae7\") " pod="openstack/barbican-worker-5b7cbd995c-rwlzz" Nov 28 07:16:04 crc kubenswrapper[4922]: I1128 07:16:04.435302 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-cf96b7dc5-lfwbc"] Nov 28 07:16:04 crc kubenswrapper[4922]: I1128 07:16:04.492502 4922 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-api-59488f5ff4-k2cvn"] Nov 28 07:16:04 crc kubenswrapper[4922]: I1128 07:16:04.493884 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-api-59488f5ff4-k2cvn" Nov 28 07:16:04 crc kubenswrapper[4922]: I1128 07:16:04.496442 4922 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-db-sync-4p4rg" Nov 28 07:16:04 crc kubenswrapper[4922]: I1128 07:16:04.502509 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-api-config-data" Nov 28 07:16:04 crc kubenswrapper[4922]: I1128 07:16:04.511591 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5ece1333-c457-4099-bf00-1daa969a14dc-config-data\") pod \"barbican-keystone-listener-84495d76c8-mkvcb\" (UID: \"5ece1333-c457-4099-bf00-1daa969a14dc\") " pod="openstack/barbican-keystone-listener-84495d76c8-mkvcb" Nov 28 07:16:04 crc kubenswrapper[4922]: I1128 07:16:04.511718 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/5ece1333-c457-4099-bf00-1daa969a14dc-logs\") pod \"barbican-keystone-listener-84495d76c8-mkvcb\" (UID: \"5ece1333-c457-4099-bf00-1daa969a14dc\") " pod="openstack/barbican-keystone-listener-84495d76c8-mkvcb" Nov 28 07:16:04 crc kubenswrapper[4922]: I1128 07:16:04.511789 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/f8685dc8-7577-4076-8a5a-beba52e9bae7-logs\") pod \"barbican-worker-5b7cbd995c-rwlzz\" (UID: \"f8685dc8-7577-4076-8a5a-beba52e9bae7\") " pod="openstack/barbican-worker-5b7cbd995c-rwlzz" Nov 28 07:16:04 crc kubenswrapper[4922]: I1128 07:16:04.511863 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/a57aeefc-69e8-4b46-aeec-2aff4026621c-dns-swift-storage-0\") pod \"dnsmasq-dns-cf96b7dc5-lfwbc\" (UID: \"a57aeefc-69e8-4b46-aeec-2aff4026621c\") " pod="openstack/dnsmasq-dns-cf96b7dc5-lfwbc" Nov 28 07:16:04 crc kubenswrapper[4922]: I1128 07:16:04.511960 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f8685dc8-7577-4076-8a5a-beba52e9bae7-combined-ca-bundle\") pod \"barbican-worker-5b7cbd995c-rwlzz\" (UID: \"f8685dc8-7577-4076-8a5a-beba52e9bae7\") " pod="openstack/barbican-worker-5b7cbd995c-rwlzz" Nov 28 07:16:04 crc kubenswrapper[4922]: I1128 07:16:04.512124 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-z875h\" (UniqueName: \"kubernetes.io/projected/5ece1333-c457-4099-bf00-1daa969a14dc-kube-api-access-z875h\") pod \"barbican-keystone-listener-84495d76c8-mkvcb\" (UID: \"5ece1333-c457-4099-bf00-1daa969a14dc\") " pod="openstack/barbican-keystone-listener-84495d76c8-mkvcb" Nov 28 07:16:04 crc kubenswrapper[4922]: I1128 07:16:04.512166 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4g7dd\" (UniqueName: \"kubernetes.io/projected/f8685dc8-7577-4076-8a5a-beba52e9bae7-kube-api-access-4g7dd\") pod \"barbican-worker-5b7cbd995c-rwlzz\" (UID: \"f8685dc8-7577-4076-8a5a-beba52e9bae7\") " pod="openstack/barbican-worker-5b7cbd995c-rwlzz" Nov 28 07:16:04 crc kubenswrapper[4922]: I1128 07:16:04.512191 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/f8685dc8-7577-4076-8a5a-beba52e9bae7-config-data-custom\") pod \"barbican-worker-5b7cbd995c-rwlzz\" (UID: \"f8685dc8-7577-4076-8a5a-beba52e9bae7\") " pod="openstack/barbican-worker-5b7cbd995c-rwlzz" Nov 28 07:16:04 
crc kubenswrapper[4922]: I1128 07:16:04.512253 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f8685dc8-7577-4076-8a5a-beba52e9bae7-config-data\") pod \"barbican-worker-5b7cbd995c-rwlzz\" (UID: \"f8685dc8-7577-4076-8a5a-beba52e9bae7\") " pod="openstack/barbican-worker-5b7cbd995c-rwlzz" Nov 28 07:16:04 crc kubenswrapper[4922]: I1128 07:16:04.512284 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/a57aeefc-69e8-4b46-aeec-2aff4026621c-ovsdbserver-nb\") pod \"dnsmasq-dns-cf96b7dc5-lfwbc\" (UID: \"a57aeefc-69e8-4b46-aeec-2aff4026621c\") " pod="openstack/dnsmasq-dns-cf96b7dc5-lfwbc" Nov 28 07:16:04 crc kubenswrapper[4922]: I1128 07:16:04.512335 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bk2gc\" (UniqueName: \"kubernetes.io/projected/a57aeefc-69e8-4b46-aeec-2aff4026621c-kube-api-access-bk2gc\") pod \"dnsmasq-dns-cf96b7dc5-lfwbc\" (UID: \"a57aeefc-69e8-4b46-aeec-2aff4026621c\") " pod="openstack/dnsmasq-dns-cf96b7dc5-lfwbc" Nov 28 07:16:04 crc kubenswrapper[4922]: I1128 07:16:04.512393 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/a57aeefc-69e8-4b46-aeec-2aff4026621c-dns-svc\") pod \"dnsmasq-dns-cf96b7dc5-lfwbc\" (UID: \"a57aeefc-69e8-4b46-aeec-2aff4026621c\") " pod="openstack/dnsmasq-dns-cf96b7dc5-lfwbc" Nov 28 07:16:04 crc kubenswrapper[4922]: I1128 07:16:04.512414 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a57aeefc-69e8-4b46-aeec-2aff4026621c-config\") pod \"dnsmasq-dns-cf96b7dc5-lfwbc\" (UID: \"a57aeefc-69e8-4b46-aeec-2aff4026621c\") " pod="openstack/dnsmasq-dns-cf96b7dc5-lfwbc" Nov 28 07:16:04 crc kubenswrapper[4922]: I1128 07:16:04.512539 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5ece1333-c457-4099-bf00-1daa969a14dc-combined-ca-bundle\") pod \"barbican-keystone-listener-84495d76c8-mkvcb\" (UID: \"5ece1333-c457-4099-bf00-1daa969a14dc\") " pod="openstack/barbican-keystone-listener-84495d76c8-mkvcb" Nov 28 07:16:04 crc kubenswrapper[4922]: I1128 07:16:04.512573 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/f8685dc8-7577-4076-8a5a-beba52e9bae7-logs\") pod \"barbican-worker-5b7cbd995c-rwlzz\" (UID: \"f8685dc8-7577-4076-8a5a-beba52e9bae7\") " pod="openstack/barbican-worker-5b7cbd995c-rwlzz" Nov 28 07:16:04 crc kubenswrapper[4922]: I1128 07:16:04.512588 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/5ece1333-c457-4099-bf00-1daa969a14dc-config-data-custom\") pod \"barbican-keystone-listener-84495d76c8-mkvcb\" (UID: \"5ece1333-c457-4099-bf00-1daa969a14dc\") " pod="openstack/barbican-keystone-listener-84495d76c8-mkvcb" Nov 28 07:16:04 crc kubenswrapper[4922]: I1128 07:16:04.512651 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/a57aeefc-69e8-4b46-aeec-2aff4026621c-ovsdbserver-sb\") pod \"dnsmasq-dns-cf96b7dc5-lfwbc\" (UID: 
\"a57aeefc-69e8-4b46-aeec-2aff4026621c\") " pod="openstack/dnsmasq-dns-cf96b7dc5-lfwbc" Nov 28 07:16:04 crc kubenswrapper[4922]: I1128 07:16:04.523835 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/f8685dc8-7577-4076-8a5a-beba52e9bae7-config-data-custom\") pod \"barbican-worker-5b7cbd995c-rwlzz\" (UID: \"f8685dc8-7577-4076-8a5a-beba52e9bae7\") " pod="openstack/barbican-worker-5b7cbd995c-rwlzz" Nov 28 07:16:04 crc kubenswrapper[4922]: I1128 07:16:04.524708 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f8685dc8-7577-4076-8a5a-beba52e9bae7-combined-ca-bundle\") pod \"barbican-worker-5b7cbd995c-rwlzz\" (UID: \"f8685dc8-7577-4076-8a5a-beba52e9bae7\") " pod="openstack/barbican-worker-5b7cbd995c-rwlzz" Nov 28 07:16:04 crc kubenswrapper[4922]: I1128 07:16:04.533997 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-api-59488f5ff4-k2cvn"] Nov 28 07:16:04 crc kubenswrapper[4922]: I1128 07:16:04.536736 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f8685dc8-7577-4076-8a5a-beba52e9bae7-config-data\") pod \"barbican-worker-5b7cbd995c-rwlzz\" (UID: \"f8685dc8-7577-4076-8a5a-beba52e9bae7\") " pod="openstack/barbican-worker-5b7cbd995c-rwlzz" Nov 28 07:16:04 crc kubenswrapper[4922]: I1128 07:16:04.553950 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4g7dd\" (UniqueName: \"kubernetes.io/projected/f8685dc8-7577-4076-8a5a-beba52e9bae7-kube-api-access-4g7dd\") pod \"barbican-worker-5b7cbd995c-rwlzz\" (UID: \"f8685dc8-7577-4076-8a5a-beba52e9bae7\") " pod="openstack/barbican-worker-5b7cbd995c-rwlzz" Nov 28 07:16:04 crc kubenswrapper[4922]: I1128 07:16:04.617273 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/2bf381d5-4211-44a0-8fd6-4b1c05fb690d-scripts\") pod \"2bf381d5-4211-44a0-8fd6-4b1c05fb690d\" (UID: \"2bf381d5-4211-44a0-8fd6-4b1c05fb690d\") " Nov 28 07:16:04 crc kubenswrapper[4922]: I1128 07:16:04.617326 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2bf381d5-4211-44a0-8fd6-4b1c05fb690d-config-data\") pod \"2bf381d5-4211-44a0-8fd6-4b1c05fb690d\" (UID: \"2bf381d5-4211-44a0-8fd6-4b1c05fb690d\") " Nov 28 07:16:04 crc kubenswrapper[4922]: I1128 07:16:04.617377 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-67j4r\" (UniqueName: \"kubernetes.io/projected/2bf381d5-4211-44a0-8fd6-4b1c05fb690d-kube-api-access-67j4r\") pod \"2bf381d5-4211-44a0-8fd6-4b1c05fb690d\" (UID: \"2bf381d5-4211-44a0-8fd6-4b1c05fb690d\") " Nov 28 07:16:04 crc kubenswrapper[4922]: I1128 07:16:04.617403 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/2bf381d5-4211-44a0-8fd6-4b1c05fb690d-etc-machine-id\") pod \"2bf381d5-4211-44a0-8fd6-4b1c05fb690d\" (UID: \"2bf381d5-4211-44a0-8fd6-4b1c05fb690d\") " Nov 28 07:16:04 crc kubenswrapper[4922]: I1128 07:16:04.617435 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2bf381d5-4211-44a0-8fd6-4b1c05fb690d-combined-ca-bundle\") pod \"2bf381d5-4211-44a0-8fd6-4b1c05fb690d\" (UID: 
\"2bf381d5-4211-44a0-8fd6-4b1c05fb690d\") " Nov 28 07:16:04 crc kubenswrapper[4922]: I1128 07:16:04.617472 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/2bf381d5-4211-44a0-8fd6-4b1c05fb690d-db-sync-config-data\") pod \"2bf381d5-4211-44a0-8fd6-4b1c05fb690d\" (UID: \"2bf381d5-4211-44a0-8fd6-4b1c05fb690d\") " Nov 28 07:16:04 crc kubenswrapper[4922]: I1128 07:16:04.617664 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tc5rw\" (UniqueName: \"kubernetes.io/projected/69d50d02-732b-47a3-b342-ff1acbb0af7f-kube-api-access-tc5rw\") pod \"barbican-api-59488f5ff4-k2cvn\" (UID: \"69d50d02-732b-47a3-b342-ff1acbb0af7f\") " pod="openstack/barbican-api-59488f5ff4-k2cvn" Nov 28 07:16:04 crc kubenswrapper[4922]: I1128 07:16:04.617700 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-z875h\" (UniqueName: \"kubernetes.io/projected/5ece1333-c457-4099-bf00-1daa969a14dc-kube-api-access-z875h\") pod \"barbican-keystone-listener-84495d76c8-mkvcb\" (UID: \"5ece1333-c457-4099-bf00-1daa969a14dc\") " pod="openstack/barbican-keystone-listener-84495d76c8-mkvcb" Nov 28 07:16:04 crc kubenswrapper[4922]: I1128 07:16:04.617730 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/a57aeefc-69e8-4b46-aeec-2aff4026621c-ovsdbserver-nb\") pod \"dnsmasq-dns-cf96b7dc5-lfwbc\" (UID: \"a57aeefc-69e8-4b46-aeec-2aff4026621c\") " pod="openstack/dnsmasq-dns-cf96b7dc5-lfwbc" Nov 28 07:16:04 crc kubenswrapper[4922]: I1128 07:16:04.617754 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bk2gc\" (UniqueName: \"kubernetes.io/projected/a57aeefc-69e8-4b46-aeec-2aff4026621c-kube-api-access-bk2gc\") pod \"dnsmasq-dns-cf96b7dc5-lfwbc\" (UID: \"a57aeefc-69e8-4b46-aeec-2aff4026621c\") " pod="openstack/dnsmasq-dns-cf96b7dc5-lfwbc" Nov 28 07:16:04 crc kubenswrapper[4922]: I1128 07:16:04.617776 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/a57aeefc-69e8-4b46-aeec-2aff4026621c-dns-svc\") pod \"dnsmasq-dns-cf96b7dc5-lfwbc\" (UID: \"a57aeefc-69e8-4b46-aeec-2aff4026621c\") " pod="openstack/dnsmasq-dns-cf96b7dc5-lfwbc" Nov 28 07:16:04 crc kubenswrapper[4922]: I1128 07:16:04.617793 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a57aeefc-69e8-4b46-aeec-2aff4026621c-config\") pod \"dnsmasq-dns-cf96b7dc5-lfwbc\" (UID: \"a57aeefc-69e8-4b46-aeec-2aff4026621c\") " pod="openstack/dnsmasq-dns-cf96b7dc5-lfwbc" Nov 28 07:16:04 crc kubenswrapper[4922]: I1128 07:16:04.617834 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/69d50d02-732b-47a3-b342-ff1acbb0af7f-combined-ca-bundle\") pod \"barbican-api-59488f5ff4-k2cvn\" (UID: \"69d50d02-732b-47a3-b342-ff1acbb0af7f\") " pod="openstack/barbican-api-59488f5ff4-k2cvn" Nov 28 07:16:04 crc kubenswrapper[4922]: I1128 07:16:04.617851 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5ece1333-c457-4099-bf00-1daa969a14dc-combined-ca-bundle\") pod \"barbican-keystone-listener-84495d76c8-mkvcb\" (UID: 
\"5ece1333-c457-4099-bf00-1daa969a14dc\") " pod="openstack/barbican-keystone-listener-84495d76c8-mkvcb" Nov 28 07:16:04 crc kubenswrapper[4922]: I1128 07:16:04.617878 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/5ece1333-c457-4099-bf00-1daa969a14dc-config-data-custom\") pod \"barbican-keystone-listener-84495d76c8-mkvcb\" (UID: \"5ece1333-c457-4099-bf00-1daa969a14dc\") " pod="openstack/barbican-keystone-listener-84495d76c8-mkvcb" Nov 28 07:16:04 crc kubenswrapper[4922]: I1128 07:16:04.617895 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/69d50d02-732b-47a3-b342-ff1acbb0af7f-logs\") pod \"barbican-api-59488f5ff4-k2cvn\" (UID: \"69d50d02-732b-47a3-b342-ff1acbb0af7f\") " pod="openstack/barbican-api-59488f5ff4-k2cvn" Nov 28 07:16:04 crc kubenswrapper[4922]: I1128 07:16:04.617912 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/a57aeefc-69e8-4b46-aeec-2aff4026621c-ovsdbserver-sb\") pod \"dnsmasq-dns-cf96b7dc5-lfwbc\" (UID: \"a57aeefc-69e8-4b46-aeec-2aff4026621c\") " pod="openstack/dnsmasq-dns-cf96b7dc5-lfwbc" Nov 28 07:16:04 crc kubenswrapper[4922]: I1128 07:16:04.617930 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/69d50d02-732b-47a3-b342-ff1acbb0af7f-config-data-custom\") pod \"barbican-api-59488f5ff4-k2cvn\" (UID: \"69d50d02-732b-47a3-b342-ff1acbb0af7f\") " pod="openstack/barbican-api-59488f5ff4-k2cvn" Nov 28 07:16:04 crc kubenswrapper[4922]: I1128 07:16:04.617958 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5ece1333-c457-4099-bf00-1daa969a14dc-config-data\") pod \"barbican-keystone-listener-84495d76c8-mkvcb\" (UID: \"5ece1333-c457-4099-bf00-1daa969a14dc\") " pod="openstack/barbican-keystone-listener-84495d76c8-mkvcb" Nov 28 07:16:04 crc kubenswrapper[4922]: I1128 07:16:04.617981 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/5ece1333-c457-4099-bf00-1daa969a14dc-logs\") pod \"barbican-keystone-listener-84495d76c8-mkvcb\" (UID: \"5ece1333-c457-4099-bf00-1daa969a14dc\") " pod="openstack/barbican-keystone-listener-84495d76c8-mkvcb" Nov 28 07:16:04 crc kubenswrapper[4922]: I1128 07:16:04.618006 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/a57aeefc-69e8-4b46-aeec-2aff4026621c-dns-swift-storage-0\") pod \"dnsmasq-dns-cf96b7dc5-lfwbc\" (UID: \"a57aeefc-69e8-4b46-aeec-2aff4026621c\") " pod="openstack/dnsmasq-dns-cf96b7dc5-lfwbc" Nov 28 07:16:04 crc kubenswrapper[4922]: I1128 07:16:04.618025 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/69d50d02-732b-47a3-b342-ff1acbb0af7f-config-data\") pod \"barbican-api-59488f5ff4-k2cvn\" (UID: \"69d50d02-732b-47a3-b342-ff1acbb0af7f\") " pod="openstack/barbican-api-59488f5ff4-k2cvn" Nov 28 07:16:04 crc kubenswrapper[4922]: I1128 07:16:04.619044 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: 
\"kubernetes.io/configmap/a57aeefc-69e8-4b46-aeec-2aff4026621c-dns-svc\") pod \"dnsmasq-dns-cf96b7dc5-lfwbc\" (UID: \"a57aeefc-69e8-4b46-aeec-2aff4026621c\") " pod="openstack/dnsmasq-dns-cf96b7dc5-lfwbc" Nov 28 07:16:04 crc kubenswrapper[4922]: I1128 07:16:04.625976 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a57aeefc-69e8-4b46-aeec-2aff4026621c-config\") pod \"dnsmasq-dns-cf96b7dc5-lfwbc\" (UID: \"a57aeefc-69e8-4b46-aeec-2aff4026621c\") " pod="openstack/dnsmasq-dns-cf96b7dc5-lfwbc" Nov 28 07:16:04 crc kubenswrapper[4922]: I1128 07:16:04.637249 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/a57aeefc-69e8-4b46-aeec-2aff4026621c-ovsdbserver-nb\") pod \"dnsmasq-dns-cf96b7dc5-lfwbc\" (UID: \"a57aeefc-69e8-4b46-aeec-2aff4026621c\") " pod="openstack/dnsmasq-dns-cf96b7dc5-lfwbc" Nov 28 07:16:04 crc kubenswrapper[4922]: I1128 07:16:04.637306 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/2bf381d5-4211-44a0-8fd6-4b1c05fb690d-etc-machine-id" (OuterVolumeSpecName: "etc-machine-id") pod "2bf381d5-4211-44a0-8fd6-4b1c05fb690d" (UID: "2bf381d5-4211-44a0-8fd6-4b1c05fb690d"). InnerVolumeSpecName "etc-machine-id". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 28 07:16:04 crc kubenswrapper[4922]: I1128 07:16:04.638794 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/a57aeefc-69e8-4b46-aeec-2aff4026621c-ovsdbserver-sb\") pod \"dnsmasq-dns-cf96b7dc5-lfwbc\" (UID: \"a57aeefc-69e8-4b46-aeec-2aff4026621c\") " pod="openstack/dnsmasq-dns-cf96b7dc5-lfwbc" Nov 28 07:16:04 crc kubenswrapper[4922]: I1128 07:16:04.638893 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2bf381d5-4211-44a0-8fd6-4b1c05fb690d-scripts" (OuterVolumeSpecName: "scripts") pod "2bf381d5-4211-44a0-8fd6-4b1c05fb690d" (UID: "2bf381d5-4211-44a0-8fd6-4b1c05fb690d"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 07:16:04 crc kubenswrapper[4922]: I1128 07:16:04.639171 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/5ece1333-c457-4099-bf00-1daa969a14dc-logs\") pod \"barbican-keystone-listener-84495d76c8-mkvcb\" (UID: \"5ece1333-c457-4099-bf00-1daa969a14dc\") " pod="openstack/barbican-keystone-listener-84495d76c8-mkvcb" Nov 28 07:16:04 crc kubenswrapper[4922]: I1128 07:16:04.639300 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/a57aeefc-69e8-4b46-aeec-2aff4026621c-dns-swift-storage-0\") pod \"dnsmasq-dns-cf96b7dc5-lfwbc\" (UID: \"a57aeefc-69e8-4b46-aeec-2aff4026621c\") " pod="openstack/dnsmasq-dns-cf96b7dc5-lfwbc" Nov 28 07:16:04 crc kubenswrapper[4922]: I1128 07:16:04.656551 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2bf381d5-4211-44a0-8fd6-4b1c05fb690d-db-sync-config-data" (OuterVolumeSpecName: "db-sync-config-data") pod "2bf381d5-4211-44a0-8fd6-4b1c05fb690d" (UID: "2bf381d5-4211-44a0-8fd6-4b1c05fb690d"). InnerVolumeSpecName "db-sync-config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 07:16:04 crc kubenswrapper[4922]: I1128 07:16:04.666848 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/5ece1333-c457-4099-bf00-1daa969a14dc-config-data-custom\") pod \"barbican-keystone-listener-84495d76c8-mkvcb\" (UID: \"5ece1333-c457-4099-bf00-1daa969a14dc\") " pod="openstack/barbican-keystone-listener-84495d76c8-mkvcb" Nov 28 07:16:04 crc kubenswrapper[4922]: I1128 07:16:04.671360 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2bf381d5-4211-44a0-8fd6-4b1c05fb690d-kube-api-access-67j4r" (OuterVolumeSpecName: "kube-api-access-67j4r") pod "2bf381d5-4211-44a0-8fd6-4b1c05fb690d" (UID: "2bf381d5-4211-44a0-8fd6-4b1c05fb690d"). InnerVolumeSpecName "kube-api-access-67j4r". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 07:16:04 crc kubenswrapper[4922]: I1128 07:16:04.672357 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5ece1333-c457-4099-bf00-1daa969a14dc-config-data\") pod \"barbican-keystone-listener-84495d76c8-mkvcb\" (UID: \"5ece1333-c457-4099-bf00-1daa969a14dc\") " pod="openstack/barbican-keystone-listener-84495d76c8-mkvcb" Nov 28 07:16:04 crc kubenswrapper[4922]: I1128 07:16:04.672795 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5ece1333-c457-4099-bf00-1daa969a14dc-combined-ca-bundle\") pod \"barbican-keystone-listener-84495d76c8-mkvcb\" (UID: \"5ece1333-c457-4099-bf00-1daa969a14dc\") " pod="openstack/barbican-keystone-listener-84495d76c8-mkvcb" Nov 28 07:16:04 crc kubenswrapper[4922]: I1128 07:16:04.692937 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-z875h\" (UniqueName: \"kubernetes.io/projected/5ece1333-c457-4099-bf00-1daa969a14dc-kube-api-access-z875h\") pod \"barbican-keystone-listener-84495d76c8-mkvcb\" (UID: \"5ece1333-c457-4099-bf00-1daa969a14dc\") " pod="openstack/barbican-keystone-listener-84495d76c8-mkvcb" Nov 28 07:16:04 crc kubenswrapper[4922]: I1128 07:16:04.694485 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bk2gc\" (UniqueName: \"kubernetes.io/projected/a57aeefc-69e8-4b46-aeec-2aff4026621c-kube-api-access-bk2gc\") pod \"dnsmasq-dns-cf96b7dc5-lfwbc\" (UID: \"a57aeefc-69e8-4b46-aeec-2aff4026621c\") " pod="openstack/dnsmasq-dns-cf96b7dc5-lfwbc" Nov 28 07:16:04 crc kubenswrapper[4922]: I1128 07:16:04.705613 4922 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-worker-5b7cbd995c-rwlzz" Nov 28 07:16:04 crc kubenswrapper[4922]: I1128 07:16:04.720355 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/69d50d02-732b-47a3-b342-ff1acbb0af7f-logs\") pod \"barbican-api-59488f5ff4-k2cvn\" (UID: \"69d50d02-732b-47a3-b342-ff1acbb0af7f\") " pod="openstack/barbican-api-59488f5ff4-k2cvn" Nov 28 07:16:04 crc kubenswrapper[4922]: I1128 07:16:04.720391 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/69d50d02-732b-47a3-b342-ff1acbb0af7f-config-data-custom\") pod \"barbican-api-59488f5ff4-k2cvn\" (UID: \"69d50d02-732b-47a3-b342-ff1acbb0af7f\") " pod="openstack/barbican-api-59488f5ff4-k2cvn" Nov 28 07:16:04 crc kubenswrapper[4922]: I1128 07:16:04.720443 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/69d50d02-732b-47a3-b342-ff1acbb0af7f-config-data\") pod \"barbican-api-59488f5ff4-k2cvn\" (UID: \"69d50d02-732b-47a3-b342-ff1acbb0af7f\") " pod="openstack/barbican-api-59488f5ff4-k2cvn" Nov 28 07:16:04 crc kubenswrapper[4922]: I1128 07:16:04.720478 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tc5rw\" (UniqueName: \"kubernetes.io/projected/69d50d02-732b-47a3-b342-ff1acbb0af7f-kube-api-access-tc5rw\") pod \"barbican-api-59488f5ff4-k2cvn\" (UID: \"69d50d02-732b-47a3-b342-ff1acbb0af7f\") " pod="openstack/barbican-api-59488f5ff4-k2cvn" Nov 28 07:16:04 crc kubenswrapper[4922]: I1128 07:16:04.720556 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/69d50d02-732b-47a3-b342-ff1acbb0af7f-combined-ca-bundle\") pod \"barbican-api-59488f5ff4-k2cvn\" (UID: \"69d50d02-732b-47a3-b342-ff1acbb0af7f\") " pod="openstack/barbican-api-59488f5ff4-k2cvn" Nov 28 07:16:04 crc kubenswrapper[4922]: I1128 07:16:04.720604 4922 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-67j4r\" (UniqueName: \"kubernetes.io/projected/2bf381d5-4211-44a0-8fd6-4b1c05fb690d-kube-api-access-67j4r\") on node \"crc\" DevicePath \"\"" Nov 28 07:16:04 crc kubenswrapper[4922]: I1128 07:16:04.720617 4922 reconciler_common.go:293] "Volume detached for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/2bf381d5-4211-44a0-8fd6-4b1c05fb690d-etc-machine-id\") on node \"crc\" DevicePath \"\"" Nov 28 07:16:04 crc kubenswrapper[4922]: I1128 07:16:04.720650 4922 reconciler_common.go:293] "Volume detached for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/2bf381d5-4211-44a0-8fd6-4b1c05fb690d-db-sync-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 07:16:04 crc kubenswrapper[4922]: I1128 07:16:04.720658 4922 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/2bf381d5-4211-44a0-8fd6-4b1c05fb690d-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 07:16:04 crc kubenswrapper[4922]: I1128 07:16:04.724360 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2bf381d5-4211-44a0-8fd6-4b1c05fb690d-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "2bf381d5-4211-44a0-8fd6-4b1c05fb690d" (UID: "2bf381d5-4211-44a0-8fd6-4b1c05fb690d"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 07:16:04 crc kubenswrapper[4922]: I1128 07:16:04.724743 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/69d50d02-732b-47a3-b342-ff1acbb0af7f-logs\") pod \"barbican-api-59488f5ff4-k2cvn\" (UID: \"69d50d02-732b-47a3-b342-ff1acbb0af7f\") " pod="openstack/barbican-api-59488f5ff4-k2cvn" Nov 28 07:16:04 crc kubenswrapper[4922]: I1128 07:16:04.734895 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-keystone-listener-84495d76c8-mkvcb" Nov 28 07:16:04 crc kubenswrapper[4922]: I1128 07:16:04.742378 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/69d50d02-732b-47a3-b342-ff1acbb0af7f-combined-ca-bundle\") pod \"barbican-api-59488f5ff4-k2cvn\" (UID: \"69d50d02-732b-47a3-b342-ff1acbb0af7f\") " pod="openstack/barbican-api-59488f5ff4-k2cvn" Nov 28 07:16:04 crc kubenswrapper[4922]: I1128 07:16:04.750604 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-cf96b7dc5-lfwbc" Nov 28 07:16:04 crc kubenswrapper[4922]: I1128 07:16:04.753510 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/69d50d02-732b-47a3-b342-ff1acbb0af7f-config-data\") pod \"barbican-api-59488f5ff4-k2cvn\" (UID: \"69d50d02-732b-47a3-b342-ff1acbb0af7f\") " pod="openstack/barbican-api-59488f5ff4-k2cvn" Nov 28 07:16:04 crc kubenswrapper[4922]: I1128 07:16:04.761011 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tc5rw\" (UniqueName: \"kubernetes.io/projected/69d50d02-732b-47a3-b342-ff1acbb0af7f-kube-api-access-tc5rw\") pod \"barbican-api-59488f5ff4-k2cvn\" (UID: \"69d50d02-732b-47a3-b342-ff1acbb0af7f\") " pod="openstack/barbican-api-59488f5ff4-k2cvn" Nov 28 07:16:04 crc kubenswrapper[4922]: I1128 07:16:04.761687 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/69d50d02-732b-47a3-b342-ff1acbb0af7f-config-data-custom\") pod \"barbican-api-59488f5ff4-k2cvn\" (UID: \"69d50d02-732b-47a3-b342-ff1acbb0af7f\") " pod="openstack/barbican-api-59488f5ff4-k2cvn" Nov 28 07:16:04 crc kubenswrapper[4922]: I1128 07:16:04.799393 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2bf381d5-4211-44a0-8fd6-4b1c05fb690d-config-data" (OuterVolumeSpecName: "config-data") pod "2bf381d5-4211-44a0-8fd6-4b1c05fb690d" (UID: "2bf381d5-4211-44a0-8fd6-4b1c05fb690d"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 07:16:04 crc kubenswrapper[4922]: I1128 07:16:04.820647 4922 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-api-59488f5ff4-k2cvn" Nov 28 07:16:04 crc kubenswrapper[4922]: I1128 07:16:04.821867 4922 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2bf381d5-4211-44a0-8fd6-4b1c05fb690d-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 07:16:04 crc kubenswrapper[4922]: I1128 07:16:04.821880 4922 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2bf381d5-4211-44a0-8fd6-4b1c05fb690d-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 07:16:05 crc kubenswrapper[4922]: I1128 07:16:05.117128 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-sync-4p4rg" event={"ID":"2bf381d5-4211-44a0-8fd6-4b1c05fb690d","Type":"ContainerDied","Data":"39ea7b728178336f28d719801f1e298e8f3969a4bbae3640048e674dcf9201be"} Nov 28 07:16:05 crc kubenswrapper[4922]: I1128 07:16:05.117171 4922 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="39ea7b728178336f28d719801f1e298e8f3969a4bbae3640048e674dcf9201be" Nov 28 07:16:05 crc kubenswrapper[4922]: I1128 07:16:05.117248 4922 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-sync-4p4rg" Nov 28 07:16:05 crc kubenswrapper[4922]: I1128 07:16:05.129284 4922 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="be4be2de-2f46-4982-8cd6-b73888d293af" containerName="ceilometer-notification-agent" containerID="cri-o://794e738fc60e5d5c994b55691a1a29e41923ef5298bedb39dc71b6676bddf78c" gracePeriod=30 Nov 28 07:16:05 crc kubenswrapper[4922]: I1128 07:16:05.129530 4922 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="be4be2de-2f46-4982-8cd6-b73888d293af" containerName="proxy-httpd" containerID="cri-o://6cf6e8e9ddb91b66c5ae09709d86caedb72b5ff87f3e37e0412675fb438effc9" gracePeriod=30 Nov 28 07:16:05 crc kubenswrapper[4922]: I1128 07:16:05.129531 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"be4be2de-2f46-4982-8cd6-b73888d293af","Type":"ContainerStarted","Data":"6cf6e8e9ddb91b66c5ae09709d86caedb72b5ff87f3e37e0412675fb438effc9"} Nov 28 07:16:05 crc kubenswrapper[4922]: I1128 07:16:05.129588 4922 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="be4be2de-2f46-4982-8cd6-b73888d293af" containerName="sg-core" containerID="cri-o://78fe5a4b19803c14f56518cac6656d53e08b75375e0c763c79911b31d05d3987" gracePeriod=30 Nov 28 07:16:05 crc kubenswrapper[4922]: I1128 07:16:05.129834 4922 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Nov 28 07:16:05 crc kubenswrapper[4922]: I1128 07:16:05.300299 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-worker-5b7cbd995c-rwlzz"] Nov 28 07:16:05 crc kubenswrapper[4922]: I1128 07:16:05.336350 4922 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-scheduler-0"] Nov 28 07:16:05 crc kubenswrapper[4922]: E1128 07:16:05.336684 4922 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2bf381d5-4211-44a0-8fd6-4b1c05fb690d" containerName="cinder-db-sync" Nov 28 07:16:05 crc kubenswrapper[4922]: I1128 07:16:05.336706 4922 state_mem.go:107] "Deleted CPUSet assignment" podUID="2bf381d5-4211-44a0-8fd6-4b1c05fb690d" containerName="cinder-db-sync" Nov 28 07:16:05 crc 
kubenswrapper[4922]: I1128 07:16:05.336880 4922 memory_manager.go:354] "RemoveStaleState removing state" podUID="2bf381d5-4211-44a0-8fd6-4b1c05fb690d" containerName="cinder-db-sync" Nov 28 07:16:05 crc kubenswrapper[4922]: I1128 07:16:05.337720 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-scheduler-0" Nov 28 07:16:05 crc kubenswrapper[4922]: I1128 07:16:05.342863 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-scheduler-config-data" Nov 28 07:16:05 crc kubenswrapper[4922]: I1128 07:16:05.343060 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-cinder-dockercfg-4r9g9" Nov 28 07:16:05 crc kubenswrapper[4922]: I1128 07:16:05.343213 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-scripts" Nov 28 07:16:05 crc kubenswrapper[4922]: I1128 07:16:05.343573 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-config-data" Nov 28 07:16:05 crc kubenswrapper[4922]: I1128 07:16:05.384312 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-scheduler-0"] Nov 28 07:16:05 crc kubenswrapper[4922]: I1128 07:16:05.442391 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5beca492-9b93-4ea6-9c21-e8e87bf85afd-scripts\") pod \"cinder-scheduler-0\" (UID: \"5beca492-9b93-4ea6-9c21-e8e87bf85afd\") " pod="openstack/cinder-scheduler-0" Nov 28 07:16:05 crc kubenswrapper[4922]: I1128 07:16:05.442517 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5beca492-9b93-4ea6-9c21-e8e87bf85afd-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"5beca492-9b93-4ea6-9c21-e8e87bf85afd\") " pod="openstack/cinder-scheduler-0" Nov 28 07:16:05 crc kubenswrapper[4922]: I1128 07:16:05.445299 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/5beca492-9b93-4ea6-9c21-e8e87bf85afd-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"5beca492-9b93-4ea6-9c21-e8e87bf85afd\") " pod="openstack/cinder-scheduler-0" Nov 28 07:16:05 crc kubenswrapper[4922]: I1128 07:16:05.445379 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5beca492-9b93-4ea6-9c21-e8e87bf85afd-config-data\") pod \"cinder-scheduler-0\" (UID: \"5beca492-9b93-4ea6-9c21-e8e87bf85afd\") " pod="openstack/cinder-scheduler-0" Nov 28 07:16:05 crc kubenswrapper[4922]: I1128 07:16:05.445461 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/5beca492-9b93-4ea6-9c21-e8e87bf85afd-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"5beca492-9b93-4ea6-9c21-e8e87bf85afd\") " pod="openstack/cinder-scheduler-0" Nov 28 07:16:05 crc kubenswrapper[4922]: I1128 07:16:05.445572 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-z7qms\" (UniqueName: \"kubernetes.io/projected/5beca492-9b93-4ea6-9c21-e8e87bf85afd-kube-api-access-z7qms\") pod \"cinder-scheduler-0\" (UID: \"5beca492-9b93-4ea6-9c21-e8e87bf85afd\") " pod="openstack/cinder-scheduler-0" Nov 28 07:16:05 crc kubenswrapper[4922]: I1128 
07:16:05.472251 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-keystone-listener-84495d76c8-mkvcb"] Nov 28 07:16:05 crc kubenswrapper[4922]: I1128 07:16:05.539409 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-cf96b7dc5-lfwbc"] Nov 28 07:16:05 crc kubenswrapper[4922]: I1128 07:16:05.562863 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-z7qms\" (UniqueName: \"kubernetes.io/projected/5beca492-9b93-4ea6-9c21-e8e87bf85afd-kube-api-access-z7qms\") pod \"cinder-scheduler-0\" (UID: \"5beca492-9b93-4ea6-9c21-e8e87bf85afd\") " pod="openstack/cinder-scheduler-0" Nov 28 07:16:05 crc kubenswrapper[4922]: I1128 07:16:05.563408 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5beca492-9b93-4ea6-9c21-e8e87bf85afd-scripts\") pod \"cinder-scheduler-0\" (UID: \"5beca492-9b93-4ea6-9c21-e8e87bf85afd\") " pod="openstack/cinder-scheduler-0" Nov 28 07:16:05 crc kubenswrapper[4922]: I1128 07:16:05.563471 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5beca492-9b93-4ea6-9c21-e8e87bf85afd-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"5beca492-9b93-4ea6-9c21-e8e87bf85afd\") " pod="openstack/cinder-scheduler-0" Nov 28 07:16:05 crc kubenswrapper[4922]: I1128 07:16:05.563500 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/5beca492-9b93-4ea6-9c21-e8e87bf85afd-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"5beca492-9b93-4ea6-9c21-e8e87bf85afd\") " pod="openstack/cinder-scheduler-0" Nov 28 07:16:05 crc kubenswrapper[4922]: I1128 07:16:05.563535 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5beca492-9b93-4ea6-9c21-e8e87bf85afd-config-data\") pod \"cinder-scheduler-0\" (UID: \"5beca492-9b93-4ea6-9c21-e8e87bf85afd\") " pod="openstack/cinder-scheduler-0" Nov 28 07:16:05 crc kubenswrapper[4922]: I1128 07:16:05.563576 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/5beca492-9b93-4ea6-9c21-e8e87bf85afd-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"5beca492-9b93-4ea6-9c21-e8e87bf85afd\") " pod="openstack/cinder-scheduler-0" Nov 28 07:16:05 crc kubenswrapper[4922]: I1128 07:16:05.567486 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/5beca492-9b93-4ea6-9c21-e8e87bf85afd-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"5beca492-9b93-4ea6-9c21-e8e87bf85afd\") " pod="openstack/cinder-scheduler-0" Nov 28 07:16:05 crc kubenswrapper[4922]: I1128 07:16:05.573810 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5beca492-9b93-4ea6-9c21-e8e87bf85afd-scripts\") pod \"cinder-scheduler-0\" (UID: \"5beca492-9b93-4ea6-9c21-e8e87bf85afd\") " pod="openstack/cinder-scheduler-0" Nov 28 07:16:05 crc kubenswrapper[4922]: I1128 07:16:05.576798 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/5beca492-9b93-4ea6-9c21-e8e87bf85afd-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"5beca492-9b93-4ea6-9c21-e8e87bf85afd\") " 
pod="openstack/cinder-scheduler-0" Nov 28 07:16:05 crc kubenswrapper[4922]: I1128 07:16:05.580671 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5beca492-9b93-4ea6-9c21-e8e87bf85afd-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"5beca492-9b93-4ea6-9c21-e8e87bf85afd\") " pod="openstack/cinder-scheduler-0" Nov 28 07:16:05 crc kubenswrapper[4922]: I1128 07:16:05.581686 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5beca492-9b93-4ea6-9c21-e8e87bf85afd-config-data\") pod \"cinder-scheduler-0\" (UID: \"5beca492-9b93-4ea6-9c21-e8e87bf85afd\") " pod="openstack/cinder-scheduler-0" Nov 28 07:16:05 crc kubenswrapper[4922]: I1128 07:16:05.595484 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-z7qms\" (UniqueName: \"kubernetes.io/projected/5beca492-9b93-4ea6-9c21-e8e87bf85afd-kube-api-access-z7qms\") pod \"cinder-scheduler-0\" (UID: \"5beca492-9b93-4ea6-9c21-e8e87bf85afd\") " pod="openstack/cinder-scheduler-0" Nov 28 07:16:05 crc kubenswrapper[4922]: I1128 07:16:05.644293 4922 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-cf96b7dc5-lfwbc"] Nov 28 07:16:05 crc kubenswrapper[4922]: I1128 07:16:05.658320 4922 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-c55f6679-c2kdb"] Nov 28 07:16:05 crc kubenswrapper[4922]: I1128 07:16:05.664118 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-c55f6679-c2kdb" Nov 28 07:16:05 crc kubenswrapper[4922]: I1128 07:16:05.685858 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-scheduler-0" Nov 28 07:16:05 crc kubenswrapper[4922]: I1128 07:16:05.713281 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-c55f6679-c2kdb"] Nov 28 07:16:05 crc kubenswrapper[4922]: I1128 07:16:05.726528 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-api-59488f5ff4-k2cvn"] Nov 28 07:16:05 crc kubenswrapper[4922]: I1128 07:16:05.737280 4922 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-api-0"] Nov 28 07:16:05 crc kubenswrapper[4922]: I1128 07:16:05.738706 4922 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-api-0" Nov 28 07:16:05 crc kubenswrapper[4922]: I1128 07:16:05.741848 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-api-config-data" Nov 28 07:16:05 crc kubenswrapper[4922]: I1128 07:16:05.767788 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-api-0"] Nov 28 07:16:05 crc kubenswrapper[4922]: I1128 07:16:05.768537 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/cd60085e-2c1f-4795-95c2-ac9a9a2100bc-config-data-custom\") pod \"cinder-api-0\" (UID: \"cd60085e-2c1f-4795-95c2-ac9a9a2100bc\") " pod="openstack/cinder-api-0" Nov 28 07:16:05 crc kubenswrapper[4922]: I1128 07:16:05.768561 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cd60085e-2c1f-4795-95c2-ac9a9a2100bc-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"cd60085e-2c1f-4795-95c2-ac9a9a2100bc\") " pod="openstack/cinder-api-0" Nov 28 07:16:05 crc kubenswrapper[4922]: I1128 07:16:05.768584 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/cd60085e-2c1f-4795-95c2-ac9a9a2100bc-scripts\") pod \"cinder-api-0\" (UID: \"cd60085e-2c1f-4795-95c2-ac9a9a2100bc\") " pod="openstack/cinder-api-0" Nov 28 07:16:05 crc kubenswrapper[4922]: I1128 07:16:05.768614 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fhqgp\" (UniqueName: \"kubernetes.io/projected/cd60085e-2c1f-4795-95c2-ac9a9a2100bc-kube-api-access-fhqgp\") pod \"cinder-api-0\" (UID: \"cd60085e-2c1f-4795-95c2-ac9a9a2100bc\") " pod="openstack/cinder-api-0" Nov 28 07:16:05 crc kubenswrapper[4922]: I1128 07:16:05.768641 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/8c44439c-6a1b-430d-9dd1-8b0c16033ac1-dns-swift-storage-0\") pod \"dnsmasq-dns-c55f6679-c2kdb\" (UID: \"8c44439c-6a1b-430d-9dd1-8b0c16033ac1\") " pod="openstack/dnsmasq-dns-c55f6679-c2kdb" Nov 28 07:16:05 crc kubenswrapper[4922]: I1128 07:16:05.768660 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cd60085e-2c1f-4795-95c2-ac9a9a2100bc-config-data\") pod \"cinder-api-0\" (UID: \"cd60085e-2c1f-4795-95c2-ac9a9a2100bc\") " pod="openstack/cinder-api-0" Nov 28 07:16:05 crc kubenswrapper[4922]: I1128 07:16:05.768678 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/cd60085e-2c1f-4795-95c2-ac9a9a2100bc-logs\") pod \"cinder-api-0\" (UID: \"cd60085e-2c1f-4795-95c2-ac9a9a2100bc\") " pod="openstack/cinder-api-0" Nov 28 07:16:05 crc kubenswrapper[4922]: I1128 07:16:05.768705 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/8c44439c-6a1b-430d-9dd1-8b0c16033ac1-ovsdbserver-nb\") pod \"dnsmasq-dns-c55f6679-c2kdb\" (UID: \"8c44439c-6a1b-430d-9dd1-8b0c16033ac1\") " pod="openstack/dnsmasq-dns-c55f6679-c2kdb" Nov 28 07:16:05 crc kubenswrapper[4922]: I1128 07:16:05.768753 4922 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8c44439c-6a1b-430d-9dd1-8b0c16033ac1-config\") pod \"dnsmasq-dns-c55f6679-c2kdb\" (UID: \"8c44439c-6a1b-430d-9dd1-8b0c16033ac1\") " pod="openstack/dnsmasq-dns-c55f6679-c2kdb" Nov 28 07:16:05 crc kubenswrapper[4922]: I1128 07:16:05.768766 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4lthm\" (UniqueName: \"kubernetes.io/projected/8c44439c-6a1b-430d-9dd1-8b0c16033ac1-kube-api-access-4lthm\") pod \"dnsmasq-dns-c55f6679-c2kdb\" (UID: \"8c44439c-6a1b-430d-9dd1-8b0c16033ac1\") " pod="openstack/dnsmasq-dns-c55f6679-c2kdb" Nov 28 07:16:05 crc kubenswrapper[4922]: I1128 07:16:05.768784 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/cd60085e-2c1f-4795-95c2-ac9a9a2100bc-etc-machine-id\") pod \"cinder-api-0\" (UID: \"cd60085e-2c1f-4795-95c2-ac9a9a2100bc\") " pod="openstack/cinder-api-0" Nov 28 07:16:05 crc kubenswrapper[4922]: I1128 07:16:05.768798 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/8c44439c-6a1b-430d-9dd1-8b0c16033ac1-ovsdbserver-sb\") pod \"dnsmasq-dns-c55f6679-c2kdb\" (UID: \"8c44439c-6a1b-430d-9dd1-8b0c16033ac1\") " pod="openstack/dnsmasq-dns-c55f6679-c2kdb" Nov 28 07:16:05 crc kubenswrapper[4922]: I1128 07:16:05.768841 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/8c44439c-6a1b-430d-9dd1-8b0c16033ac1-dns-svc\") pod \"dnsmasq-dns-c55f6679-c2kdb\" (UID: \"8c44439c-6a1b-430d-9dd1-8b0c16033ac1\") " pod="openstack/dnsmasq-dns-c55f6679-c2kdb" Nov 28 07:16:05 crc kubenswrapper[4922]: I1128 07:16:05.870642 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/8c44439c-6a1b-430d-9dd1-8b0c16033ac1-dns-svc\") pod \"dnsmasq-dns-c55f6679-c2kdb\" (UID: \"8c44439c-6a1b-430d-9dd1-8b0c16033ac1\") " pod="openstack/dnsmasq-dns-c55f6679-c2kdb" Nov 28 07:16:05 crc kubenswrapper[4922]: I1128 07:16:05.870724 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/cd60085e-2c1f-4795-95c2-ac9a9a2100bc-config-data-custom\") pod \"cinder-api-0\" (UID: \"cd60085e-2c1f-4795-95c2-ac9a9a2100bc\") " pod="openstack/cinder-api-0" Nov 28 07:16:05 crc kubenswrapper[4922]: I1128 07:16:05.870747 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cd60085e-2c1f-4795-95c2-ac9a9a2100bc-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"cd60085e-2c1f-4795-95c2-ac9a9a2100bc\") " pod="openstack/cinder-api-0" Nov 28 07:16:05 crc kubenswrapper[4922]: I1128 07:16:05.870771 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/cd60085e-2c1f-4795-95c2-ac9a9a2100bc-scripts\") pod \"cinder-api-0\" (UID: \"cd60085e-2c1f-4795-95c2-ac9a9a2100bc\") " pod="openstack/cinder-api-0" Nov 28 07:16:05 crc kubenswrapper[4922]: I1128 07:16:05.870804 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fhqgp\" (UniqueName: 
\"kubernetes.io/projected/cd60085e-2c1f-4795-95c2-ac9a9a2100bc-kube-api-access-fhqgp\") pod \"cinder-api-0\" (UID: \"cd60085e-2c1f-4795-95c2-ac9a9a2100bc\") " pod="openstack/cinder-api-0" Nov 28 07:16:05 crc kubenswrapper[4922]: I1128 07:16:05.870834 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cd60085e-2c1f-4795-95c2-ac9a9a2100bc-config-data\") pod \"cinder-api-0\" (UID: \"cd60085e-2c1f-4795-95c2-ac9a9a2100bc\") " pod="openstack/cinder-api-0" Nov 28 07:16:05 crc kubenswrapper[4922]: I1128 07:16:05.870867 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/8c44439c-6a1b-430d-9dd1-8b0c16033ac1-dns-swift-storage-0\") pod \"dnsmasq-dns-c55f6679-c2kdb\" (UID: \"8c44439c-6a1b-430d-9dd1-8b0c16033ac1\") " pod="openstack/dnsmasq-dns-c55f6679-c2kdb" Nov 28 07:16:05 crc kubenswrapper[4922]: I1128 07:16:05.870888 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/cd60085e-2c1f-4795-95c2-ac9a9a2100bc-logs\") pod \"cinder-api-0\" (UID: \"cd60085e-2c1f-4795-95c2-ac9a9a2100bc\") " pod="openstack/cinder-api-0" Nov 28 07:16:05 crc kubenswrapper[4922]: I1128 07:16:05.870919 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/8c44439c-6a1b-430d-9dd1-8b0c16033ac1-ovsdbserver-nb\") pod \"dnsmasq-dns-c55f6679-c2kdb\" (UID: \"8c44439c-6a1b-430d-9dd1-8b0c16033ac1\") " pod="openstack/dnsmasq-dns-c55f6679-c2kdb" Nov 28 07:16:05 crc kubenswrapper[4922]: I1128 07:16:05.870971 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8c44439c-6a1b-430d-9dd1-8b0c16033ac1-config\") pod \"dnsmasq-dns-c55f6679-c2kdb\" (UID: \"8c44439c-6a1b-430d-9dd1-8b0c16033ac1\") " pod="openstack/dnsmasq-dns-c55f6679-c2kdb" Nov 28 07:16:05 crc kubenswrapper[4922]: I1128 07:16:05.870988 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4lthm\" (UniqueName: \"kubernetes.io/projected/8c44439c-6a1b-430d-9dd1-8b0c16033ac1-kube-api-access-4lthm\") pod \"dnsmasq-dns-c55f6679-c2kdb\" (UID: \"8c44439c-6a1b-430d-9dd1-8b0c16033ac1\") " pod="openstack/dnsmasq-dns-c55f6679-c2kdb" Nov 28 07:16:05 crc kubenswrapper[4922]: I1128 07:16:05.871009 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/cd60085e-2c1f-4795-95c2-ac9a9a2100bc-etc-machine-id\") pod \"cinder-api-0\" (UID: \"cd60085e-2c1f-4795-95c2-ac9a9a2100bc\") " pod="openstack/cinder-api-0" Nov 28 07:16:05 crc kubenswrapper[4922]: I1128 07:16:05.871025 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/8c44439c-6a1b-430d-9dd1-8b0c16033ac1-ovsdbserver-sb\") pod \"dnsmasq-dns-c55f6679-c2kdb\" (UID: \"8c44439c-6a1b-430d-9dd1-8b0c16033ac1\") " pod="openstack/dnsmasq-dns-c55f6679-c2kdb" Nov 28 07:16:05 crc kubenswrapper[4922]: I1128 07:16:05.871909 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/8c44439c-6a1b-430d-9dd1-8b0c16033ac1-ovsdbserver-sb\") pod \"dnsmasq-dns-c55f6679-c2kdb\" (UID: \"8c44439c-6a1b-430d-9dd1-8b0c16033ac1\") " pod="openstack/dnsmasq-dns-c55f6679-c2kdb" Nov 28 
07:16:05 crc kubenswrapper[4922]: I1128 07:16:05.872427 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/cd60085e-2c1f-4795-95c2-ac9a9a2100bc-logs\") pod \"cinder-api-0\" (UID: \"cd60085e-2c1f-4795-95c2-ac9a9a2100bc\") " pod="openstack/cinder-api-0" Nov 28 07:16:05 crc kubenswrapper[4922]: I1128 07:16:05.872566 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/8c44439c-6a1b-430d-9dd1-8b0c16033ac1-dns-swift-storage-0\") pod \"dnsmasq-dns-c55f6679-c2kdb\" (UID: \"8c44439c-6a1b-430d-9dd1-8b0c16033ac1\") " pod="openstack/dnsmasq-dns-c55f6679-c2kdb" Nov 28 07:16:05 crc kubenswrapper[4922]: I1128 07:16:05.872884 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/cd60085e-2c1f-4795-95c2-ac9a9a2100bc-etc-machine-id\") pod \"cinder-api-0\" (UID: \"cd60085e-2c1f-4795-95c2-ac9a9a2100bc\") " pod="openstack/cinder-api-0" Nov 28 07:16:05 crc kubenswrapper[4922]: I1128 07:16:05.875051 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/8c44439c-6a1b-430d-9dd1-8b0c16033ac1-dns-svc\") pod \"dnsmasq-dns-c55f6679-c2kdb\" (UID: \"8c44439c-6a1b-430d-9dd1-8b0c16033ac1\") " pod="openstack/dnsmasq-dns-c55f6679-c2kdb" Nov 28 07:16:05 crc kubenswrapper[4922]: I1128 07:16:05.875093 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/8c44439c-6a1b-430d-9dd1-8b0c16033ac1-ovsdbserver-nb\") pod \"dnsmasq-dns-c55f6679-c2kdb\" (UID: \"8c44439c-6a1b-430d-9dd1-8b0c16033ac1\") " pod="openstack/dnsmasq-dns-c55f6679-c2kdb" Nov 28 07:16:05 crc kubenswrapper[4922]: I1128 07:16:05.876408 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cd60085e-2c1f-4795-95c2-ac9a9a2100bc-config-data\") pod \"cinder-api-0\" (UID: \"cd60085e-2c1f-4795-95c2-ac9a9a2100bc\") " pod="openstack/cinder-api-0" Nov 28 07:16:05 crc kubenswrapper[4922]: I1128 07:16:05.880542 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cd60085e-2c1f-4795-95c2-ac9a9a2100bc-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"cd60085e-2c1f-4795-95c2-ac9a9a2100bc\") " pod="openstack/cinder-api-0" Nov 28 07:16:05 crc kubenswrapper[4922]: I1128 07:16:05.893682 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8c44439c-6a1b-430d-9dd1-8b0c16033ac1-config\") pod \"dnsmasq-dns-c55f6679-c2kdb\" (UID: \"8c44439c-6a1b-430d-9dd1-8b0c16033ac1\") " pod="openstack/dnsmasq-dns-c55f6679-c2kdb" Nov 28 07:16:05 crc kubenswrapper[4922]: I1128 07:16:05.896884 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/cd60085e-2c1f-4795-95c2-ac9a9a2100bc-config-data-custom\") pod \"cinder-api-0\" (UID: \"cd60085e-2c1f-4795-95c2-ac9a9a2100bc\") " pod="openstack/cinder-api-0" Nov 28 07:16:05 crc kubenswrapper[4922]: I1128 07:16:05.898043 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/cd60085e-2c1f-4795-95c2-ac9a9a2100bc-scripts\") pod \"cinder-api-0\" (UID: \"cd60085e-2c1f-4795-95c2-ac9a9a2100bc\") " pod="openstack/cinder-api-0" Nov 28 07:16:05 
crc kubenswrapper[4922]: I1128 07:16:05.901649 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fhqgp\" (UniqueName: \"kubernetes.io/projected/cd60085e-2c1f-4795-95c2-ac9a9a2100bc-kube-api-access-fhqgp\") pod \"cinder-api-0\" (UID: \"cd60085e-2c1f-4795-95c2-ac9a9a2100bc\") " pod="openstack/cinder-api-0" Nov 28 07:16:05 crc kubenswrapper[4922]: I1128 07:16:05.905016 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4lthm\" (UniqueName: \"kubernetes.io/projected/8c44439c-6a1b-430d-9dd1-8b0c16033ac1-kube-api-access-4lthm\") pod \"dnsmasq-dns-c55f6679-c2kdb\" (UID: \"8c44439c-6a1b-430d-9dd1-8b0c16033ac1\") " pod="openstack/dnsmasq-dns-c55f6679-c2kdb" Nov 28 07:16:06 crc kubenswrapper[4922]: I1128 07:16:06.065887 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-c55f6679-c2kdb" Nov 28 07:16:06 crc kubenswrapper[4922]: I1128 07:16:06.076708 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-api-0" Nov 28 07:16:06 crc kubenswrapper[4922]: I1128 07:16:06.151635 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-worker-5b7cbd995c-rwlzz" event={"ID":"f8685dc8-7577-4076-8a5a-beba52e9bae7","Type":"ContainerStarted","Data":"d9e9c2b95a6180c1348b95175cebe7c415871cdb6d47555bec506e6e6e9dff41"} Nov 28 07:16:06 crc kubenswrapper[4922]: I1128 07:16:06.152977 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-cf96b7dc5-lfwbc" event={"ID":"a57aeefc-69e8-4b46-aeec-2aff4026621c","Type":"ContainerStarted","Data":"05a030dad76326401a482943167a22eaca8a4def6a099779080a390651f52331"} Nov 28 07:16:06 crc kubenswrapper[4922]: I1128 07:16:06.155279 4922 generic.go:334] "Generic (PLEG): container finished" podID="be4be2de-2f46-4982-8cd6-b73888d293af" containerID="6cf6e8e9ddb91b66c5ae09709d86caedb72b5ff87f3e37e0412675fb438effc9" exitCode=0 Nov 28 07:16:06 crc kubenswrapper[4922]: I1128 07:16:06.155319 4922 generic.go:334] "Generic (PLEG): container finished" podID="be4be2de-2f46-4982-8cd6-b73888d293af" containerID="78fe5a4b19803c14f56518cac6656d53e08b75375e0c763c79911b31d05d3987" exitCode=2 Nov 28 07:16:06 crc kubenswrapper[4922]: I1128 07:16:06.155372 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"be4be2de-2f46-4982-8cd6-b73888d293af","Type":"ContainerDied","Data":"6cf6e8e9ddb91b66c5ae09709d86caedb72b5ff87f3e37e0412675fb438effc9"} Nov 28 07:16:06 crc kubenswrapper[4922]: I1128 07:16:06.155402 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"be4be2de-2f46-4982-8cd6-b73888d293af","Type":"ContainerDied","Data":"78fe5a4b19803c14f56518cac6656d53e08b75375e0c763c79911b31d05d3987"} Nov 28 07:16:06 crc kubenswrapper[4922]: I1128 07:16:06.156466 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-keystone-listener-84495d76c8-mkvcb" event={"ID":"5ece1333-c457-4099-bf00-1daa969a14dc","Type":"ContainerStarted","Data":"547b7eb2131d1818826d54e1ace34f79edd3b16dc86f8564a9effb4b0ce5247c"} Nov 28 07:16:06 crc kubenswrapper[4922]: I1128 07:16:06.157702 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-59488f5ff4-k2cvn" event={"ID":"69d50d02-732b-47a3-b342-ff1acbb0af7f","Type":"ContainerStarted","Data":"2d69f3183359e875eb0d9ca8f2d58ba399578d74b85e125394815f2df59777cb"} Nov 28 07:16:06 crc kubenswrapper[4922]: I1128 07:16:06.157845 4922 
prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Nov 28 07:16:06 crc kubenswrapper[4922]: I1128 07:16:06.157865 4922 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Nov 28 07:16:07 crc kubenswrapper[4922]: I1128 07:16:07.391088 4922 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-external-api-0" Nov 28 07:16:07 crc kubenswrapper[4922]: I1128 07:16:07.392899 4922 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Nov 28 07:16:07 crc kubenswrapper[4922]: I1128 07:16:07.397297 4922 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-external-api-0" Nov 28 07:16:07 crc kubenswrapper[4922]: I1128 07:16:07.695228 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-api-0"] Nov 28 07:16:07 crc kubenswrapper[4922]: I1128 07:16:07.813766 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-scheduler-0"] Nov 28 07:16:07 crc kubenswrapper[4922]: I1128 07:16:07.823414 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-c55f6679-c2kdb"] Nov 28 07:16:07 crc kubenswrapper[4922]: W1128 07:16:07.827864 4922 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod8c44439c_6a1b_430d_9dd1_8b0c16033ac1.slice/crio-d6701e3e9ced41b757d35abea2f684ee6daea191c18d96765c7bde2f8a477df1 WatchSource:0}: Error finding container d6701e3e9ced41b757d35abea2f684ee6daea191c18d96765c7bde2f8a477df1: Status 404 returned error can't find the container with id d6701e3e9ced41b757d35abea2f684ee6daea191c18d96765c7bde2f8a477df1 Nov 28 07:16:08 crc kubenswrapper[4922]: I1128 07:16:08.186905 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"cd60085e-2c1f-4795-95c2-ac9a9a2100bc","Type":"ContainerStarted","Data":"91101bdab729a356a96fe1753a80896d5b0385e62a3dc71a5602a402211512da"} Nov 28 07:16:08 crc kubenswrapper[4922]: I1128 07:16:08.189898 4922 generic.go:334] "Generic (PLEG): container finished" podID="a57aeefc-69e8-4b46-aeec-2aff4026621c" containerID="28c7a774a98a2323e452c6bd805a2593a12ee9e288be70f0ee93a486ccecf3c7" exitCode=0 Nov 28 07:16:08 crc kubenswrapper[4922]: I1128 07:16:08.189950 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-cf96b7dc5-lfwbc" event={"ID":"a57aeefc-69e8-4b46-aeec-2aff4026621c","Type":"ContainerDied","Data":"28c7a774a98a2323e452c6bd805a2593a12ee9e288be70f0ee93a486ccecf3c7"} Nov 28 07:16:08 crc kubenswrapper[4922]: I1128 07:16:08.191537 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-c55f6679-c2kdb" event={"ID":"8c44439c-6a1b-430d-9dd1-8b0c16033ac1","Type":"ContainerStarted","Data":"d6701e3e9ced41b757d35abea2f684ee6daea191c18d96765c7bde2f8a477df1"} Nov 28 07:16:08 crc kubenswrapper[4922]: I1128 07:16:08.202651 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"5beca492-9b93-4ea6-9c21-e8e87bf85afd","Type":"ContainerStarted","Data":"1ca275389bbcd46ddec6af21af12472db88f7a4d65d2438b46893aedbd07627e"} Nov 28 07:16:08 crc kubenswrapper[4922]: I1128 07:16:08.221292 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-59488f5ff4-k2cvn" event={"ID":"69d50d02-732b-47a3-b342-ff1acbb0af7f","Type":"ContainerStarted","Data":"dcd763ee8cb626b51c7b9e6adecf7b30b817f639e88a1774228b08dd526ce2f1"} Nov 28 07:16:08 crc 
kubenswrapper[4922]: I1128 07:16:08.221339 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-59488f5ff4-k2cvn" event={"ID":"69d50d02-732b-47a3-b342-ff1acbb0af7f","Type":"ContainerStarted","Data":"e2a351a4a54a3ccf67ea907ebc75c31bb097dfde0b8ed6cfd629d5fa6d4800ec"} Nov 28 07:16:08 crc kubenswrapper[4922]: I1128 07:16:08.221369 4922 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/barbican-api-59488f5ff4-k2cvn" Nov 28 07:16:08 crc kubenswrapper[4922]: I1128 07:16:08.221389 4922 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/barbican-api-59488f5ff4-k2cvn" Nov 28 07:16:08 crc kubenswrapper[4922]: I1128 07:16:08.255663 4922 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-api-59488f5ff4-k2cvn" podStartSLOduration=4.255642122 podStartE2EDuration="4.255642122s" podCreationTimestamp="2025-11-28 07:16:04 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 07:16:08.242481621 +0000 UTC m=+1413.162877223" watchObservedRunningTime="2025-11-28 07:16:08.255642122 +0000 UTC m=+1413.176037714" Nov 28 07:16:08 crc kubenswrapper[4922]: I1128 07:16:08.275889 4922 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-api-0"] Nov 28 07:16:09 crc kubenswrapper[4922]: I1128 07:16:09.243029 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"cd60085e-2c1f-4795-95c2-ac9a9a2100bc","Type":"ContainerStarted","Data":"6833749d048e00c9297ccaf7982a9904b71f78a21eb365c63a3f7fbb479660ba"} Nov 28 07:16:09 crc kubenswrapper[4922]: I1128 07:16:09.245232 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-cf96b7dc5-lfwbc" event={"ID":"a57aeefc-69e8-4b46-aeec-2aff4026621c","Type":"ContainerDied","Data":"05a030dad76326401a482943167a22eaca8a4def6a099779080a390651f52331"} Nov 28 07:16:09 crc kubenswrapper[4922]: I1128 07:16:09.245297 4922 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="05a030dad76326401a482943167a22eaca8a4def6a099779080a390651f52331" Nov 28 07:16:09 crc kubenswrapper[4922]: I1128 07:16:09.502550 4922 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-cf96b7dc5-lfwbc" Nov 28 07:16:09 crc kubenswrapper[4922]: I1128 07:16:09.651002 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a57aeefc-69e8-4b46-aeec-2aff4026621c-config\") pod \"a57aeefc-69e8-4b46-aeec-2aff4026621c\" (UID: \"a57aeefc-69e8-4b46-aeec-2aff4026621c\") " Nov 28 07:16:09 crc kubenswrapper[4922]: I1128 07:16:09.651360 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/a57aeefc-69e8-4b46-aeec-2aff4026621c-ovsdbserver-nb\") pod \"a57aeefc-69e8-4b46-aeec-2aff4026621c\" (UID: \"a57aeefc-69e8-4b46-aeec-2aff4026621c\") " Nov 28 07:16:09 crc kubenswrapper[4922]: I1128 07:16:09.651446 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bk2gc\" (UniqueName: \"kubernetes.io/projected/a57aeefc-69e8-4b46-aeec-2aff4026621c-kube-api-access-bk2gc\") pod \"a57aeefc-69e8-4b46-aeec-2aff4026621c\" (UID: \"a57aeefc-69e8-4b46-aeec-2aff4026621c\") " Nov 28 07:16:09 crc kubenswrapper[4922]: I1128 07:16:09.651484 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/a57aeefc-69e8-4b46-aeec-2aff4026621c-dns-swift-storage-0\") pod \"a57aeefc-69e8-4b46-aeec-2aff4026621c\" (UID: \"a57aeefc-69e8-4b46-aeec-2aff4026621c\") " Nov 28 07:16:09 crc kubenswrapper[4922]: I1128 07:16:09.651527 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/a57aeefc-69e8-4b46-aeec-2aff4026621c-ovsdbserver-sb\") pod \"a57aeefc-69e8-4b46-aeec-2aff4026621c\" (UID: \"a57aeefc-69e8-4b46-aeec-2aff4026621c\") " Nov 28 07:16:09 crc kubenswrapper[4922]: I1128 07:16:09.651560 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/a57aeefc-69e8-4b46-aeec-2aff4026621c-dns-svc\") pod \"a57aeefc-69e8-4b46-aeec-2aff4026621c\" (UID: \"a57aeefc-69e8-4b46-aeec-2aff4026621c\") " Nov 28 07:16:09 crc kubenswrapper[4922]: I1128 07:16:09.657460 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a57aeefc-69e8-4b46-aeec-2aff4026621c-kube-api-access-bk2gc" (OuterVolumeSpecName: "kube-api-access-bk2gc") pod "a57aeefc-69e8-4b46-aeec-2aff4026621c" (UID: "a57aeefc-69e8-4b46-aeec-2aff4026621c"). InnerVolumeSpecName "kube-api-access-bk2gc". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 07:16:09 crc kubenswrapper[4922]: I1128 07:16:09.685601 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a57aeefc-69e8-4b46-aeec-2aff4026621c-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "a57aeefc-69e8-4b46-aeec-2aff4026621c" (UID: "a57aeefc-69e8-4b46-aeec-2aff4026621c"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 07:16:09 crc kubenswrapper[4922]: I1128 07:16:09.688365 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a57aeefc-69e8-4b46-aeec-2aff4026621c-config" (OuterVolumeSpecName: "config") pod "a57aeefc-69e8-4b46-aeec-2aff4026621c" (UID: "a57aeefc-69e8-4b46-aeec-2aff4026621c"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 07:16:09 crc kubenswrapper[4922]: I1128 07:16:09.696488 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a57aeefc-69e8-4b46-aeec-2aff4026621c-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "a57aeefc-69e8-4b46-aeec-2aff4026621c" (UID: "a57aeefc-69e8-4b46-aeec-2aff4026621c"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 07:16:09 crc kubenswrapper[4922]: I1128 07:16:09.700264 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a57aeefc-69e8-4b46-aeec-2aff4026621c-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "a57aeefc-69e8-4b46-aeec-2aff4026621c" (UID: "a57aeefc-69e8-4b46-aeec-2aff4026621c"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 07:16:09 crc kubenswrapper[4922]: I1128 07:16:09.703672 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a57aeefc-69e8-4b46-aeec-2aff4026621c-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "a57aeefc-69e8-4b46-aeec-2aff4026621c" (UID: "a57aeefc-69e8-4b46-aeec-2aff4026621c"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 07:16:09 crc kubenswrapper[4922]: I1128 07:16:09.753371 4922 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a57aeefc-69e8-4b46-aeec-2aff4026621c-config\") on node \"crc\" DevicePath \"\"" Nov 28 07:16:09 crc kubenswrapper[4922]: I1128 07:16:09.753405 4922 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/a57aeefc-69e8-4b46-aeec-2aff4026621c-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 28 07:16:09 crc kubenswrapper[4922]: I1128 07:16:09.753419 4922 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bk2gc\" (UniqueName: \"kubernetes.io/projected/a57aeefc-69e8-4b46-aeec-2aff4026621c-kube-api-access-bk2gc\") on node \"crc\" DevicePath \"\"" Nov 28 07:16:09 crc kubenswrapper[4922]: I1128 07:16:09.753431 4922 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/a57aeefc-69e8-4b46-aeec-2aff4026621c-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Nov 28 07:16:09 crc kubenswrapper[4922]: I1128 07:16:09.753442 4922 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/a57aeefc-69e8-4b46-aeec-2aff4026621c-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 28 07:16:09 crc kubenswrapper[4922]: I1128 07:16:09.753455 4922 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/a57aeefc-69e8-4b46-aeec-2aff4026621c-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 28 07:16:09 crc kubenswrapper[4922]: I1128 07:16:09.823379 4922 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 28 07:16:09 crc kubenswrapper[4922]: I1128 07:16:09.853985 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/be4be2de-2f46-4982-8cd6-b73888d293af-scripts\") pod \"be4be2de-2f46-4982-8cd6-b73888d293af\" (UID: \"be4be2de-2f46-4982-8cd6-b73888d293af\") " Nov 28 07:16:09 crc kubenswrapper[4922]: I1128 07:16:09.854023 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/be4be2de-2f46-4982-8cd6-b73888d293af-combined-ca-bundle\") pod \"be4be2de-2f46-4982-8cd6-b73888d293af\" (UID: \"be4be2de-2f46-4982-8cd6-b73888d293af\") " Nov 28 07:16:09 crc kubenswrapper[4922]: I1128 07:16:09.854049 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-b8pzb\" (UniqueName: \"kubernetes.io/projected/be4be2de-2f46-4982-8cd6-b73888d293af-kube-api-access-b8pzb\") pod \"be4be2de-2f46-4982-8cd6-b73888d293af\" (UID: \"be4be2de-2f46-4982-8cd6-b73888d293af\") " Nov 28 07:16:09 crc kubenswrapper[4922]: I1128 07:16:09.854099 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/be4be2de-2f46-4982-8cd6-b73888d293af-run-httpd\") pod \"be4be2de-2f46-4982-8cd6-b73888d293af\" (UID: \"be4be2de-2f46-4982-8cd6-b73888d293af\") " Nov 28 07:16:09 crc kubenswrapper[4922]: I1128 07:16:09.854144 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/be4be2de-2f46-4982-8cd6-b73888d293af-sg-core-conf-yaml\") pod \"be4be2de-2f46-4982-8cd6-b73888d293af\" (UID: \"be4be2de-2f46-4982-8cd6-b73888d293af\") " Nov 28 07:16:09 crc kubenswrapper[4922]: I1128 07:16:09.854205 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/be4be2de-2f46-4982-8cd6-b73888d293af-log-httpd\") pod \"be4be2de-2f46-4982-8cd6-b73888d293af\" (UID: \"be4be2de-2f46-4982-8cd6-b73888d293af\") " Nov 28 07:16:09 crc kubenswrapper[4922]: I1128 07:16:09.854287 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/be4be2de-2f46-4982-8cd6-b73888d293af-config-data\") pod \"be4be2de-2f46-4982-8cd6-b73888d293af\" (UID: \"be4be2de-2f46-4982-8cd6-b73888d293af\") " Nov 28 07:16:09 crc kubenswrapper[4922]: I1128 07:16:09.856067 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/be4be2de-2f46-4982-8cd6-b73888d293af-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "be4be2de-2f46-4982-8cd6-b73888d293af" (UID: "be4be2de-2f46-4982-8cd6-b73888d293af"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 07:16:09 crc kubenswrapper[4922]: I1128 07:16:09.856443 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/be4be2de-2f46-4982-8cd6-b73888d293af-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "be4be2de-2f46-4982-8cd6-b73888d293af" (UID: "be4be2de-2f46-4982-8cd6-b73888d293af"). InnerVolumeSpecName "log-httpd". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 07:16:09 crc kubenswrapper[4922]: I1128 07:16:09.858957 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/be4be2de-2f46-4982-8cd6-b73888d293af-scripts" (OuterVolumeSpecName: "scripts") pod "be4be2de-2f46-4982-8cd6-b73888d293af" (UID: "be4be2de-2f46-4982-8cd6-b73888d293af"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 07:16:09 crc kubenswrapper[4922]: I1128 07:16:09.861141 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/be4be2de-2f46-4982-8cd6-b73888d293af-kube-api-access-b8pzb" (OuterVolumeSpecName: "kube-api-access-b8pzb") pod "be4be2de-2f46-4982-8cd6-b73888d293af" (UID: "be4be2de-2f46-4982-8cd6-b73888d293af"). InnerVolumeSpecName "kube-api-access-b8pzb". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 07:16:09 crc kubenswrapper[4922]: I1128 07:16:09.901744 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/be4be2de-2f46-4982-8cd6-b73888d293af-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "be4be2de-2f46-4982-8cd6-b73888d293af" (UID: "be4be2de-2f46-4982-8cd6-b73888d293af"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 07:16:09 crc kubenswrapper[4922]: I1128 07:16:09.956067 4922 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/be4be2de-2f46-4982-8cd6-b73888d293af-run-httpd\") on node \"crc\" DevicePath \"\"" Nov 28 07:16:09 crc kubenswrapper[4922]: I1128 07:16:09.956100 4922 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/be4be2de-2f46-4982-8cd6-b73888d293af-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Nov 28 07:16:09 crc kubenswrapper[4922]: I1128 07:16:09.956114 4922 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/be4be2de-2f46-4982-8cd6-b73888d293af-log-httpd\") on node \"crc\" DevicePath \"\"" Nov 28 07:16:09 crc kubenswrapper[4922]: I1128 07:16:09.956126 4922 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/be4be2de-2f46-4982-8cd6-b73888d293af-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 07:16:09 crc kubenswrapper[4922]: I1128 07:16:09.956137 4922 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-b8pzb\" (UniqueName: \"kubernetes.io/projected/be4be2de-2f46-4982-8cd6-b73888d293af-kube-api-access-b8pzb\") on node \"crc\" DevicePath \"\"" Nov 28 07:16:09 crc kubenswrapper[4922]: I1128 07:16:09.999320 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/be4be2de-2f46-4982-8cd6-b73888d293af-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "be4be2de-2f46-4982-8cd6-b73888d293af" (UID: "be4be2de-2f46-4982-8cd6-b73888d293af"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 07:16:10 crc kubenswrapper[4922]: I1128 07:16:10.037713 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/be4be2de-2f46-4982-8cd6-b73888d293af-config-data" (OuterVolumeSpecName: "config-data") pod "be4be2de-2f46-4982-8cd6-b73888d293af" (UID: "be4be2de-2f46-4982-8cd6-b73888d293af"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 07:16:10 crc kubenswrapper[4922]: I1128 07:16:10.057054 4922 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/be4be2de-2f46-4982-8cd6-b73888d293af-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 07:16:10 crc kubenswrapper[4922]: I1128 07:16:10.057087 4922 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/be4be2de-2f46-4982-8cd6-b73888d293af-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 07:16:10 crc kubenswrapper[4922]: I1128 07:16:10.283792 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"cd60085e-2c1f-4795-95c2-ac9a9a2100bc","Type":"ContainerStarted","Data":"597e2124631909d5d3ca9315902e229e5e3e23f0f35cc00312b1c186a717fa0c"} Nov 28 07:16:10 crc kubenswrapper[4922]: I1128 07:16:10.283883 4922 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-api-0" podUID="cd60085e-2c1f-4795-95c2-ac9a9a2100bc" containerName="cinder-api-log" containerID="cri-o://6833749d048e00c9297ccaf7982a9904b71f78a21eb365c63a3f7fbb479660ba" gracePeriod=30 Nov 28 07:16:10 crc kubenswrapper[4922]: I1128 07:16:10.284162 4922 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-api-0" podUID="cd60085e-2c1f-4795-95c2-ac9a9a2100bc" containerName="cinder-api" containerID="cri-o://597e2124631909d5d3ca9315902e229e5e3e23f0f35cc00312b1c186a717fa0c" gracePeriod=30 Nov 28 07:16:10 crc kubenswrapper[4922]: I1128 07:16:10.284178 4922 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/cinder-api-0" Nov 28 07:16:10 crc kubenswrapper[4922]: I1128 07:16:10.290935 4922 generic.go:334] "Generic (PLEG): container finished" podID="8c44439c-6a1b-430d-9dd1-8b0c16033ac1" containerID="5994f721051c71898df7f01da6650b3257270ddbeac198a8a7cc0dea37ebd0ae" exitCode=0 Nov 28 07:16:10 crc kubenswrapper[4922]: I1128 07:16:10.290989 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-c55f6679-c2kdb" event={"ID":"8c44439c-6a1b-430d-9dd1-8b0c16033ac1","Type":"ContainerDied","Data":"5994f721051c71898df7f01da6650b3257270ddbeac198a8a7cc0dea37ebd0ae"} Nov 28 07:16:10 crc kubenswrapper[4922]: I1128 07:16:10.301456 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"5beca492-9b93-4ea6-9c21-e8e87bf85afd","Type":"ContainerStarted","Data":"a698e338df82247c5b9cf333b820d0ac65a36acb96b0926e78bb0e836f443b5e"} Nov 28 07:16:10 crc kubenswrapper[4922]: I1128 07:16:10.308538 4922 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-api-0" podStartSLOduration=5.308518461 podStartE2EDuration="5.308518461s" podCreationTimestamp="2025-11-28 07:16:05 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 07:16:10.299996004 +0000 UTC m=+1415.220391586" watchObservedRunningTime="2025-11-28 07:16:10.308518461 +0000 UTC m=+1415.228914043" Nov 28 07:16:10 crc kubenswrapper[4922]: I1128 07:16:10.323695 4922 generic.go:334] "Generic (PLEG): container finished" podID="be4be2de-2f46-4982-8cd6-b73888d293af" containerID="794e738fc60e5d5c994b55691a1a29e41923ef5298bedb39dc71b6676bddf78c" exitCode=0 Nov 28 07:16:10 crc kubenswrapper[4922]: I1128 07:16:10.323845 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack/ceilometer-0" event={"ID":"be4be2de-2f46-4982-8cd6-b73888d293af","Type":"ContainerDied","Data":"794e738fc60e5d5c994b55691a1a29e41923ef5298bedb39dc71b6676bddf78c"} Nov 28 07:16:10 crc kubenswrapper[4922]: I1128 07:16:10.323908 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"be4be2de-2f46-4982-8cd6-b73888d293af","Type":"ContainerDied","Data":"1eddf0b62cd8a11d42c7e5cc0a808e27b0d422aed71302dbb4ff35c4b7f03d47"} Nov 28 07:16:10 crc kubenswrapper[4922]: I1128 07:16:10.323931 4922 scope.go:117] "RemoveContainer" containerID="6cf6e8e9ddb91b66c5ae09709d86caedb72b5ff87f3e37e0412675fb438effc9" Nov 28 07:16:10 crc kubenswrapper[4922]: I1128 07:16:10.324155 4922 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 28 07:16:10 crc kubenswrapper[4922]: I1128 07:16:10.331074 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-keystone-listener-84495d76c8-mkvcb" event={"ID":"5ece1333-c457-4099-bf00-1daa969a14dc","Type":"ContainerStarted","Data":"6c09aa779d052923b62c13eee209268276067fcf2d60f0ab88d8d7db5fd25ca4"} Nov 28 07:16:10 crc kubenswrapper[4922]: I1128 07:16:10.331203 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-keystone-listener-84495d76c8-mkvcb" event={"ID":"5ece1333-c457-4099-bf00-1daa969a14dc","Type":"ContainerStarted","Data":"3004dce57f11a2ee90d32c564f6d0b320053fb2f43c4069e423362623aae9bfa"} Nov 28 07:16:10 crc kubenswrapper[4922]: I1128 07:16:10.343760 4922 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-cf96b7dc5-lfwbc" Nov 28 07:16:10 crc kubenswrapper[4922]: I1128 07:16:10.344993 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-worker-5b7cbd995c-rwlzz" event={"ID":"f8685dc8-7577-4076-8a5a-beba52e9bae7","Type":"ContainerStarted","Data":"1013a435a5db00a3fabb1bd3f992a2b6780c248c282088a565511d2d85e0aa49"} Nov 28 07:16:10 crc kubenswrapper[4922]: I1128 07:16:10.345080 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-worker-5b7cbd995c-rwlzz" event={"ID":"f8685dc8-7577-4076-8a5a-beba52e9bae7","Type":"ContainerStarted","Data":"b4963147ea9e2c2244fba11f3323e999f07d35d36b28648b34f61f69856ae968"} Nov 28 07:16:10 crc kubenswrapper[4922]: I1128 07:16:10.358532 4922 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-keystone-listener-84495d76c8-mkvcb" podStartSLOduration=2.7675613 podStartE2EDuration="6.358511361s" podCreationTimestamp="2025-11-28 07:16:04 +0000 UTC" firstStartedPulling="2025-11-28 07:16:05.549639086 +0000 UTC m=+1410.470034658" lastFinishedPulling="2025-11-28 07:16:09.140589127 +0000 UTC m=+1414.060984719" observedRunningTime="2025-11-28 07:16:10.352565672 +0000 UTC m=+1415.272961254" watchObservedRunningTime="2025-11-28 07:16:10.358511361 +0000 UTC m=+1415.278906943" Nov 28 07:16:10 crc kubenswrapper[4922]: I1128 07:16:10.467588 4922 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-worker-5b7cbd995c-rwlzz" podStartSLOduration=2.637830858 podStartE2EDuration="6.467570612s" podCreationTimestamp="2025-11-28 07:16:04 +0000 UTC" firstStartedPulling="2025-11-28 07:16:05.306865446 +0000 UTC m=+1410.227261038" lastFinishedPulling="2025-11-28 07:16:09.13660521 +0000 UTC m=+1414.057000792" observedRunningTime="2025-11-28 07:16:10.37653506 +0000 UTC m=+1415.296930652" watchObservedRunningTime="2025-11-28 07:16:10.467570612 
+0000 UTC m=+1415.387966194" Nov 28 07:16:10 crc kubenswrapper[4922]: I1128 07:16:10.498775 4922 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 28 07:16:10 crc kubenswrapper[4922]: I1128 07:16:10.524613 4922 scope.go:117] "RemoveContainer" containerID="78fe5a4b19803c14f56518cac6656d53e08b75375e0c763c79911b31d05d3987" Nov 28 07:16:10 crc kubenswrapper[4922]: I1128 07:16:10.530974 4922 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Nov 28 07:16:10 crc kubenswrapper[4922]: I1128 07:16:10.547677 4922 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Nov 28 07:16:10 crc kubenswrapper[4922]: E1128 07:16:10.548077 4922 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="be4be2de-2f46-4982-8cd6-b73888d293af" containerName="proxy-httpd" Nov 28 07:16:10 crc kubenswrapper[4922]: I1128 07:16:10.548089 4922 state_mem.go:107] "Deleted CPUSet assignment" podUID="be4be2de-2f46-4982-8cd6-b73888d293af" containerName="proxy-httpd" Nov 28 07:16:10 crc kubenswrapper[4922]: E1128 07:16:10.548110 4922 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="be4be2de-2f46-4982-8cd6-b73888d293af" containerName="sg-core" Nov 28 07:16:10 crc kubenswrapper[4922]: I1128 07:16:10.548117 4922 state_mem.go:107] "Deleted CPUSet assignment" podUID="be4be2de-2f46-4982-8cd6-b73888d293af" containerName="sg-core" Nov 28 07:16:10 crc kubenswrapper[4922]: E1128 07:16:10.548128 4922 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="be4be2de-2f46-4982-8cd6-b73888d293af" containerName="ceilometer-notification-agent" Nov 28 07:16:10 crc kubenswrapper[4922]: I1128 07:16:10.548135 4922 state_mem.go:107] "Deleted CPUSet assignment" podUID="be4be2de-2f46-4982-8cd6-b73888d293af" containerName="ceilometer-notification-agent" Nov 28 07:16:10 crc kubenswrapper[4922]: E1128 07:16:10.548152 4922 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a57aeefc-69e8-4b46-aeec-2aff4026621c" containerName="init" Nov 28 07:16:10 crc kubenswrapper[4922]: I1128 07:16:10.548158 4922 state_mem.go:107] "Deleted CPUSet assignment" podUID="a57aeefc-69e8-4b46-aeec-2aff4026621c" containerName="init" Nov 28 07:16:10 crc kubenswrapper[4922]: I1128 07:16:10.548365 4922 memory_manager.go:354] "RemoveStaleState removing state" podUID="a57aeefc-69e8-4b46-aeec-2aff4026621c" containerName="init" Nov 28 07:16:10 crc kubenswrapper[4922]: I1128 07:16:10.548384 4922 memory_manager.go:354] "RemoveStaleState removing state" podUID="be4be2de-2f46-4982-8cd6-b73888d293af" containerName="proxy-httpd" Nov 28 07:16:10 crc kubenswrapper[4922]: I1128 07:16:10.548401 4922 memory_manager.go:354] "RemoveStaleState removing state" podUID="be4be2de-2f46-4982-8cd6-b73888d293af" containerName="ceilometer-notification-agent" Nov 28 07:16:10 crc kubenswrapper[4922]: I1128 07:16:10.548412 4922 memory_manager.go:354] "RemoveStaleState removing state" podUID="be4be2de-2f46-4982-8cd6-b73888d293af" containerName="sg-core" Nov 28 07:16:10 crc kubenswrapper[4922]: I1128 07:16:10.550060 4922 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 28 07:16:10 crc kubenswrapper[4922]: I1128 07:16:10.552726 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Nov 28 07:16:10 crc kubenswrapper[4922]: I1128 07:16:10.553024 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Nov 28 07:16:10 crc kubenswrapper[4922]: I1128 07:16:10.580465 4922 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-cf96b7dc5-lfwbc"] Nov 28 07:16:10 crc kubenswrapper[4922]: I1128 07:16:10.587822 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 28 07:16:10 crc kubenswrapper[4922]: I1128 07:16:10.594637 4922 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-cf96b7dc5-lfwbc"] Nov 28 07:16:10 crc kubenswrapper[4922]: I1128 07:16:10.597564 4922 scope.go:117] "RemoveContainer" containerID="794e738fc60e5d5c994b55691a1a29e41923ef5298bedb39dc71b6676bddf78c" Nov 28 07:16:10 crc kubenswrapper[4922]: I1128 07:16:10.643621 4922 scope.go:117] "RemoveContainer" containerID="6cf6e8e9ddb91b66c5ae09709d86caedb72b5ff87f3e37e0412675fb438effc9" Nov 28 07:16:10 crc kubenswrapper[4922]: E1128 07:16:10.644319 4922 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"6cf6e8e9ddb91b66c5ae09709d86caedb72b5ff87f3e37e0412675fb438effc9\": container with ID starting with 6cf6e8e9ddb91b66c5ae09709d86caedb72b5ff87f3e37e0412675fb438effc9 not found: ID does not exist" containerID="6cf6e8e9ddb91b66c5ae09709d86caedb72b5ff87f3e37e0412675fb438effc9" Nov 28 07:16:10 crc kubenswrapper[4922]: I1128 07:16:10.644372 4922 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6cf6e8e9ddb91b66c5ae09709d86caedb72b5ff87f3e37e0412675fb438effc9"} err="failed to get container status \"6cf6e8e9ddb91b66c5ae09709d86caedb72b5ff87f3e37e0412675fb438effc9\": rpc error: code = NotFound desc = could not find container \"6cf6e8e9ddb91b66c5ae09709d86caedb72b5ff87f3e37e0412675fb438effc9\": container with ID starting with 6cf6e8e9ddb91b66c5ae09709d86caedb72b5ff87f3e37e0412675fb438effc9 not found: ID does not exist" Nov 28 07:16:10 crc kubenswrapper[4922]: I1128 07:16:10.644400 4922 scope.go:117] "RemoveContainer" containerID="78fe5a4b19803c14f56518cac6656d53e08b75375e0c763c79911b31d05d3987" Nov 28 07:16:10 crc kubenswrapper[4922]: E1128 07:16:10.644733 4922 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"78fe5a4b19803c14f56518cac6656d53e08b75375e0c763c79911b31d05d3987\": container with ID starting with 78fe5a4b19803c14f56518cac6656d53e08b75375e0c763c79911b31d05d3987 not found: ID does not exist" containerID="78fe5a4b19803c14f56518cac6656d53e08b75375e0c763c79911b31d05d3987" Nov 28 07:16:10 crc kubenswrapper[4922]: I1128 07:16:10.644799 4922 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"78fe5a4b19803c14f56518cac6656d53e08b75375e0c763c79911b31d05d3987"} err="failed to get container status \"78fe5a4b19803c14f56518cac6656d53e08b75375e0c763c79911b31d05d3987\": rpc error: code = NotFound desc = could not find container \"78fe5a4b19803c14f56518cac6656d53e08b75375e0c763c79911b31d05d3987\": container with ID starting with 78fe5a4b19803c14f56518cac6656d53e08b75375e0c763c79911b31d05d3987 not found: ID does not exist" Nov 28 07:16:10 crc kubenswrapper[4922]: 
I1128 07:16:10.644843 4922 scope.go:117] "RemoveContainer" containerID="794e738fc60e5d5c994b55691a1a29e41923ef5298bedb39dc71b6676bddf78c" Nov 28 07:16:10 crc kubenswrapper[4922]: E1128 07:16:10.645256 4922 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"794e738fc60e5d5c994b55691a1a29e41923ef5298bedb39dc71b6676bddf78c\": container with ID starting with 794e738fc60e5d5c994b55691a1a29e41923ef5298bedb39dc71b6676bddf78c not found: ID does not exist" containerID="794e738fc60e5d5c994b55691a1a29e41923ef5298bedb39dc71b6676bddf78c" Nov 28 07:16:10 crc kubenswrapper[4922]: I1128 07:16:10.645280 4922 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"794e738fc60e5d5c994b55691a1a29e41923ef5298bedb39dc71b6676bddf78c"} err="failed to get container status \"794e738fc60e5d5c994b55691a1a29e41923ef5298bedb39dc71b6676bddf78c\": rpc error: code = NotFound desc = could not find container \"794e738fc60e5d5c994b55691a1a29e41923ef5298bedb39dc71b6676bddf78c\": container with ID starting with 794e738fc60e5d5c994b55691a1a29e41923ef5298bedb39dc71b6676bddf78c not found: ID does not exist" Nov 28 07:16:10 crc kubenswrapper[4922]: I1128 07:16:10.667819 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/9abee755-a9bf-49f0-85ad-e5c58f2e35c2-scripts\") pod \"ceilometer-0\" (UID: \"9abee755-a9bf-49f0-85ad-e5c58f2e35c2\") " pod="openstack/ceilometer-0" Nov 28 07:16:10 crc kubenswrapper[4922]: I1128 07:16:10.667873 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9abee755-a9bf-49f0-85ad-e5c58f2e35c2-config-data\") pod \"ceilometer-0\" (UID: \"9abee755-a9bf-49f0-85ad-e5c58f2e35c2\") " pod="openstack/ceilometer-0" Nov 28 07:16:10 crc kubenswrapper[4922]: I1128 07:16:10.667931 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9abee755-a9bf-49f0-85ad-e5c58f2e35c2-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"9abee755-a9bf-49f0-85ad-e5c58f2e35c2\") " pod="openstack/ceilometer-0" Nov 28 07:16:10 crc kubenswrapper[4922]: I1128 07:16:10.667954 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/9abee755-a9bf-49f0-85ad-e5c58f2e35c2-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"9abee755-a9bf-49f0-85ad-e5c58f2e35c2\") " pod="openstack/ceilometer-0" Nov 28 07:16:10 crc kubenswrapper[4922]: I1128 07:16:10.668018 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/9abee755-a9bf-49f0-85ad-e5c58f2e35c2-run-httpd\") pod \"ceilometer-0\" (UID: \"9abee755-a9bf-49f0-85ad-e5c58f2e35c2\") " pod="openstack/ceilometer-0" Nov 28 07:16:10 crc kubenswrapper[4922]: I1128 07:16:10.668050 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vlbl6\" (UniqueName: \"kubernetes.io/projected/9abee755-a9bf-49f0-85ad-e5c58f2e35c2-kube-api-access-vlbl6\") pod \"ceilometer-0\" (UID: \"9abee755-a9bf-49f0-85ad-e5c58f2e35c2\") " pod="openstack/ceilometer-0" Nov 28 07:16:10 crc kubenswrapper[4922]: I1128 07:16:10.668087 4922 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/9abee755-a9bf-49f0-85ad-e5c58f2e35c2-log-httpd\") pod \"ceilometer-0\" (UID: \"9abee755-a9bf-49f0-85ad-e5c58f2e35c2\") " pod="openstack/ceilometer-0" Nov 28 07:16:10 crc kubenswrapper[4922]: I1128 07:16:10.769874 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9abee755-a9bf-49f0-85ad-e5c58f2e35c2-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"9abee755-a9bf-49f0-85ad-e5c58f2e35c2\") " pod="openstack/ceilometer-0" Nov 28 07:16:10 crc kubenswrapper[4922]: I1128 07:16:10.769924 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/9abee755-a9bf-49f0-85ad-e5c58f2e35c2-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"9abee755-a9bf-49f0-85ad-e5c58f2e35c2\") " pod="openstack/ceilometer-0" Nov 28 07:16:10 crc kubenswrapper[4922]: I1128 07:16:10.770001 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/9abee755-a9bf-49f0-85ad-e5c58f2e35c2-run-httpd\") pod \"ceilometer-0\" (UID: \"9abee755-a9bf-49f0-85ad-e5c58f2e35c2\") " pod="openstack/ceilometer-0" Nov 28 07:16:10 crc kubenswrapper[4922]: I1128 07:16:10.770052 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vlbl6\" (UniqueName: \"kubernetes.io/projected/9abee755-a9bf-49f0-85ad-e5c58f2e35c2-kube-api-access-vlbl6\") pod \"ceilometer-0\" (UID: \"9abee755-a9bf-49f0-85ad-e5c58f2e35c2\") " pod="openstack/ceilometer-0" Nov 28 07:16:10 crc kubenswrapper[4922]: I1128 07:16:10.770086 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/9abee755-a9bf-49f0-85ad-e5c58f2e35c2-log-httpd\") pod \"ceilometer-0\" (UID: \"9abee755-a9bf-49f0-85ad-e5c58f2e35c2\") " pod="openstack/ceilometer-0" Nov 28 07:16:10 crc kubenswrapper[4922]: I1128 07:16:10.770172 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/9abee755-a9bf-49f0-85ad-e5c58f2e35c2-scripts\") pod \"ceilometer-0\" (UID: \"9abee755-a9bf-49f0-85ad-e5c58f2e35c2\") " pod="openstack/ceilometer-0" Nov 28 07:16:10 crc kubenswrapper[4922]: I1128 07:16:10.770201 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9abee755-a9bf-49f0-85ad-e5c58f2e35c2-config-data\") pod \"ceilometer-0\" (UID: \"9abee755-a9bf-49f0-85ad-e5c58f2e35c2\") " pod="openstack/ceilometer-0" Nov 28 07:16:10 crc kubenswrapper[4922]: I1128 07:16:10.770717 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/9abee755-a9bf-49f0-85ad-e5c58f2e35c2-run-httpd\") pod \"ceilometer-0\" (UID: \"9abee755-a9bf-49f0-85ad-e5c58f2e35c2\") " pod="openstack/ceilometer-0" Nov 28 07:16:10 crc kubenswrapper[4922]: I1128 07:16:10.771396 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/9abee755-a9bf-49f0-85ad-e5c58f2e35c2-log-httpd\") pod \"ceilometer-0\" (UID: \"9abee755-a9bf-49f0-85ad-e5c58f2e35c2\") " pod="openstack/ceilometer-0" Nov 28 07:16:10 crc kubenswrapper[4922]: I1128 07:16:10.775396 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for 
volume \"scripts\" (UniqueName: \"kubernetes.io/secret/9abee755-a9bf-49f0-85ad-e5c58f2e35c2-scripts\") pod \"ceilometer-0\" (UID: \"9abee755-a9bf-49f0-85ad-e5c58f2e35c2\") " pod="openstack/ceilometer-0" Nov 28 07:16:10 crc kubenswrapper[4922]: I1128 07:16:10.775634 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9abee755-a9bf-49f0-85ad-e5c58f2e35c2-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"9abee755-a9bf-49f0-85ad-e5c58f2e35c2\") " pod="openstack/ceilometer-0" Nov 28 07:16:10 crc kubenswrapper[4922]: I1128 07:16:10.775833 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/9abee755-a9bf-49f0-85ad-e5c58f2e35c2-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"9abee755-a9bf-49f0-85ad-e5c58f2e35c2\") " pod="openstack/ceilometer-0" Nov 28 07:16:10 crc kubenswrapper[4922]: I1128 07:16:10.783829 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9abee755-a9bf-49f0-85ad-e5c58f2e35c2-config-data\") pod \"ceilometer-0\" (UID: \"9abee755-a9bf-49f0-85ad-e5c58f2e35c2\") " pod="openstack/ceilometer-0" Nov 28 07:16:10 crc kubenswrapper[4922]: I1128 07:16:10.807671 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vlbl6\" (UniqueName: \"kubernetes.io/projected/9abee755-a9bf-49f0-85ad-e5c58f2e35c2-kube-api-access-vlbl6\") pod \"ceilometer-0\" (UID: \"9abee755-a9bf-49f0-85ad-e5c58f2e35c2\") " pod="openstack/ceilometer-0" Nov 28 07:16:10 crc kubenswrapper[4922]: I1128 07:16:10.893678 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 28 07:16:10 crc kubenswrapper[4922]: I1128 07:16:10.951704 4922 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-api-0" Nov 28 07:16:11 crc kubenswrapper[4922]: I1128 07:16:11.076452 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cd60085e-2c1f-4795-95c2-ac9a9a2100bc-config-data\") pod \"cd60085e-2c1f-4795-95c2-ac9a9a2100bc\" (UID: \"cd60085e-2c1f-4795-95c2-ac9a9a2100bc\") " Nov 28 07:16:11 crc kubenswrapper[4922]: I1128 07:16:11.076507 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/cd60085e-2c1f-4795-95c2-ac9a9a2100bc-logs\") pod \"cd60085e-2c1f-4795-95c2-ac9a9a2100bc\" (UID: \"cd60085e-2c1f-4795-95c2-ac9a9a2100bc\") " Nov 28 07:16:11 crc kubenswrapper[4922]: I1128 07:16:11.076543 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/cd60085e-2c1f-4795-95c2-ac9a9a2100bc-scripts\") pod \"cd60085e-2c1f-4795-95c2-ac9a9a2100bc\" (UID: \"cd60085e-2c1f-4795-95c2-ac9a9a2100bc\") " Nov 28 07:16:11 crc kubenswrapper[4922]: I1128 07:16:11.076597 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fhqgp\" (UniqueName: \"kubernetes.io/projected/cd60085e-2c1f-4795-95c2-ac9a9a2100bc-kube-api-access-fhqgp\") pod \"cd60085e-2c1f-4795-95c2-ac9a9a2100bc\" (UID: \"cd60085e-2c1f-4795-95c2-ac9a9a2100bc\") " Nov 28 07:16:11 crc kubenswrapper[4922]: I1128 07:16:11.076677 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cd60085e-2c1f-4795-95c2-ac9a9a2100bc-combined-ca-bundle\") pod \"cd60085e-2c1f-4795-95c2-ac9a9a2100bc\" (UID: \"cd60085e-2c1f-4795-95c2-ac9a9a2100bc\") " Nov 28 07:16:11 crc kubenswrapper[4922]: I1128 07:16:11.076727 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/cd60085e-2c1f-4795-95c2-ac9a9a2100bc-config-data-custom\") pod \"cd60085e-2c1f-4795-95c2-ac9a9a2100bc\" (UID: \"cd60085e-2c1f-4795-95c2-ac9a9a2100bc\") " Nov 28 07:16:11 crc kubenswrapper[4922]: I1128 07:16:11.076765 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/cd60085e-2c1f-4795-95c2-ac9a9a2100bc-etc-machine-id\") pod \"cd60085e-2c1f-4795-95c2-ac9a9a2100bc\" (UID: \"cd60085e-2c1f-4795-95c2-ac9a9a2100bc\") " Nov 28 07:16:11 crc kubenswrapper[4922]: I1128 07:16:11.077044 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/cd60085e-2c1f-4795-95c2-ac9a9a2100bc-logs" (OuterVolumeSpecName: "logs") pod "cd60085e-2c1f-4795-95c2-ac9a9a2100bc" (UID: "cd60085e-2c1f-4795-95c2-ac9a9a2100bc"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 07:16:11 crc kubenswrapper[4922]: I1128 07:16:11.077111 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/cd60085e-2c1f-4795-95c2-ac9a9a2100bc-etc-machine-id" (OuterVolumeSpecName: "etc-machine-id") pod "cd60085e-2c1f-4795-95c2-ac9a9a2100bc" (UID: "cd60085e-2c1f-4795-95c2-ac9a9a2100bc"). InnerVolumeSpecName "etc-machine-id". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 28 07:16:11 crc kubenswrapper[4922]: I1128 07:16:11.077321 4922 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/cd60085e-2c1f-4795-95c2-ac9a9a2100bc-logs\") on node \"crc\" DevicePath \"\"" Nov 28 07:16:11 crc kubenswrapper[4922]: I1128 07:16:11.077342 4922 reconciler_common.go:293] "Volume detached for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/cd60085e-2c1f-4795-95c2-ac9a9a2100bc-etc-machine-id\") on node \"crc\" DevicePath \"\"" Nov 28 07:16:11 crc kubenswrapper[4922]: I1128 07:16:11.082503 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cd60085e-2c1f-4795-95c2-ac9a9a2100bc-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "cd60085e-2c1f-4795-95c2-ac9a9a2100bc" (UID: "cd60085e-2c1f-4795-95c2-ac9a9a2100bc"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 07:16:11 crc kubenswrapper[4922]: I1128 07:16:11.083161 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cd60085e-2c1f-4795-95c2-ac9a9a2100bc-kube-api-access-fhqgp" (OuterVolumeSpecName: "kube-api-access-fhqgp") pod "cd60085e-2c1f-4795-95c2-ac9a9a2100bc" (UID: "cd60085e-2c1f-4795-95c2-ac9a9a2100bc"). InnerVolumeSpecName "kube-api-access-fhqgp". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 07:16:11 crc kubenswrapper[4922]: I1128 07:16:11.089382 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cd60085e-2c1f-4795-95c2-ac9a9a2100bc-scripts" (OuterVolumeSpecName: "scripts") pod "cd60085e-2c1f-4795-95c2-ac9a9a2100bc" (UID: "cd60085e-2c1f-4795-95c2-ac9a9a2100bc"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 07:16:11 crc kubenswrapper[4922]: I1128 07:16:11.140144 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cd60085e-2c1f-4795-95c2-ac9a9a2100bc-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "cd60085e-2c1f-4795-95c2-ac9a9a2100bc" (UID: "cd60085e-2c1f-4795-95c2-ac9a9a2100bc"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 07:16:11 crc kubenswrapper[4922]: I1128 07:16:11.156398 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cd60085e-2c1f-4795-95c2-ac9a9a2100bc-config-data" (OuterVolumeSpecName: "config-data") pod "cd60085e-2c1f-4795-95c2-ac9a9a2100bc" (UID: "cd60085e-2c1f-4795-95c2-ac9a9a2100bc"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 07:16:11 crc kubenswrapper[4922]: I1128 07:16:11.179069 4922 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/cd60085e-2c1f-4795-95c2-ac9a9a2100bc-config-data-custom\") on node \"crc\" DevicePath \"\"" Nov 28 07:16:11 crc kubenswrapper[4922]: I1128 07:16:11.179098 4922 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cd60085e-2c1f-4795-95c2-ac9a9a2100bc-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 07:16:11 crc kubenswrapper[4922]: I1128 07:16:11.179107 4922 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/cd60085e-2c1f-4795-95c2-ac9a9a2100bc-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 07:16:11 crc kubenswrapper[4922]: I1128 07:16:11.179115 4922 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fhqgp\" (UniqueName: \"kubernetes.io/projected/cd60085e-2c1f-4795-95c2-ac9a9a2100bc-kube-api-access-fhqgp\") on node \"crc\" DevicePath \"\"" Nov 28 07:16:11 crc kubenswrapper[4922]: I1128 07:16:11.179126 4922 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cd60085e-2c1f-4795-95c2-ac9a9a2100bc-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 07:16:11 crc kubenswrapper[4922]: I1128 07:16:11.361823 4922 generic.go:334] "Generic (PLEG): container finished" podID="cd60085e-2c1f-4795-95c2-ac9a9a2100bc" containerID="597e2124631909d5d3ca9315902e229e5e3e23f0f35cc00312b1c186a717fa0c" exitCode=0 Nov 28 07:16:11 crc kubenswrapper[4922]: I1128 07:16:11.362104 4922 generic.go:334] "Generic (PLEG): container finished" podID="cd60085e-2c1f-4795-95c2-ac9a9a2100bc" containerID="6833749d048e00c9297ccaf7982a9904b71f78a21eb365c63a3f7fbb479660ba" exitCode=143 Nov 28 07:16:11 crc kubenswrapper[4922]: I1128 07:16:11.361880 4922 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-api-0" Nov 28 07:16:11 crc kubenswrapper[4922]: I1128 07:16:11.361892 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"cd60085e-2c1f-4795-95c2-ac9a9a2100bc","Type":"ContainerDied","Data":"597e2124631909d5d3ca9315902e229e5e3e23f0f35cc00312b1c186a717fa0c"} Nov 28 07:16:11 crc kubenswrapper[4922]: I1128 07:16:11.363231 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"cd60085e-2c1f-4795-95c2-ac9a9a2100bc","Type":"ContainerDied","Data":"6833749d048e00c9297ccaf7982a9904b71f78a21eb365c63a3f7fbb479660ba"} Nov 28 07:16:11 crc kubenswrapper[4922]: I1128 07:16:11.363264 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"cd60085e-2c1f-4795-95c2-ac9a9a2100bc","Type":"ContainerDied","Data":"91101bdab729a356a96fe1753a80896d5b0385e62a3dc71a5602a402211512da"} Nov 28 07:16:11 crc kubenswrapper[4922]: I1128 07:16:11.363271 4922 scope.go:117] "RemoveContainer" containerID="597e2124631909d5d3ca9315902e229e5e3e23f0f35cc00312b1c186a717fa0c" Nov 28 07:16:11 crc kubenswrapper[4922]: I1128 07:16:11.365802 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-c55f6679-c2kdb" event={"ID":"8c44439c-6a1b-430d-9dd1-8b0c16033ac1","Type":"ContainerStarted","Data":"e41fe74d51b86bfbb91f2da8a028e499fce1d7462fc60532955d0af580095be8"} Nov 28 07:16:11 crc kubenswrapper[4922]: I1128 07:16:11.365838 4922 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-c55f6679-c2kdb" Nov 28 07:16:11 crc kubenswrapper[4922]: I1128 07:16:11.384737 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"5beca492-9b93-4ea6-9c21-e8e87bf85afd","Type":"ContainerStarted","Data":"693bf830b9489a6970be95062ccee89ae267fa47f7f57fbf4d165cd2af36aec4"} Nov 28 07:16:11 crc kubenswrapper[4922]: I1128 07:16:11.401101 4922 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-c55f6679-c2kdb" podStartSLOduration=6.40108419 podStartE2EDuration="6.40108419s" podCreationTimestamp="2025-11-28 07:16:05 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 07:16:11.39471744 +0000 UTC m=+1416.315113012" watchObservedRunningTime="2025-11-28 07:16:11.40108419 +0000 UTC m=+1416.321479772" Nov 28 07:16:11 crc kubenswrapper[4922]: I1128 07:16:11.413909 4922 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a57aeefc-69e8-4b46-aeec-2aff4026621c" path="/var/lib/kubelet/pods/a57aeefc-69e8-4b46-aeec-2aff4026621c/volumes" Nov 28 07:16:11 crc kubenswrapper[4922]: I1128 07:16:11.414481 4922 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="be4be2de-2f46-4982-8cd6-b73888d293af" path="/var/lib/kubelet/pods/be4be2de-2f46-4982-8cd6-b73888d293af/volumes" Nov 28 07:16:11 crc kubenswrapper[4922]: I1128 07:16:11.418122 4922 scope.go:117] "RemoveContainer" containerID="6833749d048e00c9297ccaf7982a9904b71f78a21eb365c63a3f7fbb479660ba" Nov 28 07:16:11 crc kubenswrapper[4922]: I1128 07:16:11.452296 4922 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-api-7947bcd956-482dv"] Nov 28 07:16:11 crc kubenswrapper[4922]: E1128 07:16:11.452752 4922 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cd60085e-2c1f-4795-95c2-ac9a9a2100bc" containerName="cinder-api-log" Nov 28 07:16:11 crc 
kubenswrapper[4922]: I1128 07:16:11.452773 4922 state_mem.go:107] "Deleted CPUSet assignment" podUID="cd60085e-2c1f-4795-95c2-ac9a9a2100bc" containerName="cinder-api-log" Nov 28 07:16:11 crc kubenswrapper[4922]: E1128 07:16:11.452792 4922 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cd60085e-2c1f-4795-95c2-ac9a9a2100bc" containerName="cinder-api" Nov 28 07:16:11 crc kubenswrapper[4922]: I1128 07:16:11.452800 4922 state_mem.go:107] "Deleted CPUSet assignment" podUID="cd60085e-2c1f-4795-95c2-ac9a9a2100bc" containerName="cinder-api" Nov 28 07:16:11 crc kubenswrapper[4922]: I1128 07:16:11.452992 4922 memory_manager.go:354] "RemoveStaleState removing state" podUID="cd60085e-2c1f-4795-95c2-ac9a9a2100bc" containerName="cinder-api" Nov 28 07:16:11 crc kubenswrapper[4922]: I1128 07:16:11.453016 4922 memory_manager.go:354] "RemoveStaleState removing state" podUID="cd60085e-2c1f-4795-95c2-ac9a9a2100bc" containerName="cinder-api-log" Nov 28 07:16:11 crc kubenswrapper[4922]: I1128 07:16:11.454003 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-api-7947bcd956-482dv" Nov 28 07:16:11 crc kubenswrapper[4922]: I1128 07:16:11.456697 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-barbican-internal-svc" Nov 28 07:16:11 crc kubenswrapper[4922]: I1128 07:16:11.456954 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-barbican-public-svc" Nov 28 07:16:11 crc kubenswrapper[4922]: I1128 07:16:11.472387 4922 scope.go:117] "RemoveContainer" containerID="597e2124631909d5d3ca9315902e229e5e3e23f0f35cc00312b1c186a717fa0c" Nov 28 07:16:11 crc kubenswrapper[4922]: E1128 07:16:11.474099 4922 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"597e2124631909d5d3ca9315902e229e5e3e23f0f35cc00312b1c186a717fa0c\": container with ID starting with 597e2124631909d5d3ca9315902e229e5e3e23f0f35cc00312b1c186a717fa0c not found: ID does not exist" containerID="597e2124631909d5d3ca9315902e229e5e3e23f0f35cc00312b1c186a717fa0c" Nov 28 07:16:11 crc kubenswrapper[4922]: I1128 07:16:11.474130 4922 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"597e2124631909d5d3ca9315902e229e5e3e23f0f35cc00312b1c186a717fa0c"} err="failed to get container status \"597e2124631909d5d3ca9315902e229e5e3e23f0f35cc00312b1c186a717fa0c\": rpc error: code = NotFound desc = could not find container \"597e2124631909d5d3ca9315902e229e5e3e23f0f35cc00312b1c186a717fa0c\": container with ID starting with 597e2124631909d5d3ca9315902e229e5e3e23f0f35cc00312b1c186a717fa0c not found: ID does not exist" Nov 28 07:16:11 crc kubenswrapper[4922]: I1128 07:16:11.474150 4922 scope.go:117] "RemoveContainer" containerID="6833749d048e00c9297ccaf7982a9904b71f78a21eb365c63a3f7fbb479660ba" Nov 28 07:16:11 crc kubenswrapper[4922]: E1128 07:16:11.474981 4922 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"6833749d048e00c9297ccaf7982a9904b71f78a21eb365c63a3f7fbb479660ba\": container with ID starting with 6833749d048e00c9297ccaf7982a9904b71f78a21eb365c63a3f7fbb479660ba not found: ID does not exist" containerID="6833749d048e00c9297ccaf7982a9904b71f78a21eb365c63a3f7fbb479660ba" Nov 28 07:16:11 crc kubenswrapper[4922]: I1128 07:16:11.475007 4922 pod_container_deletor.go:53] "DeleteContainer returned error" 
containerID={"Type":"cri-o","ID":"6833749d048e00c9297ccaf7982a9904b71f78a21eb365c63a3f7fbb479660ba"} err="failed to get container status \"6833749d048e00c9297ccaf7982a9904b71f78a21eb365c63a3f7fbb479660ba\": rpc error: code = NotFound desc = could not find container \"6833749d048e00c9297ccaf7982a9904b71f78a21eb365c63a3f7fbb479660ba\": container with ID starting with 6833749d048e00c9297ccaf7982a9904b71f78a21eb365c63a3f7fbb479660ba not found: ID does not exist" Nov 28 07:16:11 crc kubenswrapper[4922]: I1128 07:16:11.475022 4922 scope.go:117] "RemoveContainer" containerID="597e2124631909d5d3ca9315902e229e5e3e23f0f35cc00312b1c186a717fa0c" Nov 28 07:16:11 crc kubenswrapper[4922]: I1128 07:16:11.475274 4922 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"597e2124631909d5d3ca9315902e229e5e3e23f0f35cc00312b1c186a717fa0c"} err="failed to get container status \"597e2124631909d5d3ca9315902e229e5e3e23f0f35cc00312b1c186a717fa0c\": rpc error: code = NotFound desc = could not find container \"597e2124631909d5d3ca9315902e229e5e3e23f0f35cc00312b1c186a717fa0c\": container with ID starting with 597e2124631909d5d3ca9315902e229e5e3e23f0f35cc00312b1c186a717fa0c not found: ID does not exist" Nov 28 07:16:11 crc kubenswrapper[4922]: I1128 07:16:11.475315 4922 scope.go:117] "RemoveContainer" containerID="6833749d048e00c9297ccaf7982a9904b71f78a21eb365c63a3f7fbb479660ba" Nov 28 07:16:11 crc kubenswrapper[4922]: I1128 07:16:11.479649 4922 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6833749d048e00c9297ccaf7982a9904b71f78a21eb365c63a3f7fbb479660ba"} err="failed to get container status \"6833749d048e00c9297ccaf7982a9904b71f78a21eb365c63a3f7fbb479660ba\": rpc error: code = NotFound desc = could not find container \"6833749d048e00c9297ccaf7982a9904b71f78a21eb365c63a3f7fbb479660ba\": container with ID starting with 6833749d048e00c9297ccaf7982a9904b71f78a21eb365c63a3f7fbb479660ba not found: ID does not exist" Nov 28 07:16:11 crc kubenswrapper[4922]: I1128 07:16:11.479709 4922 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-api-0"] Nov 28 07:16:11 crc kubenswrapper[4922]: I1128 07:16:11.491379 4922 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-api-0"] Nov 28 07:16:11 crc kubenswrapper[4922]: I1128 07:16:11.508512 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-api-7947bcd956-482dv"] Nov 28 07:16:11 crc kubenswrapper[4922]: I1128 07:16:11.515887 4922 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-api-0"] Nov 28 07:16:11 crc kubenswrapper[4922]: I1128 07:16:11.518553 4922 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-api-0" Nov 28 07:16:11 crc kubenswrapper[4922]: I1128 07:16:11.520325 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-api-config-data" Nov 28 07:16:11 crc kubenswrapper[4922]: I1128 07:16:11.520634 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-cinder-public-svc" Nov 28 07:16:11 crc kubenswrapper[4922]: I1128 07:16:11.521489 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-cinder-internal-svc" Nov 28 07:16:11 crc kubenswrapper[4922]: I1128 07:16:11.522995 4922 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-scheduler-0" podStartSLOduration=4.857485781 podStartE2EDuration="6.522979432s" podCreationTimestamp="2025-11-28 07:16:05 +0000 UTC" firstStartedPulling="2025-11-28 07:16:07.82971486 +0000 UTC m=+1412.750110442" lastFinishedPulling="2025-11-28 07:16:09.495208511 +0000 UTC m=+1414.415604093" observedRunningTime="2025-11-28 07:16:11.446781985 +0000 UTC m=+1416.367177567" watchObservedRunningTime="2025-11-28 07:16:11.522979432 +0000 UTC m=+1416.443375014" Nov 28 07:16:11 crc kubenswrapper[4922]: W1128 07:16:11.528467 4922 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod9abee755_a9bf_49f0_85ad_e5c58f2e35c2.slice/crio-d73add646b1c847abfeacd9d36c803085131b91826db108b4d0d7ba0b0e9b646 WatchSource:0}: Error finding container d73add646b1c847abfeacd9d36c803085131b91826db108b4d0d7ba0b0e9b646: Status 404 returned error can't find the container with id d73add646b1c847abfeacd9d36c803085131b91826db108b4d0d7ba0b0e9b646 Nov 28 07:16:11 crc kubenswrapper[4922]: I1128 07:16:11.534803 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-api-0"] Nov 28 07:16:11 crc kubenswrapper[4922]: I1128 07:16:11.552915 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 28 07:16:11 crc kubenswrapper[4922]: I1128 07:16:11.590081 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/1d97532b-e9ff-4031-a82c-3db5e943bfd9-logs\") pod \"barbican-api-7947bcd956-482dv\" (UID: \"1d97532b-e9ff-4031-a82c-3db5e943bfd9\") " pod="openstack/barbican-api-7947bcd956-482dv" Nov 28 07:16:11 crc kubenswrapper[4922]: I1128 07:16:11.590123 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1d97532b-e9ff-4031-a82c-3db5e943bfd9-combined-ca-bundle\") pod \"barbican-api-7947bcd956-482dv\" (UID: \"1d97532b-e9ff-4031-a82c-3db5e943bfd9\") " pod="openstack/barbican-api-7947bcd956-482dv" Nov 28 07:16:11 crc kubenswrapper[4922]: I1128 07:16:11.590161 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dkc4c\" (UniqueName: \"kubernetes.io/projected/1d97532b-e9ff-4031-a82c-3db5e943bfd9-kube-api-access-dkc4c\") pod \"barbican-api-7947bcd956-482dv\" (UID: \"1d97532b-e9ff-4031-a82c-3db5e943bfd9\") " pod="openstack/barbican-api-7947bcd956-482dv" Nov 28 07:16:11 crc kubenswrapper[4922]: I1128 07:16:11.590409 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/1d97532b-e9ff-4031-a82c-3db5e943bfd9-config-data-custom\") pod 
\"barbican-api-7947bcd956-482dv\" (UID: \"1d97532b-e9ff-4031-a82c-3db5e943bfd9\") " pod="openstack/barbican-api-7947bcd956-482dv" Nov 28 07:16:11 crc kubenswrapper[4922]: I1128 07:16:11.591177 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/1d97532b-e9ff-4031-a82c-3db5e943bfd9-public-tls-certs\") pod \"barbican-api-7947bcd956-482dv\" (UID: \"1d97532b-e9ff-4031-a82c-3db5e943bfd9\") " pod="openstack/barbican-api-7947bcd956-482dv" Nov 28 07:16:11 crc kubenswrapper[4922]: I1128 07:16:11.591301 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/1d97532b-e9ff-4031-a82c-3db5e943bfd9-internal-tls-certs\") pod \"barbican-api-7947bcd956-482dv\" (UID: \"1d97532b-e9ff-4031-a82c-3db5e943bfd9\") " pod="openstack/barbican-api-7947bcd956-482dv" Nov 28 07:16:11 crc kubenswrapper[4922]: I1128 07:16:11.591395 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1d97532b-e9ff-4031-a82c-3db5e943bfd9-config-data\") pod \"barbican-api-7947bcd956-482dv\" (UID: \"1d97532b-e9ff-4031-a82c-3db5e943bfd9\") " pod="openstack/barbican-api-7947bcd956-482dv" Nov 28 07:16:11 crc kubenswrapper[4922]: I1128 07:16:11.692681 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/1d97532b-e9ff-4031-a82c-3db5e943bfd9-logs\") pod \"barbican-api-7947bcd956-482dv\" (UID: \"1d97532b-e9ff-4031-a82c-3db5e943bfd9\") " pod="openstack/barbican-api-7947bcd956-482dv" Nov 28 07:16:11 crc kubenswrapper[4922]: I1128 07:16:11.692762 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1d97532b-e9ff-4031-a82c-3db5e943bfd9-combined-ca-bundle\") pod \"barbican-api-7947bcd956-482dv\" (UID: \"1d97532b-e9ff-4031-a82c-3db5e943bfd9\") " pod="openstack/barbican-api-7947bcd956-482dv" Nov 28 07:16:11 crc kubenswrapper[4922]: I1128 07:16:11.692813 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dkc4c\" (UniqueName: \"kubernetes.io/projected/1d97532b-e9ff-4031-a82c-3db5e943bfd9-kube-api-access-dkc4c\") pod \"barbican-api-7947bcd956-482dv\" (UID: \"1d97532b-e9ff-4031-a82c-3db5e943bfd9\") " pod="openstack/barbican-api-7947bcd956-482dv" Nov 28 07:16:11 crc kubenswrapper[4922]: I1128 07:16:11.692867 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c038b865-4b32-4be3-9e0a-8c40dc140a68-scripts\") pod \"cinder-api-0\" (UID: \"c038b865-4b32-4be3-9e0a-8c40dc140a68\") " pod="openstack/cinder-api-0" Nov 28 07:16:11 crc kubenswrapper[4922]: I1128 07:16:11.692943 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-c6kxw\" (UniqueName: \"kubernetes.io/projected/c038b865-4b32-4be3-9e0a-8c40dc140a68-kube-api-access-c6kxw\") pod \"cinder-api-0\" (UID: \"c038b865-4b32-4be3-9e0a-8c40dc140a68\") " pod="openstack/cinder-api-0" Nov 28 07:16:11 crc kubenswrapper[4922]: I1128 07:16:11.692982 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c038b865-4b32-4be3-9e0a-8c40dc140a68-logs\") pod \"cinder-api-0\" 
(UID: \"c038b865-4b32-4be3-9e0a-8c40dc140a68\") " pod="openstack/cinder-api-0" Nov 28 07:16:11 crc kubenswrapper[4922]: I1128 07:16:11.693015 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/1d97532b-e9ff-4031-a82c-3db5e943bfd9-config-data-custom\") pod \"barbican-api-7947bcd956-482dv\" (UID: \"1d97532b-e9ff-4031-a82c-3db5e943bfd9\") " pod="openstack/barbican-api-7947bcd956-482dv" Nov 28 07:16:11 crc kubenswrapper[4922]: I1128 07:16:11.693078 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c038b865-4b32-4be3-9e0a-8c40dc140a68-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"c038b865-4b32-4be3-9e0a-8c40dc140a68\") " pod="openstack/cinder-api-0" Nov 28 07:16:11 crc kubenswrapper[4922]: I1128 07:16:11.693117 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/c038b865-4b32-4be3-9e0a-8c40dc140a68-etc-machine-id\") pod \"cinder-api-0\" (UID: \"c038b865-4b32-4be3-9e0a-8c40dc140a68\") " pod="openstack/cinder-api-0" Nov 28 07:16:11 crc kubenswrapper[4922]: I1128 07:16:11.693158 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/c038b865-4b32-4be3-9e0a-8c40dc140a68-internal-tls-certs\") pod \"cinder-api-0\" (UID: \"c038b865-4b32-4be3-9e0a-8c40dc140a68\") " pod="openstack/cinder-api-0" Nov 28 07:16:11 crc kubenswrapper[4922]: I1128 07:16:11.693195 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/1d97532b-e9ff-4031-a82c-3db5e943bfd9-public-tls-certs\") pod \"barbican-api-7947bcd956-482dv\" (UID: \"1d97532b-e9ff-4031-a82c-3db5e943bfd9\") " pod="openstack/barbican-api-7947bcd956-482dv" Nov 28 07:16:11 crc kubenswrapper[4922]: I1128 07:16:11.693256 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/1d97532b-e9ff-4031-a82c-3db5e943bfd9-internal-tls-certs\") pod \"barbican-api-7947bcd956-482dv\" (UID: \"1d97532b-e9ff-4031-a82c-3db5e943bfd9\") " pod="openstack/barbican-api-7947bcd956-482dv" Nov 28 07:16:11 crc kubenswrapper[4922]: I1128 07:16:11.693296 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/c038b865-4b32-4be3-9e0a-8c40dc140a68-public-tls-certs\") pod \"cinder-api-0\" (UID: \"c038b865-4b32-4be3-9e0a-8c40dc140a68\") " pod="openstack/cinder-api-0" Nov 28 07:16:11 crc kubenswrapper[4922]: I1128 07:16:11.693351 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c038b865-4b32-4be3-9e0a-8c40dc140a68-config-data\") pod \"cinder-api-0\" (UID: \"c038b865-4b32-4be3-9e0a-8c40dc140a68\") " pod="openstack/cinder-api-0" Nov 28 07:16:11 crc kubenswrapper[4922]: I1128 07:16:11.693440 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1d97532b-e9ff-4031-a82c-3db5e943bfd9-config-data\") pod \"barbican-api-7947bcd956-482dv\" (UID: \"1d97532b-e9ff-4031-a82c-3db5e943bfd9\") " pod="openstack/barbican-api-7947bcd956-482dv" Nov 28 07:16:11 crc 
kubenswrapper[4922]: I1128 07:16:11.693503 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/c038b865-4b32-4be3-9e0a-8c40dc140a68-config-data-custom\") pod \"cinder-api-0\" (UID: \"c038b865-4b32-4be3-9e0a-8c40dc140a68\") " pod="openstack/cinder-api-0" Nov 28 07:16:11 crc kubenswrapper[4922]: I1128 07:16:11.694286 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/1d97532b-e9ff-4031-a82c-3db5e943bfd9-logs\") pod \"barbican-api-7947bcd956-482dv\" (UID: \"1d97532b-e9ff-4031-a82c-3db5e943bfd9\") " pod="openstack/barbican-api-7947bcd956-482dv" Nov 28 07:16:11 crc kubenswrapper[4922]: I1128 07:16:11.700142 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/1d97532b-e9ff-4031-a82c-3db5e943bfd9-config-data-custom\") pod \"barbican-api-7947bcd956-482dv\" (UID: \"1d97532b-e9ff-4031-a82c-3db5e943bfd9\") " pod="openstack/barbican-api-7947bcd956-482dv" Nov 28 07:16:11 crc kubenswrapper[4922]: I1128 07:16:11.700170 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1d97532b-e9ff-4031-a82c-3db5e943bfd9-combined-ca-bundle\") pod \"barbican-api-7947bcd956-482dv\" (UID: \"1d97532b-e9ff-4031-a82c-3db5e943bfd9\") " pod="openstack/barbican-api-7947bcd956-482dv" Nov 28 07:16:11 crc kubenswrapper[4922]: I1128 07:16:11.700531 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/1d97532b-e9ff-4031-a82c-3db5e943bfd9-internal-tls-certs\") pod \"barbican-api-7947bcd956-482dv\" (UID: \"1d97532b-e9ff-4031-a82c-3db5e943bfd9\") " pod="openstack/barbican-api-7947bcd956-482dv" Nov 28 07:16:11 crc kubenswrapper[4922]: I1128 07:16:11.700997 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/1d97532b-e9ff-4031-a82c-3db5e943bfd9-public-tls-certs\") pod \"barbican-api-7947bcd956-482dv\" (UID: \"1d97532b-e9ff-4031-a82c-3db5e943bfd9\") " pod="openstack/barbican-api-7947bcd956-482dv" Nov 28 07:16:11 crc kubenswrapper[4922]: I1128 07:16:11.708015 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1d97532b-e9ff-4031-a82c-3db5e943bfd9-config-data\") pod \"barbican-api-7947bcd956-482dv\" (UID: \"1d97532b-e9ff-4031-a82c-3db5e943bfd9\") " pod="openstack/barbican-api-7947bcd956-482dv" Nov 28 07:16:11 crc kubenswrapper[4922]: I1128 07:16:11.710820 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dkc4c\" (UniqueName: \"kubernetes.io/projected/1d97532b-e9ff-4031-a82c-3db5e943bfd9-kube-api-access-dkc4c\") pod \"barbican-api-7947bcd956-482dv\" (UID: \"1d97532b-e9ff-4031-a82c-3db5e943bfd9\") " pod="openstack/barbican-api-7947bcd956-482dv" Nov 28 07:16:11 crc kubenswrapper[4922]: I1128 07:16:11.771760 4922 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-api-7947bcd956-482dv" Nov 28 07:16:11 crc kubenswrapper[4922]: I1128 07:16:11.795199 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c038b865-4b32-4be3-9e0a-8c40dc140a68-scripts\") pod \"cinder-api-0\" (UID: \"c038b865-4b32-4be3-9e0a-8c40dc140a68\") " pod="openstack/cinder-api-0" Nov 28 07:16:11 crc kubenswrapper[4922]: I1128 07:16:11.795323 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-c6kxw\" (UniqueName: \"kubernetes.io/projected/c038b865-4b32-4be3-9e0a-8c40dc140a68-kube-api-access-c6kxw\") pod \"cinder-api-0\" (UID: \"c038b865-4b32-4be3-9e0a-8c40dc140a68\") " pod="openstack/cinder-api-0" Nov 28 07:16:11 crc kubenswrapper[4922]: I1128 07:16:11.795356 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c038b865-4b32-4be3-9e0a-8c40dc140a68-logs\") pod \"cinder-api-0\" (UID: \"c038b865-4b32-4be3-9e0a-8c40dc140a68\") " pod="openstack/cinder-api-0" Nov 28 07:16:11 crc kubenswrapper[4922]: I1128 07:16:11.795403 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c038b865-4b32-4be3-9e0a-8c40dc140a68-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"c038b865-4b32-4be3-9e0a-8c40dc140a68\") " pod="openstack/cinder-api-0" Nov 28 07:16:11 crc kubenswrapper[4922]: I1128 07:16:11.795436 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/c038b865-4b32-4be3-9e0a-8c40dc140a68-etc-machine-id\") pod \"cinder-api-0\" (UID: \"c038b865-4b32-4be3-9e0a-8c40dc140a68\") " pod="openstack/cinder-api-0" Nov 28 07:16:11 crc kubenswrapper[4922]: I1128 07:16:11.795465 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/c038b865-4b32-4be3-9e0a-8c40dc140a68-internal-tls-certs\") pod \"cinder-api-0\" (UID: \"c038b865-4b32-4be3-9e0a-8c40dc140a68\") " pod="openstack/cinder-api-0" Nov 28 07:16:11 crc kubenswrapper[4922]: I1128 07:16:11.795495 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/c038b865-4b32-4be3-9e0a-8c40dc140a68-public-tls-certs\") pod \"cinder-api-0\" (UID: \"c038b865-4b32-4be3-9e0a-8c40dc140a68\") " pod="openstack/cinder-api-0" Nov 28 07:16:11 crc kubenswrapper[4922]: I1128 07:16:11.795534 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c038b865-4b32-4be3-9e0a-8c40dc140a68-config-data\") pod \"cinder-api-0\" (UID: \"c038b865-4b32-4be3-9e0a-8c40dc140a68\") " pod="openstack/cinder-api-0" Nov 28 07:16:11 crc kubenswrapper[4922]: I1128 07:16:11.795582 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/c038b865-4b32-4be3-9e0a-8c40dc140a68-config-data-custom\") pod \"cinder-api-0\" (UID: \"c038b865-4b32-4be3-9e0a-8c40dc140a68\") " pod="openstack/cinder-api-0" Nov 28 07:16:11 crc kubenswrapper[4922]: I1128 07:16:11.796083 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c038b865-4b32-4be3-9e0a-8c40dc140a68-logs\") pod \"cinder-api-0\" (UID: 
\"c038b865-4b32-4be3-9e0a-8c40dc140a68\") " pod="openstack/cinder-api-0"
Nov 28 07:16:11 crc kubenswrapper[4922]: I1128 07:16:11.796131 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/c038b865-4b32-4be3-9e0a-8c40dc140a68-etc-machine-id\") pod \"cinder-api-0\" (UID: \"c038b865-4b32-4be3-9e0a-8c40dc140a68\") " pod="openstack/cinder-api-0"
Nov 28 07:16:11 crc kubenswrapper[4922]: I1128 07:16:11.800504 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/c038b865-4b32-4be3-9e0a-8c40dc140a68-internal-tls-certs\") pod \"cinder-api-0\" (UID: \"c038b865-4b32-4be3-9e0a-8c40dc140a68\") " pod="openstack/cinder-api-0"
Nov 28 07:16:11 crc kubenswrapper[4922]: I1128 07:16:11.802706 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c038b865-4b32-4be3-9e0a-8c40dc140a68-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"c038b865-4b32-4be3-9e0a-8c40dc140a68\") " pod="openstack/cinder-api-0"
Nov 28 07:16:11 crc kubenswrapper[4922]: I1128 07:16:11.802714 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c038b865-4b32-4be3-9e0a-8c40dc140a68-scripts\") pod \"cinder-api-0\" (UID: \"c038b865-4b32-4be3-9e0a-8c40dc140a68\") " pod="openstack/cinder-api-0"
Nov 28 07:16:11 crc kubenswrapper[4922]: I1128 07:16:11.802743 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/c038b865-4b32-4be3-9e0a-8c40dc140a68-public-tls-certs\") pod \"cinder-api-0\" (UID: \"c038b865-4b32-4be3-9e0a-8c40dc140a68\") " pod="openstack/cinder-api-0"
Nov 28 07:16:11 crc kubenswrapper[4922]: I1128 07:16:11.809622 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c038b865-4b32-4be3-9e0a-8c40dc140a68-config-data\") pod \"cinder-api-0\" (UID: \"c038b865-4b32-4be3-9e0a-8c40dc140a68\") " pod="openstack/cinder-api-0"
Nov 28 07:16:11 crc kubenswrapper[4922]: I1128 07:16:11.810132 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/c038b865-4b32-4be3-9e0a-8c40dc140a68-config-data-custom\") pod \"cinder-api-0\" (UID: \"c038b865-4b32-4be3-9e0a-8c40dc140a68\") " pod="openstack/cinder-api-0"
Nov 28 07:16:11 crc kubenswrapper[4922]: I1128 07:16:11.827427 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-c6kxw\" (UniqueName: \"kubernetes.io/projected/c038b865-4b32-4be3-9e0a-8c40dc140a68-kube-api-access-c6kxw\") pod \"cinder-api-0\" (UID: \"c038b865-4b32-4be3-9e0a-8c40dc140a68\") " pod="openstack/cinder-api-0"
Nov 28 07:16:11 crc kubenswrapper[4922]: I1128 07:16:11.880833 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-api-0"
Nov 28 07:16:12 crc kubenswrapper[4922]: W1128 07:16:12.275869 4922 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod1d97532b_e9ff_4031_a82c_3db5e943bfd9.slice/crio-b3b3ec913b61a7608df44accca83fe6463e5e0433ec895ffe07bfa1b397cf3e0 WatchSource:0}: Error finding container b3b3ec913b61a7608df44accca83fe6463e5e0433ec895ffe07bfa1b397cf3e0: Status 404 returned error can't find the container with id b3b3ec913b61a7608df44accca83fe6463e5e0433ec895ffe07bfa1b397cf3e0
Nov 28 07:16:12 crc kubenswrapper[4922]: I1128 07:16:12.276826 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-api-7947bcd956-482dv"]
Nov 28 07:16:12 crc kubenswrapper[4922]: I1128 07:16:12.346236 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-api-0"]
Nov 28 07:16:12 crc kubenswrapper[4922]: W1128 07:16:12.346707 4922 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podc038b865_4b32_4be3_9e0a_8c40dc140a68.slice/crio-f447d48c61d66ccf6c46935127264a15891bc65cafff24e3f5f7f56ca42f5a25 WatchSource:0}: Error finding container f447d48c61d66ccf6c46935127264a15891bc65cafff24e3f5f7f56ca42f5a25: Status 404 returned error can't find the container with id f447d48c61d66ccf6c46935127264a15891bc65cafff24e3f5f7f56ca42f5a25
Nov 28 07:16:12 crc kubenswrapper[4922]: I1128 07:16:12.405628 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"c038b865-4b32-4be3-9e0a-8c40dc140a68","Type":"ContainerStarted","Data":"f447d48c61d66ccf6c46935127264a15891bc65cafff24e3f5f7f56ca42f5a25"}
Nov 28 07:16:12 crc kubenswrapper[4922]: I1128 07:16:12.408720 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"9abee755-a9bf-49f0-85ad-e5c58f2e35c2","Type":"ContainerStarted","Data":"d73add646b1c847abfeacd9d36c803085131b91826db108b4d0d7ba0b0e9b646"}
Nov 28 07:16:12 crc kubenswrapper[4922]: I1128 07:16:12.411992 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-7947bcd956-482dv" event={"ID":"1d97532b-e9ff-4031-a82c-3db5e943bfd9","Type":"ContainerStarted","Data":"b3b3ec913b61a7608df44accca83fe6463e5e0433ec895ffe07bfa1b397cf3e0"}
Nov 28 07:16:13 crc kubenswrapper[4922]: I1128 07:16:13.411064 4922 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="cd60085e-2c1f-4795-95c2-ac9a9a2100bc" path="/var/lib/kubelet/pods/cd60085e-2c1f-4795-95c2-ac9a9a2100bc/volumes"
Nov 28 07:16:13 crc kubenswrapper[4922]: I1128 07:16:13.424528 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"c038b865-4b32-4be3-9e0a-8c40dc140a68","Type":"ContainerStarted","Data":"31b146f13fd3f77793153557e7b67237270c42c2a177ad53d0c2fee7a88ba3e1"}
Nov 28 07:16:13 crc kubenswrapper[4922]: I1128 07:16:13.426378 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"9abee755-a9bf-49f0-85ad-e5c58f2e35c2","Type":"ContainerStarted","Data":"8f93d8f601f3f004d218215d678891da73bd9d1c7c1fafa0468e8c068a5d027f"}
Nov 28 07:16:13 crc kubenswrapper[4922]: I1128 07:16:13.426411 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"9abee755-a9bf-49f0-85ad-e5c58f2e35c2","Type":"ContainerStarted","Data":"4813e7caa96017317260102272646b375cfb0680074050a84464f8277f0d586f"}
Nov 28 07:16:13 crc kubenswrapper[4922]: I1128 07:16:13.431526 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-7947bcd956-482dv" event={"ID":"1d97532b-e9ff-4031-a82c-3db5e943bfd9","Type":"ContainerStarted","Data":"8d636352f04a5d6b017ec6d07127ee84924adb2187df00d394fe1e1c6dd31678"}
Nov 28 07:16:13 crc kubenswrapper[4922]: I1128 07:16:13.431561 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-7947bcd956-482dv" event={"ID":"1d97532b-e9ff-4031-a82c-3db5e943bfd9","Type":"ContainerStarted","Data":"565068b14a54fb0888fb374689df2d8105a821181b3cf92257d558b607b5df90"}
Nov 28 07:16:13 crc kubenswrapper[4922]: I1128 07:16:13.432710 4922 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/barbican-api-7947bcd956-482dv"
Nov 28 07:16:13 crc kubenswrapper[4922]: I1128 07:16:13.432752 4922 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/barbican-api-7947bcd956-482dv"
Nov 28 07:16:13 crc kubenswrapper[4922]: I1128 07:16:13.457663 4922 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-api-7947bcd956-482dv" podStartSLOduration=2.457637717 podStartE2EDuration="2.457637717s" podCreationTimestamp="2025-11-28 07:16:11 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 07:16:13.451808532 +0000 UTC m=+1418.372204124" watchObservedRunningTime="2025-11-28 07:16:13.457637717 +0000 UTC m=+1418.378033319"
Nov 28 07:16:14 crc kubenswrapper[4922]: I1128 07:16:14.444443 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"c038b865-4b32-4be3-9e0a-8c40dc140a68","Type":"ContainerStarted","Data":"b1de83b3a6f902b036e25483c19523a902ba3f397b59a81a79e88727c51fa4bb"}
Nov 28 07:16:14 crc kubenswrapper[4922]: I1128 07:16:14.444810 4922 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/cinder-api-0"
Nov 28 07:16:14 crc kubenswrapper[4922]: I1128 07:16:14.448003 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"9abee755-a9bf-49f0-85ad-e5c58f2e35c2","Type":"ContainerStarted","Data":"a9691659d3ff87f88c40b63c23edb6d642acab95cce0a8ee57f6220a5ee64034"}
Nov 28 07:16:14 crc kubenswrapper[4922]: I1128 07:16:14.472685 4922 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-api-0" podStartSLOduration=3.472653392 podStartE2EDuration="3.472653392s" podCreationTimestamp="2025-11-28 07:16:11 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 07:16:14.466773986 +0000 UTC m=+1419.387169568" watchObservedRunningTime="2025-11-28 07:16:14.472653392 +0000 UTC m=+1419.393048964"
Nov 28 07:16:15 crc kubenswrapper[4922]: I1128 07:16:15.686369 4922 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/cinder-scheduler-0"
Nov 28 07:16:15 crc kubenswrapper[4922]: I1128 07:16:15.886255 4922 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/cinder-scheduler-0"
Nov 28 07:16:16 crc kubenswrapper[4922]: I1128 07:16:16.068945 4922 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-c55f6679-c2kdb"
Nov 28 07:16:16 crc kubenswrapper[4922]: I1128 07:16:16.135068 4922 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-f8dc44d89-v7d7h"]
Nov 28 07:16:16 crc kubenswrapper[4922]: I1128 07:16:16.135363 4922 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-f8dc44d89-v7d7h" podUID="7decb3d6-6624-42fe-9155-4b75224ca2f7" containerName="dnsmasq-dns" containerID="cri-o://e7a8226902f6423b5db458b6518ebb22657ad05d0256b51c6da3053ebf656920" gracePeriod=10
Nov 28 07:16:16 crc kubenswrapper[4922]: I1128 07:16:16.243988 4922 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/barbican-api-59488f5ff4-k2cvn"
Nov 28 07:16:16 crc kubenswrapper[4922]: I1128 07:16:16.413581 4922 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/barbican-api-59488f5ff4-k2cvn"
Nov 28 07:16:16 crc kubenswrapper[4922]: I1128 07:16:16.501663 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"9abee755-a9bf-49f0-85ad-e5c58f2e35c2","Type":"ContainerStarted","Data":"24c474c965b970a0b3c56c3c7e70c4cae7c1ae26b1b3ea9a8673e89232f551af"}
Nov 28 07:16:16 crc kubenswrapper[4922]: I1128 07:16:16.502059 4922 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0"
Nov 28 07:16:16 crc kubenswrapper[4922]: I1128 07:16:16.506725 4922 generic.go:334] "Generic (PLEG): container finished" podID="7decb3d6-6624-42fe-9155-4b75224ca2f7" containerID="e7a8226902f6423b5db458b6518ebb22657ad05d0256b51c6da3053ebf656920" exitCode=0
Nov 28 07:16:16 crc kubenswrapper[4922]: I1128 07:16:16.506867 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-f8dc44d89-v7d7h" event={"ID":"7decb3d6-6624-42fe-9155-4b75224ca2f7","Type":"ContainerDied","Data":"e7a8226902f6423b5db458b6518ebb22657ad05d0256b51c6da3053ebf656920"}
Nov 28 07:16:16 crc kubenswrapper[4922]: I1128 07:16:16.530856 4922 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=2.60495344 podStartE2EDuration="6.530837322s" podCreationTimestamp="2025-11-28 07:16:10 +0000 UTC" firstStartedPulling="2025-11-28 07:16:11.530659587 +0000 UTC m=+1416.451055169" lastFinishedPulling="2025-11-28 07:16:15.456543429 +0000 UTC m=+1420.376939051" observedRunningTime="2025-11-28 07:16:16.523008504 +0000 UTC m=+1421.443404086" watchObservedRunningTime="2025-11-28 07:16:16.530837322 +0000 UTC m=+1421.451232904"
Nov 28 07:16:16 crc kubenswrapper[4922]: I1128 07:16:16.571552 4922 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-scheduler-0"]
Nov 28 07:16:16 crc kubenswrapper[4922]: I1128 07:16:16.639167 4922 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-f8dc44d89-v7d7h"
Nov 28 07:16:16 crc kubenswrapper[4922]: I1128 07:16:16.790539 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/7decb3d6-6624-42fe-9155-4b75224ca2f7-ovsdbserver-sb\") pod \"7decb3d6-6624-42fe-9155-4b75224ca2f7\" (UID: \"7decb3d6-6624-42fe-9155-4b75224ca2f7\") "
Nov 28 07:16:16 crc kubenswrapper[4922]: I1128 07:16:16.790736 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/7decb3d6-6624-42fe-9155-4b75224ca2f7-dns-svc\") pod \"7decb3d6-6624-42fe-9155-4b75224ca2f7\" (UID: \"7decb3d6-6624-42fe-9155-4b75224ca2f7\") "
Nov 28 07:16:16 crc kubenswrapper[4922]: I1128 07:16:16.790769 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/7decb3d6-6624-42fe-9155-4b75224ca2f7-ovsdbserver-nb\") pod \"7decb3d6-6624-42fe-9155-4b75224ca2f7\" (UID: \"7decb3d6-6624-42fe-9155-4b75224ca2f7\") "
Nov 28 07:16:16 crc kubenswrapper[4922]: I1128 07:16:16.790840 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7decb3d6-6624-42fe-9155-4b75224ca2f7-config\") pod \"7decb3d6-6624-42fe-9155-4b75224ca2f7\" (UID: \"7decb3d6-6624-42fe-9155-4b75224ca2f7\") "
Nov 28 07:16:16 crc kubenswrapper[4922]: I1128 07:16:16.790867 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/7decb3d6-6624-42fe-9155-4b75224ca2f7-dns-swift-storage-0\") pod \"7decb3d6-6624-42fe-9155-4b75224ca2f7\" (UID: \"7decb3d6-6624-42fe-9155-4b75224ca2f7\") "
Nov 28 07:16:16 crc kubenswrapper[4922]: I1128 07:16:16.790883 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rjw5n\" (UniqueName: \"kubernetes.io/projected/7decb3d6-6624-42fe-9155-4b75224ca2f7-kube-api-access-rjw5n\") pod \"7decb3d6-6624-42fe-9155-4b75224ca2f7\" (UID: \"7decb3d6-6624-42fe-9155-4b75224ca2f7\") "
Nov 28 07:16:16 crc kubenswrapper[4922]: I1128 07:16:16.799478 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7decb3d6-6624-42fe-9155-4b75224ca2f7-kube-api-access-rjw5n" (OuterVolumeSpecName: "kube-api-access-rjw5n") pod "7decb3d6-6624-42fe-9155-4b75224ca2f7" (UID: "7decb3d6-6624-42fe-9155-4b75224ca2f7"). InnerVolumeSpecName "kube-api-access-rjw5n". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 28 07:16:16 crc kubenswrapper[4922]: I1128 07:16:16.853290 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7decb3d6-6624-42fe-9155-4b75224ca2f7-config" (OuterVolumeSpecName: "config") pod "7decb3d6-6624-42fe-9155-4b75224ca2f7" (UID: "7decb3d6-6624-42fe-9155-4b75224ca2f7"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 28 07:16:16 crc kubenswrapper[4922]: I1128 07:16:16.856262 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7decb3d6-6624-42fe-9155-4b75224ca2f7-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "7decb3d6-6624-42fe-9155-4b75224ca2f7" (UID: "7decb3d6-6624-42fe-9155-4b75224ca2f7"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 28 07:16:16 crc kubenswrapper[4922]: I1128 07:16:16.856457 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7decb3d6-6624-42fe-9155-4b75224ca2f7-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "7decb3d6-6624-42fe-9155-4b75224ca2f7" (UID: "7decb3d6-6624-42fe-9155-4b75224ca2f7"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 28 07:16:16 crc kubenswrapper[4922]: I1128 07:16:16.859164 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7decb3d6-6624-42fe-9155-4b75224ca2f7-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "7decb3d6-6624-42fe-9155-4b75224ca2f7" (UID: "7decb3d6-6624-42fe-9155-4b75224ca2f7"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 28 07:16:16 crc kubenswrapper[4922]: I1128 07:16:16.882357 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7decb3d6-6624-42fe-9155-4b75224ca2f7-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "7decb3d6-6624-42fe-9155-4b75224ca2f7" (UID: "7decb3d6-6624-42fe-9155-4b75224ca2f7"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 28 07:16:16 crc kubenswrapper[4922]: I1128 07:16:16.892990 4922 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/7decb3d6-6624-42fe-9155-4b75224ca2f7-dns-swift-storage-0\") on node \"crc\" DevicePath \"\""
Nov 28 07:16:16 crc kubenswrapper[4922]: I1128 07:16:16.893039 4922 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rjw5n\" (UniqueName: \"kubernetes.io/projected/7decb3d6-6624-42fe-9155-4b75224ca2f7-kube-api-access-rjw5n\") on node \"crc\" DevicePath \"\""
Nov 28 07:16:16 crc kubenswrapper[4922]: I1128 07:16:16.893054 4922 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/7decb3d6-6624-42fe-9155-4b75224ca2f7-ovsdbserver-sb\") on node \"crc\" DevicePath \"\""
Nov 28 07:16:16 crc kubenswrapper[4922]: I1128 07:16:16.893066 4922 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/7decb3d6-6624-42fe-9155-4b75224ca2f7-dns-svc\") on node \"crc\" DevicePath \"\""
Nov 28 07:16:16 crc kubenswrapper[4922]: I1128 07:16:16.893081 4922 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/7decb3d6-6624-42fe-9155-4b75224ca2f7-ovsdbserver-nb\") on node \"crc\" DevicePath \"\""
Nov 28 07:16:16 crc kubenswrapper[4922]: I1128 07:16:16.893094 4922 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7decb3d6-6624-42fe-9155-4b75224ca2f7-config\") on node \"crc\" DevicePath \"\""
Nov 28 07:16:17 crc kubenswrapper[4922]: I1128 07:16:17.413486 4922 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/neutron-64bdbc5658-v8ngg"
Nov 28 07:16:17 crc kubenswrapper[4922]: I1128 07:16:17.530395 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-f8dc44d89-v7d7h" event={"ID":"7decb3d6-6624-42fe-9155-4b75224ca2f7","Type":"ContainerDied","Data":"ea82a31598be35630309439df235e312a7df5736861f78d331eb78effec5cded"}
Nov 28 07:16:17 crc kubenswrapper[4922]: I1128 07:16:17.530456 4922 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-f8dc44d89-v7d7h"
Nov 28 07:16:17 crc kubenswrapper[4922]: I1128 07:16:17.530465 4922 scope.go:117] "RemoveContainer" containerID="e7a8226902f6423b5db458b6518ebb22657ad05d0256b51c6da3053ebf656920"
Nov 28 07:16:17 crc kubenswrapper[4922]: I1128 07:16:17.530843 4922 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-scheduler-0" podUID="5beca492-9b93-4ea6-9c21-e8e87bf85afd" containerName="cinder-scheduler" containerID="cri-o://a698e338df82247c5b9cf333b820d0ac65a36acb96b0926e78bb0e836f443b5e" gracePeriod=30
Nov 28 07:16:17 crc kubenswrapper[4922]: I1128 07:16:17.530946 4922 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-scheduler-0" podUID="5beca492-9b93-4ea6-9c21-e8e87bf85afd" containerName="probe" containerID="cri-o://693bf830b9489a6970be95062ccee89ae267fa47f7f57fbf4d165cd2af36aec4" gracePeriod=30
Nov 28 07:16:17 crc kubenswrapper[4922]: I1128 07:16:17.563445 4922 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-f8dc44d89-v7d7h"]
Nov 28 07:16:17 crc kubenswrapper[4922]: I1128 07:16:17.573103 4922 scope.go:117] "RemoveContainer" containerID="3fcad4ad664b96b8b0d86552e09f9a89fbbaceb658e95144e93f0e722e8345a9"
Nov 28 07:16:17 crc kubenswrapper[4922]: I1128 07:16:17.581992 4922 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-f8dc44d89-v7d7h"]
Nov 28 07:16:18 crc kubenswrapper[4922]: I1128 07:16:18.540694 4922 generic.go:334] "Generic (PLEG): container finished" podID="5beca492-9b93-4ea6-9c21-e8e87bf85afd" containerID="693bf830b9489a6970be95062ccee89ae267fa47f7f57fbf4d165cd2af36aec4" exitCode=0
Nov 28 07:16:18 crc kubenswrapper[4922]: I1128 07:16:18.540755 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"5beca492-9b93-4ea6-9c21-e8e87bf85afd","Type":"ContainerDied","Data":"693bf830b9489a6970be95062ccee89ae267fa47f7f57fbf4d165cd2af36aec4"}
Nov 28 07:16:19 crc kubenswrapper[4922]: I1128 07:16:19.410354 4922 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7decb3d6-6624-42fe-9155-4b75224ca2f7" path="/var/lib/kubelet/pods/7decb3d6-6624-42fe-9155-4b75224ca2f7/volumes"
Nov 28 07:16:19 crc kubenswrapper[4922]: I1128 07:16:19.990129 4922 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/neutron-85487d674f-dfq9s"
Nov 28 07:16:20 crc kubenswrapper[4922]: I1128 07:16:20.075248 4922 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-64bdbc5658-v8ngg"]
Nov 28 07:16:20 crc kubenswrapper[4922]: I1128 07:16:20.075501 4922 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/neutron-64bdbc5658-v8ngg" podUID="0a307c19-4be2-44f2-8034-00ebfa265aac" containerName="neutron-api" containerID="cri-o://0b160c83632765e7deb15535e5fa5a2be7c7cf5b8017005d33b2b358704f3627" gracePeriod=30
Nov 28 07:16:20 crc kubenswrapper[4922]: I1128 07:16:20.075965 4922 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/neutron-64bdbc5658-v8ngg" podUID="0a307c19-4be2-44f2-8034-00ebfa265aac" containerName="neutron-httpd" containerID="cri-o://ee87d6baf2278863606c76a6db01053ad9f2bec4808b7a1028c675346742e113" gracePeriod=30
Nov 28 07:16:20 crc kubenswrapper[4922]: I1128 07:16:20.172299 4922 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/placement-5f78c88b9d-zp4nm"
Nov 28 07:16:20 crc kubenswrapper[4922]: I1128 07:16:20.175480 4922 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/placement-5f78c88b9d-zp4nm"
Nov 28 07:16:20 crc kubenswrapper[4922]: I1128 07:16:20.562235 4922 generic.go:334] "Generic (PLEG): container finished" podID="0a307c19-4be2-44f2-8034-00ebfa265aac" containerID="ee87d6baf2278863606c76a6db01053ad9f2bec4808b7a1028c675346742e113" exitCode=0
Nov 28 07:16:20 crc kubenswrapper[4922]: I1128 07:16:20.562259 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-64bdbc5658-v8ngg" event={"ID":"0a307c19-4be2-44f2-8034-00ebfa265aac","Type":"ContainerDied","Data":"ee87d6baf2278863606c76a6db01053ad9f2bec4808b7a1028c675346742e113"}
Nov 28 07:16:21 crc kubenswrapper[4922]: I1128 07:16:21.572232 4922 generic.go:334] "Generic (PLEG): container finished" podID="5beca492-9b93-4ea6-9c21-e8e87bf85afd" containerID="a698e338df82247c5b9cf333b820d0ac65a36acb96b0926e78bb0e836f443b5e" exitCode=0
Nov 28 07:16:21 crc kubenswrapper[4922]: I1128 07:16:21.572315 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"5beca492-9b93-4ea6-9c21-e8e87bf85afd","Type":"ContainerDied","Data":"a698e338df82247c5b9cf333b820d0ac65a36acb96b0926e78bb0e836f443b5e"}
Nov 28 07:16:21 crc kubenswrapper[4922]: I1128 07:16:21.888175 4922 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-scheduler-0"
Nov 28 07:16:21 crc kubenswrapper[4922]: I1128 07:16:21.983742 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/5beca492-9b93-4ea6-9c21-e8e87bf85afd-config-data-custom\") pod \"5beca492-9b93-4ea6-9c21-e8e87bf85afd\" (UID: \"5beca492-9b93-4ea6-9c21-e8e87bf85afd\") "
Nov 28 07:16:21 crc kubenswrapper[4922]: I1128 07:16:21.983785 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5beca492-9b93-4ea6-9c21-e8e87bf85afd-config-data\") pod \"5beca492-9b93-4ea6-9c21-e8e87bf85afd\" (UID: \"5beca492-9b93-4ea6-9c21-e8e87bf85afd\") "
Nov 28 07:16:21 crc kubenswrapper[4922]: I1128 07:16:21.983882 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/5beca492-9b93-4ea6-9c21-e8e87bf85afd-etc-machine-id\") pod \"5beca492-9b93-4ea6-9c21-e8e87bf85afd\" (UID: \"5beca492-9b93-4ea6-9c21-e8e87bf85afd\") "
Nov 28 07:16:21 crc kubenswrapper[4922]: I1128 07:16:21.983930 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-z7qms\" (UniqueName: \"kubernetes.io/projected/5beca492-9b93-4ea6-9c21-e8e87bf85afd-kube-api-access-z7qms\") pod \"5beca492-9b93-4ea6-9c21-e8e87bf85afd\" (UID: \"5beca492-9b93-4ea6-9c21-e8e87bf85afd\") "
Nov 28 07:16:21 crc kubenswrapper[4922]: I1128 07:16:21.983967 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5beca492-9b93-4ea6-9c21-e8e87bf85afd-scripts\") pod \"5beca492-9b93-4ea6-9c21-e8e87bf85afd\" (UID: \"5beca492-9b93-4ea6-9c21-e8e87bf85afd\") "
Nov 28 07:16:21 crc kubenswrapper[4922]: I1128 07:16:21.984003 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5beca492-9b93-4ea6-9c21-e8e87bf85afd-combined-ca-bundle\") pod \"5beca492-9b93-4ea6-9c21-e8e87bf85afd\" (UID: \"5beca492-9b93-4ea6-9c21-e8e87bf85afd\") "
Nov 28 07:16:21 crc kubenswrapper[4922]: I1128 07:16:21.984088 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/5beca492-9b93-4ea6-9c21-e8e87bf85afd-etc-machine-id" (OuterVolumeSpecName: "etc-machine-id") pod "5beca492-9b93-4ea6-9c21-e8e87bf85afd" (UID: "5beca492-9b93-4ea6-9c21-e8e87bf85afd"). InnerVolumeSpecName "etc-machine-id". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Nov 28 07:16:21 crc kubenswrapper[4922]: I1128 07:16:21.984385 4922 reconciler_common.go:293] "Volume detached for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/5beca492-9b93-4ea6-9c21-e8e87bf85afd-etc-machine-id\") on node \"crc\" DevicePath \"\""
Nov 28 07:16:21 crc kubenswrapper[4922]: I1128 07:16:21.990718 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5beca492-9b93-4ea6-9c21-e8e87bf85afd-kube-api-access-z7qms" (OuterVolumeSpecName: "kube-api-access-z7qms") pod "5beca492-9b93-4ea6-9c21-e8e87bf85afd" (UID: "5beca492-9b93-4ea6-9c21-e8e87bf85afd"). InnerVolumeSpecName "kube-api-access-z7qms". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 28 07:16:21 crc kubenswrapper[4922]: I1128 07:16:21.997404 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5beca492-9b93-4ea6-9c21-e8e87bf85afd-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "5beca492-9b93-4ea6-9c21-e8e87bf85afd" (UID: "5beca492-9b93-4ea6-9c21-e8e87bf85afd"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 07:16:22 crc kubenswrapper[4922]: I1128 07:16:22.006435 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5beca492-9b93-4ea6-9c21-e8e87bf85afd-scripts" (OuterVolumeSpecName: "scripts") pod "5beca492-9b93-4ea6-9c21-e8e87bf85afd" (UID: "5beca492-9b93-4ea6-9c21-e8e87bf85afd"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 07:16:22 crc kubenswrapper[4922]: I1128 07:16:22.059211 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5beca492-9b93-4ea6-9c21-e8e87bf85afd-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "5beca492-9b93-4ea6-9c21-e8e87bf85afd" (UID: "5beca492-9b93-4ea6-9c21-e8e87bf85afd"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 07:16:22 crc kubenswrapper[4922]: I1128 07:16:22.087839 4922 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/5beca492-9b93-4ea6-9c21-e8e87bf85afd-config-data-custom\") on node \"crc\" DevicePath \"\""
Nov 28 07:16:22 crc kubenswrapper[4922]: I1128 07:16:22.087875 4922 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-z7qms\" (UniqueName: \"kubernetes.io/projected/5beca492-9b93-4ea6-9c21-e8e87bf85afd-kube-api-access-z7qms\") on node \"crc\" DevicePath \"\""
Nov 28 07:16:22 crc kubenswrapper[4922]: I1128 07:16:22.087886 4922 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5beca492-9b93-4ea6-9c21-e8e87bf85afd-scripts\") on node \"crc\" DevicePath \"\""
Nov 28 07:16:22 crc kubenswrapper[4922]: I1128 07:16:22.087895 4922 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5beca492-9b93-4ea6-9c21-e8e87bf85afd-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Nov 28 07:16:22 crc kubenswrapper[4922]: I1128 07:16:22.112676 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5beca492-9b93-4ea6-9c21-e8e87bf85afd-config-data" (OuterVolumeSpecName: "config-data") pod "5beca492-9b93-4ea6-9c21-e8e87bf85afd" (UID: "5beca492-9b93-4ea6-9c21-e8e87bf85afd"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 07:16:22 crc kubenswrapper[4922]: I1128 07:16:22.190243 4922 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5beca492-9b93-4ea6-9c21-e8e87bf85afd-config-data\") on node \"crc\" DevicePath \"\""
Nov 28 07:16:22 crc kubenswrapper[4922]: I1128 07:16:22.582129 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"5beca492-9b93-4ea6-9c21-e8e87bf85afd","Type":"ContainerDied","Data":"1ca275389bbcd46ddec6af21af12472db88f7a4d65d2438b46893aedbd07627e"}
Nov 28 07:16:22 crc kubenswrapper[4922]: I1128 07:16:22.582420 4922 scope.go:117] "RemoveContainer" containerID="693bf830b9489a6970be95062ccee89ae267fa47f7f57fbf4d165cd2af36aec4"
Nov 28 07:16:22 crc kubenswrapper[4922]: I1128 07:16:22.582267 4922 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-scheduler-0"
Nov 28 07:16:22 crc kubenswrapper[4922]: I1128 07:16:22.603879 4922 scope.go:117] "RemoveContainer" containerID="a698e338df82247c5b9cf333b820d0ac65a36acb96b0926e78bb0e836f443b5e"
Nov 28 07:16:22 crc kubenswrapper[4922]: I1128 07:16:22.619329 4922 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-scheduler-0"]
Nov 28 07:16:22 crc kubenswrapper[4922]: I1128 07:16:22.631930 4922 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-scheduler-0"]
Nov 28 07:16:22 crc kubenswrapper[4922]: I1128 07:16:22.657037 4922 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-scheduler-0"]
Nov 28 07:16:22 crc kubenswrapper[4922]: E1128 07:16:22.657525 4922 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5beca492-9b93-4ea6-9c21-e8e87bf85afd" containerName="probe"
Nov 28 07:16:22 crc kubenswrapper[4922]: I1128 07:16:22.657550 4922 state_mem.go:107] "Deleted CPUSet assignment" podUID="5beca492-9b93-4ea6-9c21-e8e87bf85afd" containerName="probe"
Nov 28 07:16:22 crc kubenswrapper[4922]: E1128 07:16:22.657574 4922 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7decb3d6-6624-42fe-9155-4b75224ca2f7" containerName="init"
Nov 28 07:16:22 crc kubenswrapper[4922]: I1128 07:16:22.657583 4922 state_mem.go:107] "Deleted CPUSet assignment" podUID="7decb3d6-6624-42fe-9155-4b75224ca2f7" containerName="init"
Nov 28 07:16:22 crc kubenswrapper[4922]: E1128 07:16:22.657608 4922 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7decb3d6-6624-42fe-9155-4b75224ca2f7" containerName="dnsmasq-dns"
Nov 28 07:16:22 crc kubenswrapper[4922]: I1128 07:16:22.657616 4922 state_mem.go:107] "Deleted CPUSet assignment" podUID="7decb3d6-6624-42fe-9155-4b75224ca2f7" containerName="dnsmasq-dns"
Nov 28 07:16:22 crc kubenswrapper[4922]: E1128 07:16:22.657629 4922 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5beca492-9b93-4ea6-9c21-e8e87bf85afd" containerName="cinder-scheduler"
Nov 28 07:16:22 crc kubenswrapper[4922]: I1128 07:16:22.657636 4922 state_mem.go:107] "Deleted CPUSet assignment" podUID="5beca492-9b93-4ea6-9c21-e8e87bf85afd" containerName="cinder-scheduler"
Nov 28 07:16:22 crc kubenswrapper[4922]: I1128 07:16:22.657867 4922 memory_manager.go:354] "RemoveStaleState removing state" podUID="5beca492-9b93-4ea6-9c21-e8e87bf85afd" containerName="cinder-scheduler"
Nov 28 07:16:22 crc kubenswrapper[4922]: I1128 07:16:22.657897 4922 memory_manager.go:354] "RemoveStaleState removing state" podUID="5beca492-9b93-4ea6-9c21-e8e87bf85afd" containerName="probe"
Nov 28 07:16:22 crc kubenswrapper[4922]: I1128 07:16:22.657908 4922 memory_manager.go:354] "RemoveStaleState removing state" podUID="7decb3d6-6624-42fe-9155-4b75224ca2f7" containerName="dnsmasq-dns"
Nov 28 07:16:22 crc kubenswrapper[4922]: I1128 07:16:22.660374 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-scheduler-0"
Nov 28 07:16:22 crc kubenswrapper[4922]: I1128 07:16:22.663164 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-scheduler-config-data"
Nov 28 07:16:22 crc kubenswrapper[4922]: I1128 07:16:22.667635 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-scheduler-0"]
Nov 28 07:16:22 crc kubenswrapper[4922]: I1128 07:16:22.799754 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/678d1f5b-5ebc-4b9e-b5ab-316ec7dfda05-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"678d1f5b-5ebc-4b9e-b5ab-316ec7dfda05\") " pod="openstack/cinder-scheduler-0"
Nov 28 07:16:22 crc kubenswrapper[4922]: I1128 07:16:22.799801 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nbcvt\" (UniqueName: \"kubernetes.io/projected/678d1f5b-5ebc-4b9e-b5ab-316ec7dfda05-kube-api-access-nbcvt\") pod \"cinder-scheduler-0\" (UID: \"678d1f5b-5ebc-4b9e-b5ab-316ec7dfda05\") " pod="openstack/cinder-scheduler-0"
Nov 28 07:16:22 crc kubenswrapper[4922]: I1128 07:16:22.800022 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/678d1f5b-5ebc-4b9e-b5ab-316ec7dfda05-config-data\") pod \"cinder-scheduler-0\" (UID: \"678d1f5b-5ebc-4b9e-b5ab-316ec7dfda05\") " pod="openstack/cinder-scheduler-0"
Nov 28 07:16:22 crc kubenswrapper[4922]: I1128 07:16:22.800121 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/678d1f5b-5ebc-4b9e-b5ab-316ec7dfda05-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"678d1f5b-5ebc-4b9e-b5ab-316ec7dfda05\") " pod="openstack/cinder-scheduler-0"
Nov 28 07:16:22 crc kubenswrapper[4922]: I1128 07:16:22.800166 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/678d1f5b-5ebc-4b9e-b5ab-316ec7dfda05-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"678d1f5b-5ebc-4b9e-b5ab-316ec7dfda05\") " pod="openstack/cinder-scheduler-0"
Nov 28 07:16:22 crc kubenswrapper[4922]: I1128 07:16:22.800232 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/678d1f5b-5ebc-4b9e-b5ab-316ec7dfda05-scripts\") pod \"cinder-scheduler-0\" (UID: \"678d1f5b-5ebc-4b9e-b5ab-316ec7dfda05\") " pod="openstack/cinder-scheduler-0"
Nov 28 07:16:22 crc kubenswrapper[4922]: I1128 07:16:22.902101 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/678d1f5b-5ebc-4b9e-b5ab-316ec7dfda05-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"678d1f5b-5ebc-4b9e-b5ab-316ec7dfda05\") " pod="openstack/cinder-scheduler-0"
Nov 28 07:16:22 crc kubenswrapper[4922]: I1128 07:16:22.902161 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nbcvt\" (UniqueName: \"kubernetes.io/projected/678d1f5b-5ebc-4b9e-b5ab-316ec7dfda05-kube-api-access-nbcvt\") pod \"cinder-scheduler-0\" (UID: \"678d1f5b-5ebc-4b9e-b5ab-316ec7dfda05\") " pod="openstack/cinder-scheduler-0"
Nov 28 07:16:22 crc kubenswrapper[4922]: I1128 07:16:22.902246 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/678d1f5b-5ebc-4b9e-b5ab-316ec7dfda05-config-data\") pod \"cinder-scheduler-0\" (UID: \"678d1f5b-5ebc-4b9e-b5ab-316ec7dfda05\") " pod="openstack/cinder-scheduler-0"
Nov 28 07:16:22 crc kubenswrapper[4922]: I1128 07:16:22.902291 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/678d1f5b-5ebc-4b9e-b5ab-316ec7dfda05-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"678d1f5b-5ebc-4b9e-b5ab-316ec7dfda05\") " pod="openstack/cinder-scheduler-0"
Nov 28 07:16:22 crc kubenswrapper[4922]: I1128 07:16:22.902332 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/678d1f5b-5ebc-4b9e-b5ab-316ec7dfda05-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"678d1f5b-5ebc-4b9e-b5ab-316ec7dfda05\") " pod="openstack/cinder-scheduler-0"
Nov 28 07:16:22 crc kubenswrapper[4922]: I1128 07:16:22.902343 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/678d1f5b-5ebc-4b9e-b5ab-316ec7dfda05-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"678d1f5b-5ebc-4b9e-b5ab-316ec7dfda05\") " pod="openstack/cinder-scheduler-0"
Nov 28 07:16:22 crc kubenswrapper[4922]: I1128 07:16:22.902364 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/678d1f5b-5ebc-4b9e-b5ab-316ec7dfda05-scripts\") pod \"cinder-scheduler-0\" (UID: \"678d1f5b-5ebc-4b9e-b5ab-316ec7dfda05\") " pod="openstack/cinder-scheduler-0"
Nov 28 07:16:22 crc kubenswrapper[4922]: I1128 07:16:22.909834 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/678d1f5b-5ebc-4b9e-b5ab-316ec7dfda05-scripts\") pod \"cinder-scheduler-0\" (UID: \"678d1f5b-5ebc-4b9e-b5ab-316ec7dfda05\") " pod="openstack/cinder-scheduler-0"
Nov 28 07:16:22 crc kubenswrapper[4922]: I1128 07:16:22.910839 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/678d1f5b-5ebc-4b9e-b5ab-316ec7dfda05-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"678d1f5b-5ebc-4b9e-b5ab-316ec7dfda05\") " pod="openstack/cinder-scheduler-0"
Nov 28 07:16:22 crc kubenswrapper[4922]: I1128 07:16:22.919145 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/678d1f5b-5ebc-4b9e-b5ab-316ec7dfda05-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"678d1f5b-5ebc-4b9e-b5ab-316ec7dfda05\") " pod="openstack/cinder-scheduler-0"
Nov 28 07:16:22 crc kubenswrapper[4922]: I1128 07:16:22.919970 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/678d1f5b-5ebc-4b9e-b5ab-316ec7dfda05-config-data\") pod \"cinder-scheduler-0\" (UID: \"678d1f5b-5ebc-4b9e-b5ab-316ec7dfda05\") " pod="openstack/cinder-scheduler-0"
Nov 28 07:16:22 crc kubenswrapper[4922]: I1128 07:16:22.920321 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nbcvt\" (UniqueName: \"kubernetes.io/projected/678d1f5b-5ebc-4b9e-b5ab-316ec7dfda05-kube-api-access-nbcvt\") pod \"cinder-scheduler-0\" (UID: \"678d1f5b-5ebc-4b9e-b5ab-316ec7dfda05\") " pod="openstack/cinder-scheduler-0"
Nov 28 07:16:22 crc kubenswrapper[4922]: I1128 07:16:22.978701 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-scheduler-0"
Nov 28 07:16:23 crc kubenswrapper[4922]: I1128 07:16:23.411303 4922 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5beca492-9b93-4ea6-9c21-e8e87bf85afd" path="/var/lib/kubelet/pods/5beca492-9b93-4ea6-9c21-e8e87bf85afd/volumes"
Nov 28 07:16:23 crc kubenswrapper[4922]: I1128 07:16:23.459591 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-scheduler-0"]
Nov 28 07:16:23 crc kubenswrapper[4922]: W1128 07:16:23.467464 4922 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod678d1f5b_5ebc_4b9e_b5ab_316ec7dfda05.slice/crio-73eda3046837957e632c01f6c732ea7f1263aaaf13c41e448b812853ee42f156 WatchSource:0}: Error finding container 73eda3046837957e632c01f6c732ea7f1263aaaf13c41e448b812853ee42f156: Status 404 returned error can't find the container with id 73eda3046837957e632c01f6c732ea7f1263aaaf13c41e448b812853ee42f156
Nov 28 07:16:23 crc kubenswrapper[4922]: I1128 07:16:23.522040 4922 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/barbican-api-7947bcd956-482dv"
Nov 28 07:16:23 crc kubenswrapper[4922]: I1128 07:16:23.594327 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"678d1f5b-5ebc-4b9e-b5ab-316ec7dfda05","Type":"ContainerStarted","Data":"73eda3046837957e632c01f6c732ea7f1263aaaf13c41e448b812853ee42f156"}
Nov 28 07:16:23 crc kubenswrapper[4922]: I1128 07:16:23.654884 4922 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/barbican-api-7947bcd956-482dv"
Nov 28 07:16:23 crc kubenswrapper[4922]: I1128 07:16:23.734811 4922 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-api-59488f5ff4-k2cvn"]
Nov 28 07:16:23 crc kubenswrapper[4922]: I1128 07:16:23.735085 4922 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/barbican-api-59488f5ff4-k2cvn" podUID="69d50d02-732b-47a3-b342-ff1acbb0af7f" containerName="barbican-api-log" containerID="cri-o://e2a351a4a54a3ccf67ea907ebc75c31bb097dfde0b8ed6cfd629d5fa6d4800ec" gracePeriod=30
Nov 28 07:16:23 crc kubenswrapper[4922]: I1128 07:16:23.735570 4922 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/barbican-api-59488f5ff4-k2cvn" podUID="69d50d02-732b-47a3-b342-ff1acbb0af7f" containerName="barbican-api" containerID="cri-o://dcd763ee8cb626b51c7b9e6adecf7b30b817f639e88a1774228b08dd526ce2f1" gracePeriod=30
Nov 28 07:16:24 crc kubenswrapper[4922]: I1128 07:16:24.148820 4922 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/cinder-api-0"
Nov 28 07:16:24 crc kubenswrapper[4922]: I1128 07:16:24.261458 4922 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-64bdbc5658-v8ngg"
Nov 28 07:16:24 crc kubenswrapper[4922]: I1128 07:16:24.342734 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9h4zp\" (UniqueName: \"kubernetes.io/projected/0a307c19-4be2-44f2-8034-00ebfa265aac-kube-api-access-9h4zp\") pod \"0a307c19-4be2-44f2-8034-00ebfa265aac\" (UID: \"0a307c19-4be2-44f2-8034-00ebfa265aac\") "
Nov 28 07:16:24 crc kubenswrapper[4922]: I1128 07:16:24.342796 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0a307c19-4be2-44f2-8034-00ebfa265aac-combined-ca-bundle\") pod \"0a307c19-4be2-44f2-8034-00ebfa265aac\" (UID: \"0a307c19-4be2-44f2-8034-00ebfa265aac\") "
Nov 28 07:16:24 crc kubenswrapper[4922]: I1128 07:16:24.342825 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/0a307c19-4be2-44f2-8034-00ebfa265aac-httpd-config\") pod \"0a307c19-4be2-44f2-8034-00ebfa265aac\" (UID: \"0a307c19-4be2-44f2-8034-00ebfa265aac\") "
Nov 28 07:16:24 crc kubenswrapper[4922]: I1128 07:16:24.342953 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/0a307c19-4be2-44f2-8034-00ebfa265aac-ovndb-tls-certs\") pod \"0a307c19-4be2-44f2-8034-00ebfa265aac\" (UID: \"0a307c19-4be2-44f2-8034-00ebfa265aac\") "
Nov 28 07:16:24 crc kubenswrapper[4922]: I1128 07:16:24.343019 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/0a307c19-4be2-44f2-8034-00ebfa265aac-config\") pod \"0a307c19-4be2-44f2-8034-00ebfa265aac\" (UID: \"0a307c19-4be2-44f2-8034-00ebfa265aac\") "
Nov 28 07:16:24 crc kubenswrapper[4922]: I1128 07:16:24.347021 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0a307c19-4be2-44f2-8034-00ebfa265aac-kube-api-access-9h4zp" (OuterVolumeSpecName: "kube-api-access-9h4zp") pod "0a307c19-4be2-44f2-8034-00ebfa265aac" (UID: "0a307c19-4be2-44f2-8034-00ebfa265aac"). InnerVolumeSpecName "kube-api-access-9h4zp". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 28 07:16:24 crc kubenswrapper[4922]: I1128 07:16:24.347434 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0a307c19-4be2-44f2-8034-00ebfa265aac-httpd-config" (OuterVolumeSpecName: "httpd-config") pod "0a307c19-4be2-44f2-8034-00ebfa265aac" (UID: "0a307c19-4be2-44f2-8034-00ebfa265aac"). InnerVolumeSpecName "httpd-config". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 07:16:24 crc kubenswrapper[4922]: I1128 07:16:24.419382 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0a307c19-4be2-44f2-8034-00ebfa265aac-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "0a307c19-4be2-44f2-8034-00ebfa265aac" (UID: "0a307c19-4be2-44f2-8034-00ebfa265aac"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 07:16:24 crc kubenswrapper[4922]: I1128 07:16:24.427856 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0a307c19-4be2-44f2-8034-00ebfa265aac-config" (OuterVolumeSpecName: "config") pod "0a307c19-4be2-44f2-8034-00ebfa265aac" (UID: "0a307c19-4be2-44f2-8034-00ebfa265aac"). InnerVolumeSpecName "config". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 07:16:24 crc kubenswrapper[4922]: I1128 07:16:24.443959 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0a307c19-4be2-44f2-8034-00ebfa265aac-ovndb-tls-certs" (OuterVolumeSpecName: "ovndb-tls-certs") pod "0a307c19-4be2-44f2-8034-00ebfa265aac" (UID: "0a307c19-4be2-44f2-8034-00ebfa265aac"). InnerVolumeSpecName "ovndb-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 07:16:24 crc kubenswrapper[4922]: I1128 07:16:24.445478 4922 reconciler_common.go:293] "Volume detached for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/0a307c19-4be2-44f2-8034-00ebfa265aac-ovndb-tls-certs\") on node \"crc\" DevicePath \"\""
Nov 28 07:16:24 crc kubenswrapper[4922]: I1128 07:16:24.445940 4922 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/secret/0a307c19-4be2-44f2-8034-00ebfa265aac-config\") on node \"crc\" DevicePath \"\""
Nov 28 07:16:24 crc kubenswrapper[4922]: I1128 07:16:24.446128 4922 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9h4zp\" (UniqueName: \"kubernetes.io/projected/0a307c19-4be2-44f2-8034-00ebfa265aac-kube-api-access-9h4zp\") on node \"crc\" DevicePath \"\""
Nov 28 07:16:24 crc kubenswrapper[4922]: I1128 07:16:24.446191 4922 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0a307c19-4be2-44f2-8034-00ebfa265aac-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Nov 28 07:16:24 crc kubenswrapper[4922]: I1128 07:16:24.446285 4922 reconciler_common.go:293] "Volume detached for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/0a307c19-4be2-44f2-8034-00ebfa265aac-httpd-config\") on node \"crc\" DevicePath \"\""
Nov 28 07:16:24 crc kubenswrapper[4922]: I1128 07:16:24.607160 4922 generic.go:334] "Generic (PLEG): container finished" podID="69d50d02-732b-47a3-b342-ff1acbb0af7f" containerID="e2a351a4a54a3ccf67ea907ebc75c31bb097dfde0b8ed6cfd629d5fa6d4800ec" exitCode=143
Nov 28 07:16:24 crc kubenswrapper[4922]: I1128 07:16:24.607238 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-59488f5ff4-k2cvn" event={"ID":"69d50d02-732b-47a3-b342-ff1acbb0af7f","Type":"ContainerDied","Data":"e2a351a4a54a3ccf67ea907ebc75c31bb097dfde0b8ed6cfd629d5fa6d4800ec"}
Nov 28 07:16:24 crc kubenswrapper[4922]: I1128 07:16:24.610082 4922 generic.go:334] "Generic (PLEG): container finished" podID="0a307c19-4be2-44f2-8034-00ebfa265aac" containerID="0b160c83632765e7deb15535e5fa5a2be7c7cf5b8017005d33b2b358704f3627" exitCode=0
Nov 28 07:16:24 crc kubenswrapper[4922]: I1128 07:16:24.610212 4922 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-64bdbc5658-v8ngg"
Nov 28 07:16:24 crc kubenswrapper[4922]: I1128 07:16:24.610209 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-64bdbc5658-v8ngg" event={"ID":"0a307c19-4be2-44f2-8034-00ebfa265aac","Type":"ContainerDied","Data":"0b160c83632765e7deb15535e5fa5a2be7c7cf5b8017005d33b2b358704f3627"}
Nov 28 07:16:24 crc kubenswrapper[4922]: I1128 07:16:24.610262 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-64bdbc5658-v8ngg" event={"ID":"0a307c19-4be2-44f2-8034-00ebfa265aac","Type":"ContainerDied","Data":"845fe27f408879e3d827a81fb1edb1197e5d46171263a5d932dba685bf13577b"}
Nov 28 07:16:24 crc kubenswrapper[4922]: I1128 07:16:24.610279 4922 scope.go:117] "RemoveContainer" containerID="ee87d6baf2278863606c76a6db01053ad9f2bec4808b7a1028c675346742e113"
Nov 28 07:16:24 crc kubenswrapper[4922]: I1128 07:16:24.635379 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"678d1f5b-5ebc-4b9e-b5ab-316ec7dfda05","Type":"ContainerStarted","Data":"e6868b48a7fdc4bd6127081507fdf9eb8d100fe7676d439af911bfdac8246114"}
Nov 28 07:16:24 crc kubenswrapper[4922]: I1128 07:16:24.659915 4922 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-64bdbc5658-v8ngg"]
Nov 28 07:16:24 crc kubenswrapper[4922]: I1128 07:16:24.666866 4922 scope.go:117] "RemoveContainer" containerID="0b160c83632765e7deb15535e5fa5a2be7c7cf5b8017005d33b2b358704f3627"
Nov 28 07:16:24 crc kubenswrapper[4922]: I1128 07:16:24.667285 4922 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/neutron-64bdbc5658-v8ngg"]
Nov 28 07:16:24 crc kubenswrapper[4922]: I1128 07:16:24.712241 4922 scope.go:117] "RemoveContainer" containerID="ee87d6baf2278863606c76a6db01053ad9f2bec4808b7a1028c675346742e113"
Nov 28 07:16:24 crc kubenswrapper[4922]: E1128 07:16:24.712903 4922 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ee87d6baf2278863606c76a6db01053ad9f2bec4808b7a1028c675346742e113\": container with ID starting with ee87d6baf2278863606c76a6db01053ad9f2bec4808b7a1028c675346742e113 not found: ID does not exist" containerID="ee87d6baf2278863606c76a6db01053ad9f2bec4808b7a1028c675346742e113"
Nov 28 07:16:24 crc kubenswrapper[4922]: I1128 07:16:24.712967 4922 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ee87d6baf2278863606c76a6db01053ad9f2bec4808b7a1028c675346742e113"} err="failed to get container status \"ee87d6baf2278863606c76a6db01053ad9f2bec4808b7a1028c675346742e113\": rpc error: code = NotFound desc = could not find container \"ee87d6baf2278863606c76a6db01053ad9f2bec4808b7a1028c675346742e113\": container with ID starting with ee87d6baf2278863606c76a6db01053ad9f2bec4808b7a1028c675346742e113 not found: ID does not exist"
Nov 28 07:16:24 crc kubenswrapper[4922]: I1128 07:16:24.713034 4922 scope.go:117] "RemoveContainer" containerID="0b160c83632765e7deb15535e5fa5a2be7c7cf5b8017005d33b2b358704f3627"
Nov 28 07:16:24 crc kubenswrapper[4922]: E1128 07:16:24.713384 4922 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0b160c83632765e7deb15535e5fa5a2be7c7cf5b8017005d33b2b358704f3627\": container with ID starting with 0b160c83632765e7deb15535e5fa5a2be7c7cf5b8017005d33b2b358704f3627 not found: ID does not exist" containerID="0b160c83632765e7deb15535e5fa5a2be7c7cf5b8017005d33b2b358704f3627"
Nov 28 07:16:24 crc kubenswrapper[4922]: I1128 07:16:24.713515 4922 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0b160c83632765e7deb15535e5fa5a2be7c7cf5b8017005d33b2b358704f3627"} err="failed to get container status \"0b160c83632765e7deb15535e5fa5a2be7c7cf5b8017005d33b2b358704f3627\": rpc error: code = NotFound desc = could not find container \"0b160c83632765e7deb15535e5fa5a2be7c7cf5b8017005d33b2b358704f3627\": container with ID starting with 0b160c83632765e7deb15535e5fa5a2be7c7cf5b8017005d33b2b358704f3627 not found: ID does not exist"
Nov 28 07:16:25 crc kubenswrapper[4922]: I1128 07:16:25.408541 4922 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0a307c19-4be2-44f2-8034-00ebfa265aac" path="/var/lib/kubelet/pods/0a307c19-4be2-44f2-8034-00ebfa265aac/volumes"
Nov 28 07:16:25 crc kubenswrapper[4922]: I1128 07:16:25.648345 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"678d1f5b-5ebc-4b9e-b5ab-316ec7dfda05","Type":"ContainerStarted","Data":"070987feaf271fc91c9916a76b022635a4c92b91a9f70a5a74eccab2983e3ede"}
Nov 28 07:16:25 crc kubenswrapper[4922]: I1128 07:16:25.680818 4922 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-scheduler-0" podStartSLOduration=3.680802027 podStartE2EDuration="3.680802027s" podCreationTimestamp="2025-11-28 07:16:22 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 07:16:25.675385762 +0000 UTC m=+1430.595781374" watchObservedRunningTime="2025-11-28 07:16:25.680802027 +0000 UTC m=+1430.601197609"
Nov 28 07:16:25 crc kubenswrapper[4922]: I1128 07:16:25.726838 4922 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/keystone-c6c69b978-txpld"
Nov 28 07:16:26 crc kubenswrapper[4922]: I1128 07:16:26.912129 4922 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/barbican-api-59488f5ff4-k2cvn" podUID="69d50d02-732b-47a3-b342-ff1acbb0af7f" containerName="barbican-api" probeResult="failure" output="Get \"http://10.217.0.158:9311/healthcheck\": read tcp 10.217.0.2:60908->10.217.0.158:9311: read: connection reset by peer"
Nov 28 07:16:26 crc kubenswrapper[4922]: I1128 07:16:26.912178 4922 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/barbican-api-59488f5ff4-k2cvn" podUID="69d50d02-732b-47a3-b342-ff1acbb0af7f" containerName="barbican-api-log" probeResult="failure" output="Get \"http://10.217.0.158:9311/healthcheck\": read tcp 10.217.0.2:60894->10.217.0.158:9311: read: connection reset by peer"
Nov 28 07:16:27 crc kubenswrapper[4922]: I1128 07:16:27.311782 4922 patch_prober.go:28] interesting pod/machine-config-daemon-h8wk6 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 28 07:16:27 crc kubenswrapper[4922]: I1128 07:16:27.312098 4922 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-h8wk6" podUID="0498340a-5e95-42bf-a0a6-8ac89a6b8858" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 28 07:16:27 crc kubenswrapper[4922]: I1128 07:16:27.312151 4922 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-h8wk6"
Nov 28 07:16:27 crc kubenswrapper[4922]: I1128 07:16:27.312789 4922 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"81e37e3417d1f4f55a00b3a748b722590a99b434ea982924a0a5d757ceb112c8"} pod="openshift-machine-config-operator/machine-config-daemon-h8wk6" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted"
Nov 28 07:16:27 crc kubenswrapper[4922]: I1128 07:16:27.312851 4922 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-h8wk6" podUID="0498340a-5e95-42bf-a0a6-8ac89a6b8858" containerName="machine-config-daemon" containerID="cri-o://81e37e3417d1f4f55a00b3a748b722590a99b434ea982924a0a5d757ceb112c8" gracePeriod=600
Nov 28 07:16:27 crc kubenswrapper[4922]: I1128 07:16:27.369363 4922 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-api-59488f5ff4-k2cvn"
Nov 28 07:16:27 crc kubenswrapper[4922]: I1128 07:16:27.496608 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tc5rw\" (UniqueName: \"kubernetes.io/projected/69d50d02-732b-47a3-b342-ff1acbb0af7f-kube-api-access-tc5rw\") pod \"69d50d02-732b-47a3-b342-ff1acbb0af7f\" (UID: \"69d50d02-732b-47a3-b342-ff1acbb0af7f\") "
Nov 28 07:16:27 crc kubenswrapper[4922]: I1128 07:16:27.496713 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/69d50d02-732b-47a3-b342-ff1acbb0af7f-logs\") pod \"69d50d02-732b-47a3-b342-ff1acbb0af7f\" (UID: \"69d50d02-732b-47a3-b342-ff1acbb0af7f\") "
Nov 28 07:16:27 crc kubenswrapper[4922]: I1128 07:16:27.496841 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/69d50d02-732b-47a3-b342-ff1acbb0af7f-config-data-custom\") pod \"69d50d02-732b-47a3-b342-ff1acbb0af7f\" (UID: \"69d50d02-732b-47a3-b342-ff1acbb0af7f\") "
Nov 28 07:16:27 crc kubenswrapper[4922]: I1128 07:16:27.496906 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/69d50d02-732b-47a3-b342-ff1acbb0af7f-config-data\") pod \"69d50d02-732b-47a3-b342-ff1acbb0af7f\" (UID: \"69d50d02-732b-47a3-b342-ff1acbb0af7f\") "
Nov 28 07:16:27 crc kubenswrapper[4922]: I1128 07:16:27.497048 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/69d50d02-732b-47a3-b342-ff1acbb0af7f-combined-ca-bundle\") pod \"69d50d02-732b-47a3-b342-ff1acbb0af7f\" (UID: \"69d50d02-732b-47a3-b342-ff1acbb0af7f\") "
Nov 28 07:16:27 crc kubenswrapper[4922]: I1128 07:16:27.498177 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/69d50d02-732b-47a3-b342-ff1acbb0af7f-logs" (OuterVolumeSpecName: "logs") pod "69d50d02-732b-47a3-b342-ff1acbb0af7f" (UID: "69d50d02-732b-47a3-b342-ff1acbb0af7f"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 28 07:16:27 crc kubenswrapper[4922]: I1128 07:16:27.499020 4922 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/69d50d02-732b-47a3-b342-ff1acbb0af7f-logs\") on node \"crc\" DevicePath \"\""
Nov 28 07:16:27 crc kubenswrapper[4922]: I1128 07:16:27.502517 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/69d50d02-732b-47a3-b342-ff1acbb0af7f-kube-api-access-tc5rw" (OuterVolumeSpecName: "kube-api-access-tc5rw") pod "69d50d02-732b-47a3-b342-ff1acbb0af7f" (UID: "69d50d02-732b-47a3-b342-ff1acbb0af7f"). InnerVolumeSpecName "kube-api-access-tc5rw". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 28 07:16:27 crc kubenswrapper[4922]: I1128 07:16:27.503484 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/69d50d02-732b-47a3-b342-ff1acbb0af7f-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "69d50d02-732b-47a3-b342-ff1acbb0af7f" (UID: "69d50d02-732b-47a3-b342-ff1acbb0af7f"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 07:16:27 crc kubenswrapper[4922]: I1128 07:16:27.525494 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/69d50d02-732b-47a3-b342-ff1acbb0af7f-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "69d50d02-732b-47a3-b342-ff1acbb0af7f" (UID: "69d50d02-732b-47a3-b342-ff1acbb0af7f"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 07:16:27 crc kubenswrapper[4922]: I1128 07:16:27.560528 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/69d50d02-732b-47a3-b342-ff1acbb0af7f-config-data" (OuterVolumeSpecName: "config-data") pod "69d50d02-732b-47a3-b342-ff1acbb0af7f" (UID: "69d50d02-732b-47a3-b342-ff1acbb0af7f"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 07:16:27 crc kubenswrapper[4922]: I1128 07:16:27.601410 4922 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/69d50d02-732b-47a3-b342-ff1acbb0af7f-config-data-custom\") on node \"crc\" DevicePath \"\""
Nov 28 07:16:27 crc kubenswrapper[4922]: I1128 07:16:27.601459 4922 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/69d50d02-732b-47a3-b342-ff1acbb0af7f-config-data\") on node \"crc\" DevicePath \"\""
Nov 28 07:16:27 crc kubenswrapper[4922]: I1128 07:16:27.601481 4922 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/69d50d02-732b-47a3-b342-ff1acbb0af7f-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Nov 28 07:16:27 crc kubenswrapper[4922]: I1128 07:16:27.601496 4922 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tc5rw\" (UniqueName: \"kubernetes.io/projected/69d50d02-732b-47a3-b342-ff1acbb0af7f-kube-api-access-tc5rw\") on node \"crc\" DevicePath \"\""
Nov 28 07:16:27 crc kubenswrapper[4922]: I1128 07:16:27.675256 4922 generic.go:334] "Generic (PLEG): container finished" podID="69d50d02-732b-47a3-b342-ff1acbb0af7f" containerID="dcd763ee8cb626b51c7b9e6adecf7b30b817f639e88a1774228b08dd526ce2f1" exitCode=0
Nov 28 07:16:27 crc kubenswrapper[4922]: I1128 07:16:27.675337 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-59488f5ff4-k2cvn" event={"ID":"69d50d02-732b-47a3-b342-ff1acbb0af7f","Type":"ContainerDied","Data":"dcd763ee8cb626b51c7b9e6adecf7b30b817f639e88a1774228b08dd526ce2f1"}
Nov 28 07:16:27 crc kubenswrapper[4922]: I1128 07:16:27.675367 4922 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-api-59488f5ff4-k2cvn"
Nov 28 07:16:27 crc kubenswrapper[4922]: I1128 07:16:27.675395 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-59488f5ff4-k2cvn" event={"ID":"69d50d02-732b-47a3-b342-ff1acbb0af7f","Type":"ContainerDied","Data":"2d69f3183359e875eb0d9ca8f2d58ba399578d74b85e125394815f2df59777cb"}
Nov 28 07:16:27 crc kubenswrapper[4922]: I1128 07:16:27.675415 4922 scope.go:117] "RemoveContainer" containerID="dcd763ee8cb626b51c7b9e6adecf7b30b817f639e88a1774228b08dd526ce2f1"
Nov 28 07:16:27 crc kubenswrapper[4922]: I1128 07:16:27.679247 4922 generic.go:334] "Generic (PLEG): container finished" podID="0498340a-5e95-42bf-a0a6-8ac89a6b8858" containerID="81e37e3417d1f4f55a00b3a748b722590a99b434ea982924a0a5d757ceb112c8" exitCode=0
Nov 28 07:16:27 crc kubenswrapper[4922]: I1128 07:16:27.679276 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-h8wk6" event={"ID":"0498340a-5e95-42bf-a0a6-8ac89a6b8858","Type":"ContainerDied","Data":"81e37e3417d1f4f55a00b3a748b722590a99b434ea982924a0a5d757ceb112c8"}
Nov 28 07:16:27 crc kubenswrapper[4922]: I1128 07:16:27.679301 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-h8wk6" event={"ID":"0498340a-5e95-42bf-a0a6-8ac89a6b8858","Type":"ContainerStarted","Data":"50188513677cb6b14ed255820648795c8e06f68b16a46dc87e268985d568770f"}
Nov 28 07:16:27 crc kubenswrapper[4922]: I1128 07:16:27.730461 4922 scope.go:117] "RemoveContainer" containerID="e2a351a4a54a3ccf67ea907ebc75c31bb097dfde0b8ed6cfd629d5fa6d4800ec"
Nov 28 07:16:27 crc kubenswrapper[4922]: I1128 07:16:27.737623 4922 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-api-59488f5ff4-k2cvn"]
Nov 28 07:16:27 crc kubenswrapper[4922]: I1128 07:16:27.746089 4922 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-api-59488f5ff4-k2cvn"]
Nov 28 07:16:27 crc kubenswrapper[4922]: I1128 07:16:27.765884 4922 scope.go:117] "RemoveContainer" containerID="dcd763ee8cb626b51c7b9e6adecf7b30b817f639e88a1774228b08dd526ce2f1"
Nov 28 07:16:27 crc kubenswrapper[4922]: E1128 07:16:27.767423 4922 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"dcd763ee8cb626b51c7b9e6adecf7b30b817f639e88a1774228b08dd526ce2f1\": container with ID starting with dcd763ee8cb626b51c7b9e6adecf7b30b817f639e88a1774228b08dd526ce2f1 not found: ID does not exist" containerID="dcd763ee8cb626b51c7b9e6adecf7b30b817f639e88a1774228b08dd526ce2f1"
Nov 28 07:16:27 crc kubenswrapper[4922]: I1128 07:16:27.767463 4922 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"dcd763ee8cb626b51c7b9e6adecf7b30b817f639e88a1774228b08dd526ce2f1"} err="failed to get container status \"dcd763ee8cb626b51c7b9e6adecf7b30b817f639e88a1774228b08dd526ce2f1\": rpc error: code = NotFound desc = could not find container \"dcd763ee8cb626b51c7b9e6adecf7b30b817f639e88a1774228b08dd526ce2f1\": container with ID starting with dcd763ee8cb626b51c7b9e6adecf7b30b817f639e88a1774228b08dd526ce2f1 not found: ID does not exist"
Nov 28 07:16:27 crc kubenswrapper[4922]: I1128 07:16:27.767487 4922 scope.go:117] "RemoveContainer" containerID="e2a351a4a54a3ccf67ea907ebc75c31bb097dfde0b8ed6cfd629d5fa6d4800ec"
Nov 28 07:16:27 crc kubenswrapper[4922]: E1128 07:16:27.767847 4922 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e2a351a4a54a3ccf67ea907ebc75c31bb097dfde0b8ed6cfd629d5fa6d4800ec\": container with ID starting with e2a351a4a54a3ccf67ea907ebc75c31bb097dfde0b8ed6cfd629d5fa6d4800ec not found: ID does not exist" containerID="e2a351a4a54a3ccf67ea907ebc75c31bb097dfde0b8ed6cfd629d5fa6d4800ec"
Nov 28 07:16:27 crc kubenswrapper[4922]: I1128 07:16:27.767871 4922 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e2a351a4a54a3ccf67ea907ebc75c31bb097dfde0b8ed6cfd629d5fa6d4800ec"} err="failed to get container status \"e2a351a4a54a3ccf67ea907ebc75c31bb097dfde0b8ed6cfd629d5fa6d4800ec\": rpc error: code = NotFound desc = could not find container \"e2a351a4a54a3ccf67ea907ebc75c31bb097dfde0b8ed6cfd629d5fa6d4800ec\": container with ID starting with e2a351a4a54a3ccf67ea907ebc75c31bb097dfde0b8ed6cfd629d5fa6d4800ec not found: ID does not exist"
Nov 28 07:16:27 crc kubenswrapper[4922]: I1128 07:16:27.768040 4922 scope.go:117] "RemoveContainer" containerID="74644ed2805eab754b767a063d8b9fa8b033ceca1db2f16aed9a8b2d915a2091"
Nov 28 07:16:27 crc kubenswrapper[4922]: I1128 07:16:27.979489 4922 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/cinder-scheduler-0"
Nov 28 07:16:27 crc kubenswrapper[4922]: I1128 07:16:27.981796 4922 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/openstackclient"]
Nov 28 07:16:27 crc kubenswrapper[4922]: E1128 07:16:27.982290 4922 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="69d50d02-732b-47a3-b342-ff1acbb0af7f" containerName="barbican-api"
Nov 28 07:16:27 crc kubenswrapper[4922]: I1128 07:16:27.982316 4922 state_mem.go:107] "Deleted CPUSet assignment" podUID="69d50d02-732b-47a3-b342-ff1acbb0af7f" containerName="barbican-api"
Nov 28 07:16:27 crc kubenswrapper[4922]: E1128 07:16:27.982331 4922 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0a307c19-4be2-44f2-8034-00ebfa265aac" containerName="neutron-api"
Nov 28 07:16:27 crc kubenswrapper[4922]: I1128 07:16:27.982340 4922 state_mem.go:107] "Deleted CPUSet assignment" podUID="0a307c19-4be2-44f2-8034-00ebfa265aac" containerName="neutron-api"
Nov 28 07:16:27 crc kubenswrapper[4922]: E1128 07:16:27.982353 4922 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0a307c19-4be2-44f2-8034-00ebfa265aac" containerName="neutron-httpd"
Nov 28 07:16:27 crc kubenswrapper[4922]: I1128 07:16:27.982361 4922 state_mem.go:107] "Deleted CPUSet assignment" podUID="0a307c19-4be2-44f2-8034-00ebfa265aac" containerName="neutron-httpd"
Nov 28 07:16:27 crc kubenswrapper[4922]: E1128 07:16:27.982393 4922 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="69d50d02-732b-47a3-b342-ff1acbb0af7f" containerName="barbican-api-log"
Nov 28 07:16:27 crc kubenswrapper[4922]: I1128 07:16:27.982402 4922 state_mem.go:107] "Deleted CPUSet assignment" podUID="69d50d02-732b-47a3-b342-ff1acbb0af7f" containerName="barbican-api-log"
Nov 28 07:16:27 crc kubenswrapper[4922]: I1128 07:16:27.982630 4922 memory_manager.go:354] "RemoveStaleState removing state" podUID="69d50d02-732b-47a3-b342-ff1acbb0af7f" containerName="barbican-api"
Nov 28 07:16:27 crc kubenswrapper[4922]: I1128 07:16:27.982665 4922 memory_manager.go:354] "RemoveStaleState removing state" podUID="69d50d02-732b-47a3-b342-ff1acbb0af7f" containerName="barbican-api-log"
Nov 28 07:16:27 crc kubenswrapper[4922]: I1128 07:16:27.982683 4922 memory_manager.go:354] "RemoveStaleState removing state"
podUID="0a307c19-4be2-44f2-8034-00ebfa265aac" containerName="neutron-httpd" Nov 28 07:16:27 crc kubenswrapper[4922]: I1128 07:16:27.982697 4922 memory_manager.go:354] "RemoveStaleState removing state" podUID="0a307c19-4be2-44f2-8034-00ebfa265aac" containerName="neutron-api" Nov 28 07:16:27 crc kubenswrapper[4922]: I1128 07:16:27.983430 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstackclient" Nov 28 07:16:27 crc kubenswrapper[4922]: I1128 07:16:27.988363 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-config-secret" Nov 28 07:16:27 crc kubenswrapper[4922]: I1128 07:16:27.988516 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstackclient-openstackclient-dockercfg-xggcc" Nov 28 07:16:27 crc kubenswrapper[4922]: I1128 07:16:27.988630 4922 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-config" Nov 28 07:16:28 crc kubenswrapper[4922]: I1128 07:16:28.011395 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstackclient"] Nov 28 07:16:28 crc kubenswrapper[4922]: I1128 07:16:28.113712 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/264e478f-8337-4f40-b005-84a7cd802eaa-openstack-config-secret\") pod \"openstackclient\" (UID: \"264e478f-8337-4f40-b005-84a7cd802eaa\") " pod="openstack/openstackclient" Nov 28 07:16:28 crc kubenswrapper[4922]: I1128 07:16:28.115173 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/264e478f-8337-4f40-b005-84a7cd802eaa-openstack-config\") pod \"openstackclient\" (UID: \"264e478f-8337-4f40-b005-84a7cd802eaa\") " pod="openstack/openstackclient" Nov 28 07:16:28 crc kubenswrapper[4922]: I1128 07:16:28.115439 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bbzbw\" (UniqueName: \"kubernetes.io/projected/264e478f-8337-4f40-b005-84a7cd802eaa-kube-api-access-bbzbw\") pod \"openstackclient\" (UID: \"264e478f-8337-4f40-b005-84a7cd802eaa\") " pod="openstack/openstackclient" Nov 28 07:16:28 crc kubenswrapper[4922]: I1128 07:16:28.115624 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/264e478f-8337-4f40-b005-84a7cd802eaa-combined-ca-bundle\") pod \"openstackclient\" (UID: \"264e478f-8337-4f40-b005-84a7cd802eaa\") " pod="openstack/openstackclient" Nov 28 07:16:28 crc kubenswrapper[4922]: I1128 07:16:28.216708 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/264e478f-8337-4f40-b005-84a7cd802eaa-openstack-config-secret\") pod \"openstackclient\" (UID: \"264e478f-8337-4f40-b005-84a7cd802eaa\") " pod="openstack/openstackclient" Nov 28 07:16:28 crc kubenswrapper[4922]: I1128 07:16:28.216805 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/264e478f-8337-4f40-b005-84a7cd802eaa-openstack-config\") pod \"openstackclient\" (UID: \"264e478f-8337-4f40-b005-84a7cd802eaa\") " pod="openstack/openstackclient" Nov 28 07:16:28 crc kubenswrapper[4922]: I1128 07:16:28.216840 4922 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"kube-api-access-bbzbw\" (UniqueName: \"kubernetes.io/projected/264e478f-8337-4f40-b005-84a7cd802eaa-kube-api-access-bbzbw\") pod \"openstackclient\" (UID: \"264e478f-8337-4f40-b005-84a7cd802eaa\") " pod="openstack/openstackclient" Nov 28 07:16:28 crc kubenswrapper[4922]: I1128 07:16:28.216921 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/264e478f-8337-4f40-b005-84a7cd802eaa-combined-ca-bundle\") pod \"openstackclient\" (UID: \"264e478f-8337-4f40-b005-84a7cd802eaa\") " pod="openstack/openstackclient" Nov 28 07:16:28 crc kubenswrapper[4922]: I1128 07:16:28.218647 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/264e478f-8337-4f40-b005-84a7cd802eaa-openstack-config\") pod \"openstackclient\" (UID: \"264e478f-8337-4f40-b005-84a7cd802eaa\") " pod="openstack/openstackclient" Nov 28 07:16:28 crc kubenswrapper[4922]: I1128 07:16:28.223133 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/264e478f-8337-4f40-b005-84a7cd802eaa-openstack-config-secret\") pod \"openstackclient\" (UID: \"264e478f-8337-4f40-b005-84a7cd802eaa\") " pod="openstack/openstackclient" Nov 28 07:16:28 crc kubenswrapper[4922]: I1128 07:16:28.229967 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/264e478f-8337-4f40-b005-84a7cd802eaa-combined-ca-bundle\") pod \"openstackclient\" (UID: \"264e478f-8337-4f40-b005-84a7cd802eaa\") " pod="openstack/openstackclient" Nov 28 07:16:28 crc kubenswrapper[4922]: I1128 07:16:28.252890 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bbzbw\" (UniqueName: \"kubernetes.io/projected/264e478f-8337-4f40-b005-84a7cd802eaa-kube-api-access-bbzbw\") pod \"openstackclient\" (UID: \"264e478f-8337-4f40-b005-84a7cd802eaa\") " pod="openstack/openstackclient" Nov 28 07:16:28 crc kubenswrapper[4922]: I1128 07:16:28.350913 4922 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/openstackclient" Nov 28 07:16:28 crc kubenswrapper[4922]: I1128 07:16:28.796230 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstackclient"] Nov 28 07:16:28 crc kubenswrapper[4922]: W1128 07:16:28.801097 4922 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod264e478f_8337_4f40_b005_84a7cd802eaa.slice/crio-f51dc12466cd54b432bc566c406c5016dbe8e3b6c56f9bcd22fcfb7f32671cfb WatchSource:0}: Error finding container f51dc12466cd54b432bc566c406c5016dbe8e3b6c56f9bcd22fcfb7f32671cfb: Status 404 returned error can't find the container with id f51dc12466cd54b432bc566c406c5016dbe8e3b6c56f9bcd22fcfb7f32671cfb Nov 28 07:16:29 crc kubenswrapper[4922]: I1128 07:16:29.413545 4922 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="69d50d02-732b-47a3-b342-ff1acbb0af7f" path="/var/lib/kubelet/pods/69d50d02-732b-47a3-b342-ff1acbb0af7f/volumes" Nov 28 07:16:29 crc kubenswrapper[4922]: I1128 07:16:29.700504 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstackclient" event={"ID":"264e478f-8337-4f40-b005-84a7cd802eaa","Type":"ContainerStarted","Data":"f51dc12466cd54b432bc566c406c5016dbe8e3b6c56f9bcd22fcfb7f32671cfb"} Nov 28 07:16:31 crc kubenswrapper[4922]: I1128 07:16:31.687541 4922 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 28 07:16:31 crc kubenswrapper[4922]: I1128 07:16:31.688319 4922 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="9abee755-a9bf-49f0-85ad-e5c58f2e35c2" containerName="ceilometer-central-agent" containerID="cri-o://4813e7caa96017317260102272646b375cfb0680074050a84464f8277f0d586f" gracePeriod=30 Nov 28 07:16:31 crc kubenswrapper[4922]: I1128 07:16:31.691048 4922 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="9abee755-a9bf-49f0-85ad-e5c58f2e35c2" containerName="sg-core" containerID="cri-o://a9691659d3ff87f88c40b63c23edb6d642acab95cce0a8ee57f6220a5ee64034" gracePeriod=30 Nov 28 07:16:31 crc kubenswrapper[4922]: I1128 07:16:31.691186 4922 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="9abee755-a9bf-49f0-85ad-e5c58f2e35c2" containerName="proxy-httpd" containerID="cri-o://24c474c965b970a0b3c56c3c7e70c4cae7c1ae26b1b3ea9a8673e89232f551af" gracePeriod=30 Nov 28 07:16:31 crc kubenswrapper[4922]: I1128 07:16:31.691282 4922 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="9abee755-a9bf-49f0-85ad-e5c58f2e35c2" containerName="ceilometer-notification-agent" containerID="cri-o://8f93d8f601f3f004d218215d678891da73bd9d1c7c1fafa0468e8c068a5d027f" gracePeriod=30 Nov 28 07:16:31 crc kubenswrapper[4922]: I1128 07:16:31.704372 4922 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ceilometer-0" Nov 28 07:16:31 crc kubenswrapper[4922]: I1128 07:16:31.818310 4922 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/swift-proxy-cc5b55cb5-8tgkn"] Nov 28 07:16:31 crc kubenswrapper[4922]: I1128 07:16:31.820041 4922 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/swift-proxy-cc5b55cb5-8tgkn" Nov 28 07:16:31 crc kubenswrapper[4922]: I1128 07:16:31.825670 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-swift-public-svc" Nov 28 07:16:31 crc kubenswrapper[4922]: I1128 07:16:31.825938 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-swift-internal-svc" Nov 28 07:16:31 crc kubenswrapper[4922]: I1128 07:16:31.826157 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"swift-proxy-config-data" Nov 28 07:16:31 crc kubenswrapper[4922]: I1128 07:16:31.848721 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-proxy-cc5b55cb5-8tgkn"] Nov 28 07:16:31 crc kubenswrapper[4922]: I1128 07:16:31.890276 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c10f3b66-a7e0-4690-939a-5938de689b3a-combined-ca-bundle\") pod \"swift-proxy-cc5b55cb5-8tgkn\" (UID: \"c10f3b66-a7e0-4690-939a-5938de689b3a\") " pod="openstack/swift-proxy-cc5b55cb5-8tgkn" Nov 28 07:16:31 crc kubenswrapper[4922]: I1128 07:16:31.890319 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/c10f3b66-a7e0-4690-939a-5938de689b3a-run-httpd\") pod \"swift-proxy-cc5b55cb5-8tgkn\" (UID: \"c10f3b66-a7e0-4690-939a-5938de689b3a\") " pod="openstack/swift-proxy-cc5b55cb5-8tgkn" Nov 28 07:16:31 crc kubenswrapper[4922]: I1128 07:16:31.890344 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c10f3b66-a7e0-4690-939a-5938de689b3a-config-data\") pod \"swift-proxy-cc5b55cb5-8tgkn\" (UID: \"c10f3b66-a7e0-4690-939a-5938de689b3a\") " pod="openstack/swift-proxy-cc5b55cb5-8tgkn" Nov 28 07:16:31 crc kubenswrapper[4922]: I1128 07:16:31.890359 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/c10f3b66-a7e0-4690-939a-5938de689b3a-log-httpd\") pod \"swift-proxy-cc5b55cb5-8tgkn\" (UID: \"c10f3b66-a7e0-4690-939a-5938de689b3a\") " pod="openstack/swift-proxy-cc5b55cb5-8tgkn" Nov 28 07:16:31 crc kubenswrapper[4922]: I1128 07:16:31.890382 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/c10f3b66-a7e0-4690-939a-5938de689b3a-public-tls-certs\") pod \"swift-proxy-cc5b55cb5-8tgkn\" (UID: \"c10f3b66-a7e0-4690-939a-5938de689b3a\") " pod="openstack/swift-proxy-cc5b55cb5-8tgkn" Nov 28 07:16:31 crc kubenswrapper[4922]: I1128 07:16:31.890399 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/c10f3b66-a7e0-4690-939a-5938de689b3a-internal-tls-certs\") pod \"swift-proxy-cc5b55cb5-8tgkn\" (UID: \"c10f3b66-a7e0-4690-939a-5938de689b3a\") " pod="openstack/swift-proxy-cc5b55cb5-8tgkn" Nov 28 07:16:31 crc kubenswrapper[4922]: I1128 07:16:31.890452 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/c10f3b66-a7e0-4690-939a-5938de689b3a-etc-swift\") pod \"swift-proxy-cc5b55cb5-8tgkn\" (UID: \"c10f3b66-a7e0-4690-939a-5938de689b3a\") " pod="openstack/swift-proxy-cc5b55cb5-8tgkn" Nov 28 
07:16:31 crc kubenswrapper[4922]: I1128 07:16:31.890491 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tjjxw\" (UniqueName: \"kubernetes.io/projected/c10f3b66-a7e0-4690-939a-5938de689b3a-kube-api-access-tjjxw\") pod \"swift-proxy-cc5b55cb5-8tgkn\" (UID: \"c10f3b66-a7e0-4690-939a-5938de689b3a\") " pod="openstack/swift-proxy-cc5b55cb5-8tgkn" Nov 28 07:16:31 crc kubenswrapper[4922]: I1128 07:16:31.994353 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tjjxw\" (UniqueName: \"kubernetes.io/projected/c10f3b66-a7e0-4690-939a-5938de689b3a-kube-api-access-tjjxw\") pod \"swift-proxy-cc5b55cb5-8tgkn\" (UID: \"c10f3b66-a7e0-4690-939a-5938de689b3a\") " pod="openstack/swift-proxy-cc5b55cb5-8tgkn" Nov 28 07:16:31 crc kubenswrapper[4922]: I1128 07:16:31.994460 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c10f3b66-a7e0-4690-939a-5938de689b3a-combined-ca-bundle\") pod \"swift-proxy-cc5b55cb5-8tgkn\" (UID: \"c10f3b66-a7e0-4690-939a-5938de689b3a\") " pod="openstack/swift-proxy-cc5b55cb5-8tgkn" Nov 28 07:16:31 crc kubenswrapper[4922]: I1128 07:16:31.994490 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/c10f3b66-a7e0-4690-939a-5938de689b3a-run-httpd\") pod \"swift-proxy-cc5b55cb5-8tgkn\" (UID: \"c10f3b66-a7e0-4690-939a-5938de689b3a\") " pod="openstack/swift-proxy-cc5b55cb5-8tgkn" Nov 28 07:16:31 crc kubenswrapper[4922]: I1128 07:16:31.994514 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c10f3b66-a7e0-4690-939a-5938de689b3a-config-data\") pod \"swift-proxy-cc5b55cb5-8tgkn\" (UID: \"c10f3b66-a7e0-4690-939a-5938de689b3a\") " pod="openstack/swift-proxy-cc5b55cb5-8tgkn" Nov 28 07:16:31 crc kubenswrapper[4922]: I1128 07:16:31.994535 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/c10f3b66-a7e0-4690-939a-5938de689b3a-log-httpd\") pod \"swift-proxy-cc5b55cb5-8tgkn\" (UID: \"c10f3b66-a7e0-4690-939a-5938de689b3a\") " pod="openstack/swift-proxy-cc5b55cb5-8tgkn" Nov 28 07:16:31 crc kubenswrapper[4922]: I1128 07:16:31.994568 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/c10f3b66-a7e0-4690-939a-5938de689b3a-public-tls-certs\") pod \"swift-proxy-cc5b55cb5-8tgkn\" (UID: \"c10f3b66-a7e0-4690-939a-5938de689b3a\") " pod="openstack/swift-proxy-cc5b55cb5-8tgkn" Nov 28 07:16:31 crc kubenswrapper[4922]: I1128 07:16:31.994591 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/c10f3b66-a7e0-4690-939a-5938de689b3a-internal-tls-certs\") pod \"swift-proxy-cc5b55cb5-8tgkn\" (UID: \"c10f3b66-a7e0-4690-939a-5938de689b3a\") " pod="openstack/swift-proxy-cc5b55cb5-8tgkn" Nov 28 07:16:31 crc kubenswrapper[4922]: I1128 07:16:31.994659 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/c10f3b66-a7e0-4690-939a-5938de689b3a-etc-swift\") pod \"swift-proxy-cc5b55cb5-8tgkn\" (UID: \"c10f3b66-a7e0-4690-939a-5938de689b3a\") " pod="openstack/swift-proxy-cc5b55cb5-8tgkn" Nov 28 07:16:31 crc kubenswrapper[4922]: I1128 
07:16:31.996779 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/c10f3b66-a7e0-4690-939a-5938de689b3a-log-httpd\") pod \"swift-proxy-cc5b55cb5-8tgkn\" (UID: \"c10f3b66-a7e0-4690-939a-5938de689b3a\") " pod="openstack/swift-proxy-cc5b55cb5-8tgkn" Nov 28 07:16:31 crc kubenswrapper[4922]: I1128 07:16:31.996860 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/c10f3b66-a7e0-4690-939a-5938de689b3a-run-httpd\") pod \"swift-proxy-cc5b55cb5-8tgkn\" (UID: \"c10f3b66-a7e0-4690-939a-5938de689b3a\") " pod="openstack/swift-proxy-cc5b55cb5-8tgkn" Nov 28 07:16:32 crc kubenswrapper[4922]: I1128 07:16:32.002033 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c10f3b66-a7e0-4690-939a-5938de689b3a-combined-ca-bundle\") pod \"swift-proxy-cc5b55cb5-8tgkn\" (UID: \"c10f3b66-a7e0-4690-939a-5938de689b3a\") " pod="openstack/swift-proxy-cc5b55cb5-8tgkn" Nov 28 07:16:32 crc kubenswrapper[4922]: I1128 07:16:32.003518 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/c10f3b66-a7e0-4690-939a-5938de689b3a-etc-swift\") pod \"swift-proxy-cc5b55cb5-8tgkn\" (UID: \"c10f3b66-a7e0-4690-939a-5938de689b3a\") " pod="openstack/swift-proxy-cc5b55cb5-8tgkn" Nov 28 07:16:32 crc kubenswrapper[4922]: I1128 07:16:32.003625 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/c10f3b66-a7e0-4690-939a-5938de689b3a-internal-tls-certs\") pod \"swift-proxy-cc5b55cb5-8tgkn\" (UID: \"c10f3b66-a7e0-4690-939a-5938de689b3a\") " pod="openstack/swift-proxy-cc5b55cb5-8tgkn" Nov 28 07:16:32 crc kubenswrapper[4922]: I1128 07:16:32.003714 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/c10f3b66-a7e0-4690-939a-5938de689b3a-public-tls-certs\") pod \"swift-proxy-cc5b55cb5-8tgkn\" (UID: \"c10f3b66-a7e0-4690-939a-5938de689b3a\") " pod="openstack/swift-proxy-cc5b55cb5-8tgkn" Nov 28 07:16:32 crc kubenswrapper[4922]: I1128 07:16:32.009451 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c10f3b66-a7e0-4690-939a-5938de689b3a-config-data\") pod \"swift-proxy-cc5b55cb5-8tgkn\" (UID: \"c10f3b66-a7e0-4690-939a-5938de689b3a\") " pod="openstack/swift-proxy-cc5b55cb5-8tgkn" Nov 28 07:16:32 crc kubenswrapper[4922]: I1128 07:16:32.018809 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tjjxw\" (UniqueName: \"kubernetes.io/projected/c10f3b66-a7e0-4690-939a-5938de689b3a-kube-api-access-tjjxw\") pod \"swift-proxy-cc5b55cb5-8tgkn\" (UID: \"c10f3b66-a7e0-4690-939a-5938de689b3a\") " pod="openstack/swift-proxy-cc5b55cb5-8tgkn" Nov 28 07:16:32 crc kubenswrapper[4922]: I1128 07:16:32.204350 4922 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/swift-proxy-cc5b55cb5-8tgkn" Nov 28 07:16:32 crc kubenswrapper[4922]: I1128 07:16:32.735842 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-proxy-cc5b55cb5-8tgkn"] Nov 28 07:16:32 crc kubenswrapper[4922]: W1128 07:16:32.738211 4922 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podc10f3b66_a7e0_4690_939a_5938de689b3a.slice/crio-8066483262cfa9fff7a1529c49ca1ea9c235d6c9dadc545550e5346dee8a9c67 WatchSource:0}: Error finding container 8066483262cfa9fff7a1529c49ca1ea9c235d6c9dadc545550e5346dee8a9c67: Status 404 returned error can't find the container with id 8066483262cfa9fff7a1529c49ca1ea9c235d6c9dadc545550e5346dee8a9c67 Nov 28 07:16:32 crc kubenswrapper[4922]: I1128 07:16:32.754410 4922 generic.go:334] "Generic (PLEG): container finished" podID="9abee755-a9bf-49f0-85ad-e5c58f2e35c2" containerID="24c474c965b970a0b3c56c3c7e70c4cae7c1ae26b1b3ea9a8673e89232f551af" exitCode=0 Nov 28 07:16:32 crc kubenswrapper[4922]: I1128 07:16:32.754467 4922 generic.go:334] "Generic (PLEG): container finished" podID="9abee755-a9bf-49f0-85ad-e5c58f2e35c2" containerID="a9691659d3ff87f88c40b63c23edb6d642acab95cce0a8ee57f6220a5ee64034" exitCode=2 Nov 28 07:16:32 crc kubenswrapper[4922]: I1128 07:16:32.754479 4922 generic.go:334] "Generic (PLEG): container finished" podID="9abee755-a9bf-49f0-85ad-e5c58f2e35c2" containerID="4813e7caa96017317260102272646b375cfb0680074050a84464f8277f0d586f" exitCode=0 Nov 28 07:16:32 crc kubenswrapper[4922]: I1128 07:16:32.754506 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"9abee755-a9bf-49f0-85ad-e5c58f2e35c2","Type":"ContainerDied","Data":"24c474c965b970a0b3c56c3c7e70c4cae7c1ae26b1b3ea9a8673e89232f551af"} Nov 28 07:16:32 crc kubenswrapper[4922]: I1128 07:16:32.754552 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"9abee755-a9bf-49f0-85ad-e5c58f2e35c2","Type":"ContainerDied","Data":"a9691659d3ff87f88c40b63c23edb6d642acab95cce0a8ee57f6220a5ee64034"} Nov 28 07:16:32 crc kubenswrapper[4922]: I1128 07:16:32.754564 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"9abee755-a9bf-49f0-85ad-e5c58f2e35c2","Type":"ContainerDied","Data":"4813e7caa96017317260102272646b375cfb0680074050a84464f8277f0d586f"} Nov 28 07:16:32 crc kubenswrapper[4922]: I1128 07:16:32.756561 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-proxy-cc5b55cb5-8tgkn" event={"ID":"c10f3b66-a7e0-4690-939a-5938de689b3a","Type":"ContainerStarted","Data":"8066483262cfa9fff7a1529c49ca1ea9c235d6c9dadc545550e5346dee8a9c67"} Nov 28 07:16:33 crc kubenswrapper[4922]: I1128 07:16:33.203884 4922 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/cinder-scheduler-0" Nov 28 07:16:33 crc kubenswrapper[4922]: I1128 07:16:33.769989 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-proxy-cc5b55cb5-8tgkn" event={"ID":"c10f3b66-a7e0-4690-939a-5938de689b3a","Type":"ContainerStarted","Data":"2cd63c61c70658881bcd06ab76b9b69600a15655926ae05749a525c4458f85a1"} Nov 28 07:16:34 crc kubenswrapper[4922]: I1128 07:16:34.781827 4922 generic.go:334] "Generic (PLEG): container finished" podID="9abee755-a9bf-49f0-85ad-e5c58f2e35c2" containerID="8f93d8f601f3f004d218215d678891da73bd9d1c7c1fafa0468e8c068a5d027f" exitCode=0 Nov 28 07:16:34 crc kubenswrapper[4922]: I1128 07:16:34.782025 4922 
kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"9abee755-a9bf-49f0-85ad-e5c58f2e35c2","Type":"ContainerDied","Data":"8f93d8f601f3f004d218215d678891da73bd9d1c7c1fafa0468e8c068a5d027f"} Nov 28 07:16:36 crc kubenswrapper[4922]: I1128 07:16:36.170572 4922 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-db-create-85r25"] Nov 28 07:16:36 crc kubenswrapper[4922]: I1128 07:16:36.171961 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-db-create-85r25" Nov 28 07:16:36 crc kubenswrapper[4922]: I1128 07:16:36.182130 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-db-create-85r25"] Nov 28 07:16:36 crc kubenswrapper[4922]: I1128 07:16:36.244945 4922 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-db-create-8tbc2"] Nov 28 07:16:36 crc kubenswrapper[4922]: I1128 07:16:36.247169 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-db-create-8tbc2" Nov 28 07:16:36 crc kubenswrapper[4922]: I1128 07:16:36.255345 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-db-create-8tbc2"] Nov 28 07:16:36 crc kubenswrapper[4922]: I1128 07:16:36.271101 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vxzmb\" (UniqueName: \"kubernetes.io/projected/c77ee72a-1c8d-4994-8d07-a0338a55489a-kube-api-access-vxzmb\") pod \"nova-api-db-create-85r25\" (UID: \"c77ee72a-1c8d-4994-8d07-a0338a55489a\") " pod="openstack/nova-api-db-create-85r25" Nov 28 07:16:36 crc kubenswrapper[4922]: I1128 07:16:36.271157 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c77ee72a-1c8d-4994-8d07-a0338a55489a-operator-scripts\") pod \"nova-api-db-create-85r25\" (UID: \"c77ee72a-1c8d-4994-8d07-a0338a55489a\") " pod="openstack/nova-api-db-create-85r25" Nov 28 07:16:36 crc kubenswrapper[4922]: I1128 07:16:36.352278 4922 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-db-create-qp85z"] Nov 28 07:16:36 crc kubenswrapper[4922]: I1128 07:16:36.353687 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-db-create-qp85z" Nov 28 07:16:36 crc kubenswrapper[4922]: I1128 07:16:36.362646 4922 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-78f1-account-create-update-chz7p"] Nov 28 07:16:36 crc kubenswrapper[4922]: I1128 07:16:36.363745 4922 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-78f1-account-create-update-chz7p" Nov 28 07:16:36 crc kubenswrapper[4922]: I1128 07:16:36.371205 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-db-secret" Nov 28 07:16:36 crc kubenswrapper[4922]: I1128 07:16:36.372262 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vxzmb\" (UniqueName: \"kubernetes.io/projected/c77ee72a-1c8d-4994-8d07-a0338a55489a-kube-api-access-vxzmb\") pod \"nova-api-db-create-85r25\" (UID: \"c77ee72a-1c8d-4994-8d07-a0338a55489a\") " pod="openstack/nova-api-db-create-85r25" Nov 28 07:16:36 crc kubenswrapper[4922]: I1128 07:16:36.372457 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c77ee72a-1c8d-4994-8d07-a0338a55489a-operator-scripts\") pod \"nova-api-db-create-85r25\" (UID: \"c77ee72a-1c8d-4994-8d07-a0338a55489a\") " pod="openstack/nova-api-db-create-85r25" Nov 28 07:16:36 crc kubenswrapper[4922]: I1128 07:16:36.372600 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/30134dd7-1b96-4057-8065-c3e59e70baff-operator-scripts\") pod \"nova-cell0-db-create-8tbc2\" (UID: \"30134dd7-1b96-4057-8065-c3e59e70baff\") " pod="openstack/nova-cell0-db-create-8tbc2" Nov 28 07:16:36 crc kubenswrapper[4922]: I1128 07:16:36.372735 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vqdd9\" (UniqueName: \"kubernetes.io/projected/30134dd7-1b96-4057-8065-c3e59e70baff-kube-api-access-vqdd9\") pod \"nova-cell0-db-create-8tbc2\" (UID: \"30134dd7-1b96-4057-8065-c3e59e70baff\") " pod="openstack/nova-cell0-db-create-8tbc2" Nov 28 07:16:36 crc kubenswrapper[4922]: I1128 07:16:36.373128 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c77ee72a-1c8d-4994-8d07-a0338a55489a-operator-scripts\") pod \"nova-api-db-create-85r25\" (UID: \"c77ee72a-1c8d-4994-8d07-a0338a55489a\") " pod="openstack/nova-api-db-create-85r25" Nov 28 07:16:36 crc kubenswrapper[4922]: I1128 07:16:36.388482 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-78f1-account-create-update-chz7p"] Nov 28 07:16:36 crc kubenswrapper[4922]: I1128 07:16:36.391096 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vxzmb\" (UniqueName: \"kubernetes.io/projected/c77ee72a-1c8d-4994-8d07-a0338a55489a-kube-api-access-vxzmb\") pod \"nova-api-db-create-85r25\" (UID: \"c77ee72a-1c8d-4994-8d07-a0338a55489a\") " pod="openstack/nova-api-db-create-85r25" Nov 28 07:16:36 crc kubenswrapper[4922]: I1128 07:16:36.411711 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-db-create-qp85z"] Nov 28 07:16:36 crc kubenswrapper[4922]: I1128 07:16:36.474819 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lz694\" (UniqueName: \"kubernetes.io/projected/b1e1fd22-8937-4a32-bf98-06e3655deb07-kube-api-access-lz694\") pod \"nova-api-78f1-account-create-update-chz7p\" (UID: \"b1e1fd22-8937-4a32-bf98-06e3655deb07\") " pod="openstack/nova-api-78f1-account-create-update-chz7p" Nov 28 07:16:36 crc kubenswrapper[4922]: I1128 07:16:36.474886 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started 
for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b1e1fd22-8937-4a32-bf98-06e3655deb07-operator-scripts\") pod \"nova-api-78f1-account-create-update-chz7p\" (UID: \"b1e1fd22-8937-4a32-bf98-06e3655deb07\") " pod="openstack/nova-api-78f1-account-create-update-chz7p" Nov 28 07:16:36 crc kubenswrapper[4922]: I1128 07:16:36.474908 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vqdd9\" (UniqueName: \"kubernetes.io/projected/30134dd7-1b96-4057-8065-c3e59e70baff-kube-api-access-vqdd9\") pod \"nova-cell0-db-create-8tbc2\" (UID: \"30134dd7-1b96-4057-8065-c3e59e70baff\") " pod="openstack/nova-cell0-db-create-8tbc2" Nov 28 07:16:36 crc kubenswrapper[4922]: I1128 07:16:36.474957 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/daea5abd-a0b5-4504-a7e5-53ec88446745-operator-scripts\") pod \"nova-cell1-db-create-qp85z\" (UID: \"daea5abd-a0b5-4504-a7e5-53ec88446745\") " pod="openstack/nova-cell1-db-create-qp85z" Nov 28 07:16:36 crc kubenswrapper[4922]: I1128 07:16:36.475086 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/30134dd7-1b96-4057-8065-c3e59e70baff-operator-scripts\") pod \"nova-cell0-db-create-8tbc2\" (UID: \"30134dd7-1b96-4057-8065-c3e59e70baff\") " pod="openstack/nova-cell0-db-create-8tbc2" Nov 28 07:16:36 crc kubenswrapper[4922]: I1128 07:16:36.475124 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-l5r5v\" (UniqueName: \"kubernetes.io/projected/daea5abd-a0b5-4504-a7e5-53ec88446745-kube-api-access-l5r5v\") pod \"nova-cell1-db-create-qp85z\" (UID: \"daea5abd-a0b5-4504-a7e5-53ec88446745\") " pod="openstack/nova-cell1-db-create-qp85z" Nov 28 07:16:36 crc kubenswrapper[4922]: I1128 07:16:36.477258 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/30134dd7-1b96-4057-8065-c3e59e70baff-operator-scripts\") pod \"nova-cell0-db-create-8tbc2\" (UID: \"30134dd7-1b96-4057-8065-c3e59e70baff\") " pod="openstack/nova-cell0-db-create-8tbc2" Nov 28 07:16:36 crc kubenswrapper[4922]: I1128 07:16:36.492049 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-db-create-85r25" Nov 28 07:16:36 crc kubenswrapper[4922]: I1128 07:16:36.492977 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vqdd9\" (UniqueName: \"kubernetes.io/projected/30134dd7-1b96-4057-8065-c3e59e70baff-kube-api-access-vqdd9\") pod \"nova-cell0-db-create-8tbc2\" (UID: \"30134dd7-1b96-4057-8065-c3e59e70baff\") " pod="openstack/nova-cell0-db-create-8tbc2" Nov 28 07:16:36 crc kubenswrapper[4922]: I1128 07:16:36.556238 4922 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-32a6-account-create-update-j752d"] Nov 28 07:16:36 crc kubenswrapper[4922]: I1128 07:16:36.560826 4922 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-32a6-account-create-update-j752d" Nov 28 07:16:36 crc kubenswrapper[4922]: I1128 07:16:36.563146 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-db-secret" Nov 28 07:16:36 crc kubenswrapper[4922]: I1128 07:16:36.578458 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b1e1fd22-8937-4a32-bf98-06e3655deb07-operator-scripts\") pod \"nova-api-78f1-account-create-update-chz7p\" (UID: \"b1e1fd22-8937-4a32-bf98-06e3655deb07\") " pod="openstack/nova-api-78f1-account-create-update-chz7p" Nov 28 07:16:36 crc kubenswrapper[4922]: I1128 07:16:36.578521 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/daea5abd-a0b5-4504-a7e5-53ec88446745-operator-scripts\") pod \"nova-cell1-db-create-qp85z\" (UID: \"daea5abd-a0b5-4504-a7e5-53ec88446745\") " pod="openstack/nova-cell1-db-create-qp85z" Nov 28 07:16:36 crc kubenswrapper[4922]: I1128 07:16:36.579416 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b1e1fd22-8937-4a32-bf98-06e3655deb07-operator-scripts\") pod \"nova-api-78f1-account-create-update-chz7p\" (UID: \"b1e1fd22-8937-4a32-bf98-06e3655deb07\") " pod="openstack/nova-api-78f1-account-create-update-chz7p" Nov 28 07:16:36 crc kubenswrapper[4922]: I1128 07:16:36.579590 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-l5r5v\" (UniqueName: \"kubernetes.io/projected/daea5abd-a0b5-4504-a7e5-53ec88446745-kube-api-access-l5r5v\") pod \"nova-cell1-db-create-qp85z\" (UID: \"daea5abd-a0b5-4504-a7e5-53ec88446745\") " pod="openstack/nova-cell1-db-create-qp85z" Nov 28 07:16:36 crc kubenswrapper[4922]: I1128 07:16:36.579640 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lz694\" (UniqueName: \"kubernetes.io/projected/b1e1fd22-8937-4a32-bf98-06e3655deb07-kube-api-access-lz694\") pod \"nova-api-78f1-account-create-update-chz7p\" (UID: \"b1e1fd22-8937-4a32-bf98-06e3655deb07\") " pod="openstack/nova-api-78f1-account-create-update-chz7p" Nov 28 07:16:36 crc kubenswrapper[4922]: I1128 07:16:36.579729 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/daea5abd-a0b5-4504-a7e5-53ec88446745-operator-scripts\") pod \"nova-cell1-db-create-qp85z\" (UID: \"daea5abd-a0b5-4504-a7e5-53ec88446745\") " pod="openstack/nova-cell1-db-create-qp85z" Nov 28 07:16:36 crc kubenswrapper[4922]: I1128 07:16:36.581067 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-32a6-account-create-update-j752d"] Nov 28 07:16:36 crc kubenswrapper[4922]: I1128 07:16:36.597480 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lz694\" (UniqueName: \"kubernetes.io/projected/b1e1fd22-8937-4a32-bf98-06e3655deb07-kube-api-access-lz694\") pod \"nova-api-78f1-account-create-update-chz7p\" (UID: \"b1e1fd22-8937-4a32-bf98-06e3655deb07\") " pod="openstack/nova-api-78f1-account-create-update-chz7p" Nov 28 07:16:36 crc kubenswrapper[4922]: I1128 07:16:36.605652 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-l5r5v\" (UniqueName: \"kubernetes.io/projected/daea5abd-a0b5-4504-a7e5-53ec88446745-kube-api-access-l5r5v\") pod 
\"nova-cell1-db-create-qp85z\" (UID: \"daea5abd-a0b5-4504-a7e5-53ec88446745\") " pod="openstack/nova-cell1-db-create-qp85z" Nov 28 07:16:36 crc kubenswrapper[4922]: I1128 07:16:36.607301 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-db-create-8tbc2" Nov 28 07:16:36 crc kubenswrapper[4922]: I1128 07:16:36.678520 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-db-create-qp85z" Nov 28 07:16:36 crc kubenswrapper[4922]: I1128 07:16:36.682494 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bk9qv\" (UniqueName: \"kubernetes.io/projected/932af8b2-8cfa-4807-a6a4-c2cb82e2bf82-kube-api-access-bk9qv\") pod \"nova-cell0-32a6-account-create-update-j752d\" (UID: \"932af8b2-8cfa-4807-a6a4-c2cb82e2bf82\") " pod="openstack/nova-cell0-32a6-account-create-update-j752d" Nov 28 07:16:36 crc kubenswrapper[4922]: I1128 07:16:36.682536 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/932af8b2-8cfa-4807-a6a4-c2cb82e2bf82-operator-scripts\") pod \"nova-cell0-32a6-account-create-update-j752d\" (UID: \"932af8b2-8cfa-4807-a6a4-c2cb82e2bf82\") " pod="openstack/nova-cell0-32a6-account-create-update-j752d" Nov 28 07:16:36 crc kubenswrapper[4922]: I1128 07:16:36.686112 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-78f1-account-create-update-chz7p" Nov 28 07:16:36 crc kubenswrapper[4922]: I1128 07:16:36.762150 4922 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-18f0-account-create-update-4rjkq"] Nov 28 07:16:36 crc kubenswrapper[4922]: I1128 07:16:36.763302 4922 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-18f0-account-create-update-4rjkq" Nov 28 07:16:36 crc kubenswrapper[4922]: I1128 07:16:36.765764 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-db-secret" Nov 28 07:16:36 crc kubenswrapper[4922]: I1128 07:16:36.772101 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-18f0-account-create-update-4rjkq"] Nov 28 07:16:36 crc kubenswrapper[4922]: I1128 07:16:36.784206 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bk9qv\" (UniqueName: \"kubernetes.io/projected/932af8b2-8cfa-4807-a6a4-c2cb82e2bf82-kube-api-access-bk9qv\") pod \"nova-cell0-32a6-account-create-update-j752d\" (UID: \"932af8b2-8cfa-4807-a6a4-c2cb82e2bf82\") " pod="openstack/nova-cell0-32a6-account-create-update-j752d" Nov 28 07:16:36 crc kubenswrapper[4922]: I1128 07:16:36.784293 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/932af8b2-8cfa-4807-a6a4-c2cb82e2bf82-operator-scripts\") pod \"nova-cell0-32a6-account-create-update-j752d\" (UID: \"932af8b2-8cfa-4807-a6a4-c2cb82e2bf82\") " pod="openstack/nova-cell0-32a6-account-create-update-j752d" Nov 28 07:16:36 crc kubenswrapper[4922]: I1128 07:16:36.785234 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/932af8b2-8cfa-4807-a6a4-c2cb82e2bf82-operator-scripts\") pod \"nova-cell0-32a6-account-create-update-j752d\" (UID: \"932af8b2-8cfa-4807-a6a4-c2cb82e2bf82\") " pod="openstack/nova-cell0-32a6-account-create-update-j752d" Nov 28 07:16:36 crc kubenswrapper[4922]: I1128 07:16:36.801415 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bk9qv\" (UniqueName: \"kubernetes.io/projected/932af8b2-8cfa-4807-a6a4-c2cb82e2bf82-kube-api-access-bk9qv\") pod \"nova-cell0-32a6-account-create-update-j752d\" (UID: \"932af8b2-8cfa-4807-a6a4-c2cb82e2bf82\") " pod="openstack/nova-cell0-32a6-account-create-update-j752d" Nov 28 07:16:36 crc kubenswrapper[4922]: I1128 07:16:36.889243 4922 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-32a6-account-create-update-j752d" Nov 28 07:16:36 crc kubenswrapper[4922]: I1128 07:16:36.923827 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/067b3033-51f7-4b75-a86b-f3e666119c7f-operator-scripts\") pod \"nova-cell1-18f0-account-create-update-4rjkq\" (UID: \"067b3033-51f7-4b75-a86b-f3e666119c7f\") " pod="openstack/nova-cell1-18f0-account-create-update-4rjkq" Nov 28 07:16:36 crc kubenswrapper[4922]: I1128 07:16:36.923972 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sf4dh\" (UniqueName: \"kubernetes.io/projected/067b3033-51f7-4b75-a86b-f3e666119c7f-kube-api-access-sf4dh\") pod \"nova-cell1-18f0-account-create-update-4rjkq\" (UID: \"067b3033-51f7-4b75-a86b-f3e666119c7f\") " pod="openstack/nova-cell1-18f0-account-create-update-4rjkq" Nov 28 07:16:37 crc kubenswrapper[4922]: I1128 07:16:37.025711 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/067b3033-51f7-4b75-a86b-f3e666119c7f-operator-scripts\") pod \"nova-cell1-18f0-account-create-update-4rjkq\" (UID: \"067b3033-51f7-4b75-a86b-f3e666119c7f\") " pod="openstack/nova-cell1-18f0-account-create-update-4rjkq" Nov 28 07:16:37 crc kubenswrapper[4922]: I1128 07:16:37.025792 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sf4dh\" (UniqueName: \"kubernetes.io/projected/067b3033-51f7-4b75-a86b-f3e666119c7f-kube-api-access-sf4dh\") pod \"nova-cell1-18f0-account-create-update-4rjkq\" (UID: \"067b3033-51f7-4b75-a86b-f3e666119c7f\") " pod="openstack/nova-cell1-18f0-account-create-update-4rjkq" Nov 28 07:16:37 crc kubenswrapper[4922]: I1128 07:16:37.026808 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/067b3033-51f7-4b75-a86b-f3e666119c7f-operator-scripts\") pod \"nova-cell1-18f0-account-create-update-4rjkq\" (UID: \"067b3033-51f7-4b75-a86b-f3e666119c7f\") " pod="openstack/nova-cell1-18f0-account-create-update-4rjkq" Nov 28 07:16:37 crc kubenswrapper[4922]: I1128 07:16:37.046287 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sf4dh\" (UniqueName: \"kubernetes.io/projected/067b3033-51f7-4b75-a86b-f3e666119c7f-kube-api-access-sf4dh\") pod \"nova-cell1-18f0-account-create-update-4rjkq\" (UID: \"067b3033-51f7-4b75-a86b-f3e666119c7f\") " pod="openstack/nova-cell1-18f0-account-create-update-4rjkq" Nov 28 07:16:37 crc kubenswrapper[4922]: I1128 07:16:37.092106 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-18f0-account-create-update-4rjkq" Nov 28 07:16:38 crc kubenswrapper[4922]: I1128 07:16:38.854907 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"9abee755-a9bf-49f0-85ad-e5c58f2e35c2","Type":"ContainerDied","Data":"d73add646b1c847abfeacd9d36c803085131b91826db108b4d0d7ba0b0e9b646"} Nov 28 07:16:38 crc kubenswrapper[4922]: I1128 07:16:38.855416 4922 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="d73add646b1c847abfeacd9d36c803085131b91826db108b4d0d7ba0b0e9b646" Nov 28 07:16:38 crc kubenswrapper[4922]: I1128 07:16:38.858233 4922 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 28 07:16:38 crc kubenswrapper[4922]: I1128 07:16:38.970580 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9abee755-a9bf-49f0-85ad-e5c58f2e35c2-config-data\") pod \"9abee755-a9bf-49f0-85ad-e5c58f2e35c2\" (UID: \"9abee755-a9bf-49f0-85ad-e5c58f2e35c2\") " Nov 28 07:16:38 crc kubenswrapper[4922]: I1128 07:16:38.970657 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9abee755-a9bf-49f0-85ad-e5c58f2e35c2-combined-ca-bundle\") pod \"9abee755-a9bf-49f0-85ad-e5c58f2e35c2\" (UID: \"9abee755-a9bf-49f0-85ad-e5c58f2e35c2\") " Nov 28 07:16:38 crc kubenswrapper[4922]: I1128 07:16:38.970706 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/9abee755-a9bf-49f0-85ad-e5c58f2e35c2-run-httpd\") pod \"9abee755-a9bf-49f0-85ad-e5c58f2e35c2\" (UID: \"9abee755-a9bf-49f0-85ad-e5c58f2e35c2\") " Nov 28 07:16:38 crc kubenswrapper[4922]: I1128 07:16:38.970762 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vlbl6\" (UniqueName: \"kubernetes.io/projected/9abee755-a9bf-49f0-85ad-e5c58f2e35c2-kube-api-access-vlbl6\") pod \"9abee755-a9bf-49f0-85ad-e5c58f2e35c2\" (UID: \"9abee755-a9bf-49f0-85ad-e5c58f2e35c2\") " Nov 28 07:16:38 crc kubenswrapper[4922]: I1128 07:16:38.970856 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/9abee755-a9bf-49f0-85ad-e5c58f2e35c2-log-httpd\") pod \"9abee755-a9bf-49f0-85ad-e5c58f2e35c2\" (UID: \"9abee755-a9bf-49f0-85ad-e5c58f2e35c2\") " Nov 28 07:16:38 crc kubenswrapper[4922]: I1128 07:16:38.970870 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/9abee755-a9bf-49f0-85ad-e5c58f2e35c2-scripts\") pod \"9abee755-a9bf-49f0-85ad-e5c58f2e35c2\" (UID: \"9abee755-a9bf-49f0-85ad-e5c58f2e35c2\") " Nov 28 07:16:38 crc kubenswrapper[4922]: I1128 07:16:38.970894 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/9abee755-a9bf-49f0-85ad-e5c58f2e35c2-sg-core-conf-yaml\") pod \"9abee755-a9bf-49f0-85ad-e5c58f2e35c2\" (UID: \"9abee755-a9bf-49f0-85ad-e5c58f2e35c2\") " Nov 28 07:16:38 crc kubenswrapper[4922]: I1128 07:16:38.971834 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9abee755-a9bf-49f0-85ad-e5c58f2e35c2-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "9abee755-a9bf-49f0-85ad-e5c58f2e35c2" (UID: "9abee755-a9bf-49f0-85ad-e5c58f2e35c2"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 07:16:38 crc kubenswrapper[4922]: I1128 07:16:38.972122 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9abee755-a9bf-49f0-85ad-e5c58f2e35c2-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "9abee755-a9bf-49f0-85ad-e5c58f2e35c2" (UID: "9abee755-a9bf-49f0-85ad-e5c58f2e35c2"). InnerVolumeSpecName "log-httpd". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 07:16:38 crc kubenswrapper[4922]: I1128 07:16:38.975986 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9abee755-a9bf-49f0-85ad-e5c58f2e35c2-kube-api-access-vlbl6" (OuterVolumeSpecName: "kube-api-access-vlbl6") pod "9abee755-a9bf-49f0-85ad-e5c58f2e35c2" (UID: "9abee755-a9bf-49f0-85ad-e5c58f2e35c2"). InnerVolumeSpecName "kube-api-access-vlbl6". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 07:16:38 crc kubenswrapper[4922]: I1128 07:16:38.977660 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9abee755-a9bf-49f0-85ad-e5c58f2e35c2-scripts" (OuterVolumeSpecName: "scripts") pod "9abee755-a9bf-49f0-85ad-e5c58f2e35c2" (UID: "9abee755-a9bf-49f0-85ad-e5c58f2e35c2"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 07:16:39 crc kubenswrapper[4922]: I1128 07:16:39.003483 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9abee755-a9bf-49f0-85ad-e5c58f2e35c2-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "9abee755-a9bf-49f0-85ad-e5c58f2e35c2" (UID: "9abee755-a9bf-49f0-85ad-e5c58f2e35c2"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 07:16:39 crc kubenswrapper[4922]: I1128 07:16:39.045359 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9abee755-a9bf-49f0-85ad-e5c58f2e35c2-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "9abee755-a9bf-49f0-85ad-e5c58f2e35c2" (UID: "9abee755-a9bf-49f0-85ad-e5c58f2e35c2"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 07:16:39 crc kubenswrapper[4922]: I1128 07:16:39.072616 4922 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/9abee755-a9bf-49f0-85ad-e5c58f2e35c2-run-httpd\") on node \"crc\" DevicePath \"\"" Nov 28 07:16:39 crc kubenswrapper[4922]: I1128 07:16:39.072820 4922 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vlbl6\" (UniqueName: \"kubernetes.io/projected/9abee755-a9bf-49f0-85ad-e5c58f2e35c2-kube-api-access-vlbl6\") on node \"crc\" DevicePath \"\"" Nov 28 07:16:39 crc kubenswrapper[4922]: I1128 07:16:39.072889 4922 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/9abee755-a9bf-49f0-85ad-e5c58f2e35c2-log-httpd\") on node \"crc\" DevicePath \"\"" Nov 28 07:16:39 crc kubenswrapper[4922]: I1128 07:16:39.072947 4922 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/9abee755-a9bf-49f0-85ad-e5c58f2e35c2-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 07:16:39 crc kubenswrapper[4922]: I1128 07:16:39.073001 4922 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/9abee755-a9bf-49f0-85ad-e5c58f2e35c2-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Nov 28 07:16:39 crc kubenswrapper[4922]: I1128 07:16:39.073055 4922 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9abee755-a9bf-49f0-85ad-e5c58f2e35c2-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 07:16:39 crc kubenswrapper[4922]: I1128 07:16:39.076396 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for 
volume "kubernetes.io/secret/9abee755-a9bf-49f0-85ad-e5c58f2e35c2-config-data" (OuterVolumeSpecName: "config-data") pod "9abee755-a9bf-49f0-85ad-e5c58f2e35c2" (UID: "9abee755-a9bf-49f0-85ad-e5c58f2e35c2"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 07:16:39 crc kubenswrapper[4922]: I1128 07:16:39.174487 4922 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9abee755-a9bf-49f0-85ad-e5c58f2e35c2-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 07:16:39 crc kubenswrapper[4922]: W1128 07:16:39.224008 4922 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod067b3033_51f7_4b75_a86b_f3e666119c7f.slice/crio-14ec67d678d2e36069b259d925ca1fc08fea7fd938677eba96fdc7dd600a9ca1 WatchSource:0}: Error finding container 14ec67d678d2e36069b259d925ca1fc08fea7fd938677eba96fdc7dd600a9ca1: Status 404 returned error can't find the container with id 14ec67d678d2e36069b259d925ca1fc08fea7fd938677eba96fdc7dd600a9ca1 Nov 28 07:16:39 crc kubenswrapper[4922]: I1128 07:16:39.227867 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-18f0-account-create-update-4rjkq"] Nov 28 07:16:39 crc kubenswrapper[4922]: W1128 07:16:39.228073 4922 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod30134dd7_1b96_4057_8065_c3e59e70baff.slice/crio-f647be4de497b7f49605bd9e261ef8923a7e24190c4c04b49be479bb074631fa WatchSource:0}: Error finding container f647be4de497b7f49605bd9e261ef8923a7e24190c4c04b49be479bb074631fa: Status 404 returned error can't find the container with id f647be4de497b7f49605bd9e261ef8923a7e24190c4c04b49be479bb074631fa Nov 28 07:16:39 crc kubenswrapper[4922]: W1128 07:16:39.229554 4922 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podc77ee72a_1c8d_4994_8d07_a0338a55489a.slice/crio-d623ebb8f0d56a809032a9f50693365251e62569c9449e6b8bcf29b7195f30ae WatchSource:0}: Error finding container d623ebb8f0d56a809032a9f50693365251e62569c9449e6b8bcf29b7195f30ae: Status 404 returned error can't find the container with id d623ebb8f0d56a809032a9f50693365251e62569c9449e6b8bcf29b7195f30ae Nov 28 07:16:39 crc kubenswrapper[4922]: I1128 07:16:39.240397 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-db-create-8tbc2"] Nov 28 07:16:39 crc kubenswrapper[4922]: I1128 07:16:39.253717 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-db-create-85r25"] Nov 28 07:16:39 crc kubenswrapper[4922]: I1128 07:16:39.353408 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-32a6-account-create-update-j752d"] Nov 28 07:16:39 crc kubenswrapper[4922]: I1128 07:16:39.360739 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-db-create-qp85z"] Nov 28 07:16:39 crc kubenswrapper[4922]: W1128 07:16:39.364786 4922 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poddaea5abd_a0b5_4504_a7e5_53ec88446745.slice/crio-b98c010720474472acf8717c101bfe9dad56d9117573b9c56d77dd32c32c4788 WatchSource:0}: Error finding container b98c010720474472acf8717c101bfe9dad56d9117573b9c56d77dd32c32c4788: Status 404 returned error can't find the container with id 
b98c010720474472acf8717c101bfe9dad56d9117573b9c56d77dd32c32c4788 Nov 28 07:16:39 crc kubenswrapper[4922]: I1128 07:16:39.496155 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-78f1-account-create-update-chz7p"] Nov 28 07:16:39 crc kubenswrapper[4922]: I1128 07:16:39.873169 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-32a6-account-create-update-j752d" event={"ID":"932af8b2-8cfa-4807-a6a4-c2cb82e2bf82","Type":"ContainerStarted","Data":"59b955d5f5e239d9256f177532927e0d6ad988d595cf7e2a850f169518ab9d6f"} Nov 28 07:16:39 crc kubenswrapper[4922]: I1128 07:16:39.873545 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-32a6-account-create-update-j752d" event={"ID":"932af8b2-8cfa-4807-a6a4-c2cb82e2bf82","Type":"ContainerStarted","Data":"0cdd70aa8782ab43628486eaffbf018b2839ea2c8776b9193ff1b1d9b0e5d66e"} Nov 28 07:16:39 crc kubenswrapper[4922]: I1128 07:16:39.877673 4922 generic.go:334] "Generic (PLEG): container finished" podID="c77ee72a-1c8d-4994-8d07-a0338a55489a" containerID="3e9e34bfe759256fa864acd8b367abab7da733b2eac9f67bae9e4abbbf926f5e" exitCode=0 Nov 28 07:16:39 crc kubenswrapper[4922]: I1128 07:16:39.877801 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-db-create-85r25" event={"ID":"c77ee72a-1c8d-4994-8d07-a0338a55489a","Type":"ContainerDied","Data":"3e9e34bfe759256fa864acd8b367abab7da733b2eac9f67bae9e4abbbf926f5e"} Nov 28 07:16:39 crc kubenswrapper[4922]: I1128 07:16:39.877826 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-db-create-85r25" event={"ID":"c77ee72a-1c8d-4994-8d07-a0338a55489a","Type":"ContainerStarted","Data":"d623ebb8f0d56a809032a9f50693365251e62569c9449e6b8bcf29b7195f30ae"} Nov 28 07:16:39 crc kubenswrapper[4922]: I1128 07:16:39.879873 4922 generic.go:334] "Generic (PLEG): container finished" podID="30134dd7-1b96-4057-8065-c3e59e70baff" containerID="544b8ca487961ebba0e9ae2fe55cddd16f56331a6a8dc4a0178b8e21bcbd479b" exitCode=0 Nov 28 07:16:39 crc kubenswrapper[4922]: I1128 07:16:39.879950 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-db-create-8tbc2" event={"ID":"30134dd7-1b96-4057-8065-c3e59e70baff","Type":"ContainerDied","Data":"544b8ca487961ebba0e9ae2fe55cddd16f56331a6a8dc4a0178b8e21bcbd479b"} Nov 28 07:16:39 crc kubenswrapper[4922]: I1128 07:16:39.879975 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-db-create-8tbc2" event={"ID":"30134dd7-1b96-4057-8065-c3e59e70baff","Type":"ContainerStarted","Data":"f647be4de497b7f49605bd9e261ef8923a7e24190c4c04b49be479bb074631fa"} Nov 28 07:16:39 crc kubenswrapper[4922]: I1128 07:16:39.883231 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstackclient" event={"ID":"264e478f-8337-4f40-b005-84a7cd802eaa","Type":"ContainerStarted","Data":"6d78c8d0fed333630cf5081b1caacc151cab7db6d74c7fe5e06ec18ea0087bce"} Nov 28 07:16:39 crc kubenswrapper[4922]: I1128 07:16:39.887667 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-proxy-cc5b55cb5-8tgkn" event={"ID":"c10f3b66-a7e0-4690-939a-5938de689b3a","Type":"ContainerStarted","Data":"0c46d19aa54b7ae1c585537a2f9d7f0d49af908f52150fea5f7f93185d9dc261"} Nov 28 07:16:39 crc kubenswrapper[4922]: I1128 07:16:39.887785 4922 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/swift-proxy-cc5b55cb5-8tgkn" Nov 28 07:16:39 crc kubenswrapper[4922]: I1128 07:16:39.887941 4922 kubelet.go:2542] "SyncLoop (probe)" 
probe="readiness" status="" pod="openstack/swift-proxy-cc5b55cb5-8tgkn" Nov 28 07:16:39 crc kubenswrapper[4922]: I1128 07:16:39.901279 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-db-create-qp85z" event={"ID":"daea5abd-a0b5-4504-a7e5-53ec88446745","Type":"ContainerStarted","Data":"f3d2d07c15c9482f58a3ac7590f85b4346618b0eeecb29109666f5bb7d97c88b"} Nov 28 07:16:39 crc kubenswrapper[4922]: I1128 07:16:39.901333 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-db-create-qp85z" event={"ID":"daea5abd-a0b5-4504-a7e5-53ec88446745","Type":"ContainerStarted","Data":"b98c010720474472acf8717c101bfe9dad56d9117573b9c56d77dd32c32c4788"} Nov 28 07:16:39 crc kubenswrapper[4922]: I1128 07:16:39.912414 4922 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/swift-proxy-cc5b55cb5-8tgkn" podUID="c10f3b66-a7e0-4690-939a-5938de689b3a" containerName="proxy-httpd" probeResult="failure" output="HTTP probe failed with statuscode: 503" Nov 28 07:16:39 crc kubenswrapper[4922]: I1128 07:16:39.914641 4922 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell0-32a6-account-create-update-j752d" podStartSLOduration=3.914619632 podStartE2EDuration="3.914619632s" podCreationTimestamp="2025-11-28 07:16:36 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 07:16:39.888191969 +0000 UTC m=+1444.808587551" watchObservedRunningTime="2025-11-28 07:16:39.914619632 +0000 UTC m=+1444.835015224" Nov 28 07:16:39 crc kubenswrapper[4922]: I1128 07:16:39.915997 4922 generic.go:334] "Generic (PLEG): container finished" podID="067b3033-51f7-4b75-a86b-f3e666119c7f" containerID="4da5e05550131fabfd117c197bf055d5d73de16102d7399e3b3054b31ac41369" exitCode=0 Nov 28 07:16:39 crc kubenswrapper[4922]: I1128 07:16:39.916080 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-18f0-account-create-update-4rjkq" event={"ID":"067b3033-51f7-4b75-a86b-f3e666119c7f","Type":"ContainerDied","Data":"4da5e05550131fabfd117c197bf055d5d73de16102d7399e3b3054b31ac41369"} Nov 28 07:16:39 crc kubenswrapper[4922]: I1128 07:16:39.916114 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-18f0-account-create-update-4rjkq" event={"ID":"067b3033-51f7-4b75-a86b-f3e666119c7f","Type":"ContainerStarted","Data":"14ec67d678d2e36069b259d925ca1fc08fea7fd938677eba96fdc7dd600a9ca1"} Nov 28 07:16:39 crc kubenswrapper[4922]: I1128 07:16:39.918192 4922 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 28 07:16:39 crc kubenswrapper[4922]: I1128 07:16:39.918288 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-78f1-account-create-update-chz7p" event={"ID":"b1e1fd22-8937-4a32-bf98-06e3655deb07","Type":"ContainerStarted","Data":"fa47c7971f018e6f78ebe16ae788ea0ffef357beb5f780cf216345ce77143bae"} Nov 28 07:16:39 crc kubenswrapper[4922]: I1128 07:16:39.918313 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-78f1-account-create-update-chz7p" event={"ID":"b1e1fd22-8937-4a32-bf98-06e3655deb07","Type":"ContainerStarted","Data":"331f7c32cb174a9fb14d83317d56345d81497b8b76f1a69467099333d1975aca"} Nov 28 07:16:39 crc kubenswrapper[4922]: I1128 07:16:39.966577 4922 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/swift-proxy-cc5b55cb5-8tgkn" podStartSLOduration=8.966555774 podStartE2EDuration="8.966555774s" podCreationTimestamp="2025-11-28 07:16:31 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 07:16:39.928604554 +0000 UTC m=+1444.849000156" watchObservedRunningTime="2025-11-28 07:16:39.966555774 +0000 UTC m=+1444.886951356" Nov 28 07:16:39 crc kubenswrapper[4922]: I1128 07:16:39.971892 4922 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/openstackclient" podStartSLOduration=3.147283342 podStartE2EDuration="12.971875406s" podCreationTimestamp="2025-11-28 07:16:27 +0000 UTC" firstStartedPulling="2025-11-28 07:16:28.803117989 +0000 UTC m=+1433.723513571" lastFinishedPulling="2025-11-28 07:16:38.627710053 +0000 UTC m=+1443.548105635" observedRunningTime="2025-11-28 07:16:39.951644497 +0000 UTC m=+1444.872040089" watchObservedRunningTime="2025-11-28 07:16:39.971875406 +0000 UTC m=+1444.892270988" Nov 28 07:16:40 crc kubenswrapper[4922]: I1128 07:16:40.028294 4922 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 28 07:16:40 crc kubenswrapper[4922]: I1128 07:16:40.063338 4922 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Nov 28 07:16:40 crc kubenswrapper[4922]: I1128 07:16:40.101300 4922 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Nov 28 07:16:40 crc kubenswrapper[4922]: E1128 07:16:40.101757 4922 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9abee755-a9bf-49f0-85ad-e5c58f2e35c2" containerName="ceilometer-central-agent" Nov 28 07:16:40 crc kubenswrapper[4922]: I1128 07:16:40.101772 4922 state_mem.go:107] "Deleted CPUSet assignment" podUID="9abee755-a9bf-49f0-85ad-e5c58f2e35c2" containerName="ceilometer-central-agent" Nov 28 07:16:40 crc kubenswrapper[4922]: E1128 07:16:40.101789 4922 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9abee755-a9bf-49f0-85ad-e5c58f2e35c2" containerName="proxy-httpd" Nov 28 07:16:40 crc kubenswrapper[4922]: I1128 07:16:40.101797 4922 state_mem.go:107] "Deleted CPUSet assignment" podUID="9abee755-a9bf-49f0-85ad-e5c58f2e35c2" containerName="proxy-httpd" Nov 28 07:16:40 crc kubenswrapper[4922]: E1128 07:16:40.101823 4922 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9abee755-a9bf-49f0-85ad-e5c58f2e35c2" containerName="sg-core" Nov 28 07:16:40 crc kubenswrapper[4922]: I1128 07:16:40.101830 4922 state_mem.go:107] "Deleted CPUSet assignment" podUID="9abee755-a9bf-49f0-85ad-e5c58f2e35c2" containerName="sg-core" Nov 28 07:16:40 crc 
kubenswrapper[4922]: E1128 07:16:40.101852 4922 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9abee755-a9bf-49f0-85ad-e5c58f2e35c2" containerName="ceilometer-notification-agent" Nov 28 07:16:40 crc kubenswrapper[4922]: I1128 07:16:40.101857 4922 state_mem.go:107] "Deleted CPUSet assignment" podUID="9abee755-a9bf-49f0-85ad-e5c58f2e35c2" containerName="ceilometer-notification-agent" Nov 28 07:16:40 crc kubenswrapper[4922]: I1128 07:16:40.102018 4922 memory_manager.go:354] "RemoveStaleState removing state" podUID="9abee755-a9bf-49f0-85ad-e5c58f2e35c2" containerName="ceilometer-central-agent" Nov 28 07:16:40 crc kubenswrapper[4922]: I1128 07:16:40.102041 4922 memory_manager.go:354] "RemoveStaleState removing state" podUID="9abee755-a9bf-49f0-85ad-e5c58f2e35c2" containerName="ceilometer-notification-agent" Nov 28 07:16:40 crc kubenswrapper[4922]: I1128 07:16:40.102050 4922 memory_manager.go:354] "RemoveStaleState removing state" podUID="9abee755-a9bf-49f0-85ad-e5c58f2e35c2" containerName="sg-core" Nov 28 07:16:40 crc kubenswrapper[4922]: I1128 07:16:40.102065 4922 memory_manager.go:354] "RemoveStaleState removing state" podUID="9abee755-a9bf-49f0-85ad-e5c58f2e35c2" containerName="proxy-httpd" Nov 28 07:16:40 crc kubenswrapper[4922]: I1128 07:16:40.103659 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 28 07:16:40 crc kubenswrapper[4922]: I1128 07:16:40.110713 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Nov 28 07:16:40 crc kubenswrapper[4922]: I1128 07:16:40.110948 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Nov 28 07:16:40 crc kubenswrapper[4922]: I1128 07:16:40.111563 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 28 07:16:40 crc kubenswrapper[4922]: I1128 07:16:40.113862 4922 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-78f1-account-create-update-chz7p" podStartSLOduration=4.113838592 podStartE2EDuration="4.113838592s" podCreationTimestamp="2025-11-28 07:16:36 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 07:16:40.041035615 +0000 UTC m=+1444.961431197" watchObservedRunningTime="2025-11-28 07:16:40.113838592 +0000 UTC m=+1445.034234174" Nov 28 07:16:40 crc kubenswrapper[4922]: I1128 07:16:40.128246 4922 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-db-create-qp85z" podStartSLOduration=4.128203394 podStartE2EDuration="4.128203394s" podCreationTimestamp="2025-11-28 07:16:36 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 07:16:40.055573012 +0000 UTC m=+1444.975968594" watchObservedRunningTime="2025-11-28 07:16:40.128203394 +0000 UTC m=+1445.048598976" Nov 28 07:16:40 crc kubenswrapper[4922]: I1128 07:16:40.195133 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ca306ec1-da8e-4551-8cfe-b5030809c100-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"ca306ec1-da8e-4551-8cfe-b5030809c100\") " pod="openstack/ceilometer-0" Nov 28 07:16:40 crc kubenswrapper[4922]: I1128 07:16:40.195204 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume 
started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ca306ec1-da8e-4551-8cfe-b5030809c100-scripts\") pod \"ceilometer-0\" (UID: \"ca306ec1-da8e-4551-8cfe-b5030809c100\") " pod="openstack/ceilometer-0" Nov 28 07:16:40 crc kubenswrapper[4922]: I1128 07:16:40.195244 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/ca306ec1-da8e-4551-8cfe-b5030809c100-run-httpd\") pod \"ceilometer-0\" (UID: \"ca306ec1-da8e-4551-8cfe-b5030809c100\") " pod="openstack/ceilometer-0" Nov 28 07:16:40 crc kubenswrapper[4922]: I1128 07:16:40.195298 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/ca306ec1-da8e-4551-8cfe-b5030809c100-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"ca306ec1-da8e-4551-8cfe-b5030809c100\") " pod="openstack/ceilometer-0" Nov 28 07:16:40 crc kubenswrapper[4922]: I1128 07:16:40.195343 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ca306ec1-da8e-4551-8cfe-b5030809c100-config-data\") pod \"ceilometer-0\" (UID: \"ca306ec1-da8e-4551-8cfe-b5030809c100\") " pod="openstack/ceilometer-0" Nov 28 07:16:40 crc kubenswrapper[4922]: I1128 07:16:40.195370 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cdzg9\" (UniqueName: \"kubernetes.io/projected/ca306ec1-da8e-4551-8cfe-b5030809c100-kube-api-access-cdzg9\") pod \"ceilometer-0\" (UID: \"ca306ec1-da8e-4551-8cfe-b5030809c100\") " pod="openstack/ceilometer-0" Nov 28 07:16:40 crc kubenswrapper[4922]: I1128 07:16:40.195390 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/ca306ec1-da8e-4551-8cfe-b5030809c100-log-httpd\") pod \"ceilometer-0\" (UID: \"ca306ec1-da8e-4551-8cfe-b5030809c100\") " pod="openstack/ceilometer-0" Nov 28 07:16:40 crc kubenswrapper[4922]: I1128 07:16:40.300261 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/ca306ec1-da8e-4551-8cfe-b5030809c100-log-httpd\") pod \"ceilometer-0\" (UID: \"ca306ec1-da8e-4551-8cfe-b5030809c100\") " pod="openstack/ceilometer-0" Nov 28 07:16:40 crc kubenswrapper[4922]: I1128 07:16:40.300370 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ca306ec1-da8e-4551-8cfe-b5030809c100-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"ca306ec1-da8e-4551-8cfe-b5030809c100\") " pod="openstack/ceilometer-0" Nov 28 07:16:40 crc kubenswrapper[4922]: I1128 07:16:40.300422 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ca306ec1-da8e-4551-8cfe-b5030809c100-scripts\") pod \"ceilometer-0\" (UID: \"ca306ec1-da8e-4551-8cfe-b5030809c100\") " pod="openstack/ceilometer-0" Nov 28 07:16:40 crc kubenswrapper[4922]: I1128 07:16:40.300446 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/ca306ec1-da8e-4551-8cfe-b5030809c100-run-httpd\") pod \"ceilometer-0\" (UID: \"ca306ec1-da8e-4551-8cfe-b5030809c100\") " pod="openstack/ceilometer-0" Nov 28 07:16:40 crc kubenswrapper[4922]: I1128 07:16:40.300521 4922 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/ca306ec1-da8e-4551-8cfe-b5030809c100-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"ca306ec1-da8e-4551-8cfe-b5030809c100\") " pod="openstack/ceilometer-0" Nov 28 07:16:40 crc kubenswrapper[4922]: I1128 07:16:40.300561 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ca306ec1-da8e-4551-8cfe-b5030809c100-config-data\") pod \"ceilometer-0\" (UID: \"ca306ec1-da8e-4551-8cfe-b5030809c100\") " pod="openstack/ceilometer-0" Nov 28 07:16:40 crc kubenswrapper[4922]: I1128 07:16:40.300597 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cdzg9\" (UniqueName: \"kubernetes.io/projected/ca306ec1-da8e-4551-8cfe-b5030809c100-kube-api-access-cdzg9\") pod \"ceilometer-0\" (UID: \"ca306ec1-da8e-4551-8cfe-b5030809c100\") " pod="openstack/ceilometer-0" Nov 28 07:16:40 crc kubenswrapper[4922]: I1128 07:16:40.302102 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/ca306ec1-da8e-4551-8cfe-b5030809c100-run-httpd\") pod \"ceilometer-0\" (UID: \"ca306ec1-da8e-4551-8cfe-b5030809c100\") " pod="openstack/ceilometer-0" Nov 28 07:16:40 crc kubenswrapper[4922]: I1128 07:16:40.302102 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/ca306ec1-da8e-4551-8cfe-b5030809c100-log-httpd\") pod \"ceilometer-0\" (UID: \"ca306ec1-da8e-4551-8cfe-b5030809c100\") " pod="openstack/ceilometer-0" Nov 28 07:16:40 crc kubenswrapper[4922]: I1128 07:16:40.307203 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/ca306ec1-da8e-4551-8cfe-b5030809c100-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"ca306ec1-da8e-4551-8cfe-b5030809c100\") " pod="openstack/ceilometer-0" Nov 28 07:16:40 crc kubenswrapper[4922]: I1128 07:16:40.309981 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ca306ec1-da8e-4551-8cfe-b5030809c100-scripts\") pod \"ceilometer-0\" (UID: \"ca306ec1-da8e-4551-8cfe-b5030809c100\") " pod="openstack/ceilometer-0" Nov 28 07:16:40 crc kubenswrapper[4922]: I1128 07:16:40.314924 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ca306ec1-da8e-4551-8cfe-b5030809c100-config-data\") pod \"ceilometer-0\" (UID: \"ca306ec1-da8e-4551-8cfe-b5030809c100\") " pod="openstack/ceilometer-0" Nov 28 07:16:40 crc kubenswrapper[4922]: I1128 07:16:40.317057 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ca306ec1-da8e-4551-8cfe-b5030809c100-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"ca306ec1-da8e-4551-8cfe-b5030809c100\") " pod="openstack/ceilometer-0" Nov 28 07:16:40 crc kubenswrapper[4922]: I1128 07:16:40.318915 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cdzg9\" (UniqueName: \"kubernetes.io/projected/ca306ec1-da8e-4551-8cfe-b5030809c100-kube-api-access-cdzg9\") pod \"ceilometer-0\" (UID: \"ca306ec1-da8e-4551-8cfe-b5030809c100\") " pod="openstack/ceilometer-0" Nov 28 07:16:40 crc kubenswrapper[4922]: I1128 07:16:40.486294 4922 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 28 07:16:40 crc kubenswrapper[4922]: I1128 07:16:40.933543 4922 generic.go:334] "Generic (PLEG): container finished" podID="b1e1fd22-8937-4a32-bf98-06e3655deb07" containerID="fa47c7971f018e6f78ebe16ae788ea0ffef357beb5f780cf216345ce77143bae" exitCode=0 Nov 28 07:16:40 crc kubenswrapper[4922]: I1128 07:16:40.933639 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-78f1-account-create-update-chz7p" event={"ID":"b1e1fd22-8937-4a32-bf98-06e3655deb07","Type":"ContainerDied","Data":"fa47c7971f018e6f78ebe16ae788ea0ffef357beb5f780cf216345ce77143bae"} Nov 28 07:16:40 crc kubenswrapper[4922]: I1128 07:16:40.939116 4922 generic.go:334] "Generic (PLEG): container finished" podID="932af8b2-8cfa-4807-a6a4-c2cb82e2bf82" containerID="59b955d5f5e239d9256f177532927e0d6ad988d595cf7e2a850f169518ab9d6f" exitCode=0 Nov 28 07:16:40 crc kubenswrapper[4922]: I1128 07:16:40.939238 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-32a6-account-create-update-j752d" event={"ID":"932af8b2-8cfa-4807-a6a4-c2cb82e2bf82","Type":"ContainerDied","Data":"59b955d5f5e239d9256f177532927e0d6ad988d595cf7e2a850f169518ab9d6f"} Nov 28 07:16:40 crc kubenswrapper[4922]: I1128 07:16:40.945748 4922 generic.go:334] "Generic (PLEG): container finished" podID="daea5abd-a0b5-4504-a7e5-53ec88446745" containerID="f3d2d07c15c9482f58a3ac7590f85b4346618b0eeecb29109666f5bb7d97c88b" exitCode=0 Nov 28 07:16:40 crc kubenswrapper[4922]: I1128 07:16:40.945844 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-db-create-qp85z" event={"ID":"daea5abd-a0b5-4504-a7e5-53ec88446745","Type":"ContainerDied","Data":"f3d2d07c15c9482f58a3ac7590f85b4346618b0eeecb29109666f5bb7d97c88b"} Nov 28 07:16:40 crc kubenswrapper[4922]: I1128 07:16:40.954934 4922 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/swift-proxy-cc5b55cb5-8tgkn" Nov 28 07:16:41 crc kubenswrapper[4922]: I1128 07:16:40.996288 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 28 07:16:41 crc kubenswrapper[4922]: I1128 07:16:41.346563 4922 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-18f0-account-create-update-4rjkq" Nov 28 07:16:41 crc kubenswrapper[4922]: I1128 07:16:41.411628 4922 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9abee755-a9bf-49f0-85ad-e5c58f2e35c2" path="/var/lib/kubelet/pods/9abee755-a9bf-49f0-85ad-e5c58f2e35c2/volumes" Nov 28 07:16:41 crc kubenswrapper[4922]: I1128 07:16:41.420577 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/067b3033-51f7-4b75-a86b-f3e666119c7f-operator-scripts\") pod \"067b3033-51f7-4b75-a86b-f3e666119c7f\" (UID: \"067b3033-51f7-4b75-a86b-f3e666119c7f\") " Nov 28 07:16:41 crc kubenswrapper[4922]: I1128 07:16:41.420758 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-sf4dh\" (UniqueName: \"kubernetes.io/projected/067b3033-51f7-4b75-a86b-f3e666119c7f-kube-api-access-sf4dh\") pod \"067b3033-51f7-4b75-a86b-f3e666119c7f\" (UID: \"067b3033-51f7-4b75-a86b-f3e666119c7f\") " Nov 28 07:16:41 crc kubenswrapper[4922]: I1128 07:16:41.421229 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/067b3033-51f7-4b75-a86b-f3e666119c7f-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "067b3033-51f7-4b75-a86b-f3e666119c7f" (UID: "067b3033-51f7-4b75-a86b-f3e666119c7f"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 07:16:41 crc kubenswrapper[4922]: I1128 07:16:41.425267 4922 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-db-create-8tbc2" Nov 28 07:16:41 crc kubenswrapper[4922]: I1128 07:16:41.431979 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/067b3033-51f7-4b75-a86b-f3e666119c7f-kube-api-access-sf4dh" (OuterVolumeSpecName: "kube-api-access-sf4dh") pod "067b3033-51f7-4b75-a86b-f3e666119c7f" (UID: "067b3033-51f7-4b75-a86b-f3e666119c7f"). InnerVolumeSpecName "kube-api-access-sf4dh". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 07:16:41 crc kubenswrapper[4922]: I1128 07:16:41.449047 4922 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-db-create-85r25" Nov 28 07:16:41 crc kubenswrapper[4922]: I1128 07:16:41.522525 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vxzmb\" (UniqueName: \"kubernetes.io/projected/c77ee72a-1c8d-4994-8d07-a0338a55489a-kube-api-access-vxzmb\") pod \"c77ee72a-1c8d-4994-8d07-a0338a55489a\" (UID: \"c77ee72a-1c8d-4994-8d07-a0338a55489a\") " Nov 28 07:16:41 crc kubenswrapper[4922]: I1128 07:16:41.522581 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c77ee72a-1c8d-4994-8d07-a0338a55489a-operator-scripts\") pod \"c77ee72a-1c8d-4994-8d07-a0338a55489a\" (UID: \"c77ee72a-1c8d-4994-8d07-a0338a55489a\") " Nov 28 07:16:41 crc kubenswrapper[4922]: I1128 07:16:41.522625 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/30134dd7-1b96-4057-8065-c3e59e70baff-operator-scripts\") pod \"30134dd7-1b96-4057-8065-c3e59e70baff\" (UID: \"30134dd7-1b96-4057-8065-c3e59e70baff\") " Nov 28 07:16:41 crc kubenswrapper[4922]: I1128 07:16:41.522694 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vqdd9\" (UniqueName: \"kubernetes.io/projected/30134dd7-1b96-4057-8065-c3e59e70baff-kube-api-access-vqdd9\") pod \"30134dd7-1b96-4057-8065-c3e59e70baff\" (UID: \"30134dd7-1b96-4057-8065-c3e59e70baff\") " Nov 28 07:16:41 crc kubenswrapper[4922]: I1128 07:16:41.523070 4922 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-sf4dh\" (UniqueName: \"kubernetes.io/projected/067b3033-51f7-4b75-a86b-f3e666119c7f-kube-api-access-sf4dh\") on node \"crc\" DevicePath \"\"" Nov 28 07:16:41 crc kubenswrapper[4922]: I1128 07:16:41.523087 4922 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/067b3033-51f7-4b75-a86b-f3e666119c7f-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 07:16:41 crc kubenswrapper[4922]: I1128 07:16:41.523464 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/30134dd7-1b96-4057-8065-c3e59e70baff-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "30134dd7-1b96-4057-8065-c3e59e70baff" (UID: "30134dd7-1b96-4057-8065-c3e59e70baff"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 07:16:41 crc kubenswrapper[4922]: I1128 07:16:41.523505 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c77ee72a-1c8d-4994-8d07-a0338a55489a-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "c77ee72a-1c8d-4994-8d07-a0338a55489a" (UID: "c77ee72a-1c8d-4994-8d07-a0338a55489a"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 07:16:41 crc kubenswrapper[4922]: I1128 07:16:41.525633 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c77ee72a-1c8d-4994-8d07-a0338a55489a-kube-api-access-vxzmb" (OuterVolumeSpecName: "kube-api-access-vxzmb") pod "c77ee72a-1c8d-4994-8d07-a0338a55489a" (UID: "c77ee72a-1c8d-4994-8d07-a0338a55489a"). InnerVolumeSpecName "kube-api-access-vxzmb". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 07:16:41 crc kubenswrapper[4922]: I1128 07:16:41.525998 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/30134dd7-1b96-4057-8065-c3e59e70baff-kube-api-access-vqdd9" (OuterVolumeSpecName: "kube-api-access-vqdd9") pod "30134dd7-1b96-4057-8065-c3e59e70baff" (UID: "30134dd7-1b96-4057-8065-c3e59e70baff"). InnerVolumeSpecName "kube-api-access-vqdd9". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 07:16:41 crc kubenswrapper[4922]: I1128 07:16:41.624522 4922 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vxzmb\" (UniqueName: \"kubernetes.io/projected/c77ee72a-1c8d-4994-8d07-a0338a55489a-kube-api-access-vxzmb\") on node \"crc\" DevicePath \"\"" Nov 28 07:16:41 crc kubenswrapper[4922]: I1128 07:16:41.624826 4922 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c77ee72a-1c8d-4994-8d07-a0338a55489a-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 07:16:41 crc kubenswrapper[4922]: I1128 07:16:41.624955 4922 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/30134dd7-1b96-4057-8065-c3e59e70baff-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 07:16:41 crc kubenswrapper[4922]: I1128 07:16:41.625074 4922 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vqdd9\" (UniqueName: \"kubernetes.io/projected/30134dd7-1b96-4057-8065-c3e59e70baff-kube-api-access-vqdd9\") on node \"crc\" DevicePath \"\"" Nov 28 07:16:41 crc kubenswrapper[4922]: I1128 07:16:41.955021 4922 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-18f0-account-create-update-4rjkq" Nov 28 07:16:41 crc kubenswrapper[4922]: I1128 07:16:41.955030 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-18f0-account-create-update-4rjkq" event={"ID":"067b3033-51f7-4b75-a86b-f3e666119c7f","Type":"ContainerDied","Data":"14ec67d678d2e36069b259d925ca1fc08fea7fd938677eba96fdc7dd600a9ca1"} Nov 28 07:16:41 crc kubenswrapper[4922]: I1128 07:16:41.956600 4922 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="14ec67d678d2e36069b259d925ca1fc08fea7fd938677eba96fdc7dd600a9ca1" Nov 28 07:16:41 crc kubenswrapper[4922]: I1128 07:16:41.957375 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"ca306ec1-da8e-4551-8cfe-b5030809c100","Type":"ContainerStarted","Data":"f420fa3ec3f7dae78434c1f846e451b7433d72afb4f833dd2d5434c9f7795d2a"} Nov 28 07:16:41 crc kubenswrapper[4922]: I1128 07:16:41.958623 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-db-create-8tbc2" event={"ID":"30134dd7-1b96-4057-8065-c3e59e70baff","Type":"ContainerDied","Data":"f647be4de497b7f49605bd9e261ef8923a7e24190c4c04b49be479bb074631fa"} Nov 28 07:16:41 crc kubenswrapper[4922]: I1128 07:16:41.958650 4922 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="f647be4de497b7f49605bd9e261ef8923a7e24190c4c04b49be479bb074631fa" Nov 28 07:16:41 crc kubenswrapper[4922]: I1128 07:16:41.958708 4922 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-db-create-8tbc2" Nov 28 07:16:41 crc kubenswrapper[4922]: I1128 07:16:41.968920 4922 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-db-create-85r25" Nov 28 07:16:41 crc kubenswrapper[4922]: I1128 07:16:41.968955 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-db-create-85r25" event={"ID":"c77ee72a-1c8d-4994-8d07-a0338a55489a","Type":"ContainerDied","Data":"d623ebb8f0d56a809032a9f50693365251e62569c9449e6b8bcf29b7195f30ae"} Nov 28 07:16:41 crc kubenswrapper[4922]: I1128 07:16:41.969084 4922 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="d623ebb8f0d56a809032a9f50693365251e62569c9449e6b8bcf29b7195f30ae" Nov 28 07:16:42 crc kubenswrapper[4922]: I1128 07:16:42.272532 4922 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-32a6-account-create-update-j752d" Nov 28 07:16:42 crc kubenswrapper[4922]: I1128 07:16:42.384730 4922 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-78f1-account-create-update-chz7p" Nov 28 07:16:42 crc kubenswrapper[4922]: I1128 07:16:42.390778 4922 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-db-create-qp85z" Nov 28 07:16:42 crc kubenswrapper[4922]: I1128 07:16:42.454797 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bk9qv\" (UniqueName: \"kubernetes.io/projected/932af8b2-8cfa-4807-a6a4-c2cb82e2bf82-kube-api-access-bk9qv\") pod \"932af8b2-8cfa-4807-a6a4-c2cb82e2bf82\" (UID: \"932af8b2-8cfa-4807-a6a4-c2cb82e2bf82\") " Nov 28 07:16:42 crc kubenswrapper[4922]: I1128 07:16:42.454858 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b1e1fd22-8937-4a32-bf98-06e3655deb07-operator-scripts\") pod \"b1e1fd22-8937-4a32-bf98-06e3655deb07\" (UID: \"b1e1fd22-8937-4a32-bf98-06e3655deb07\") " Nov 28 07:16:42 crc kubenswrapper[4922]: I1128 07:16:42.454879 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-l5r5v\" (UniqueName: \"kubernetes.io/projected/daea5abd-a0b5-4504-a7e5-53ec88446745-kube-api-access-l5r5v\") pod \"daea5abd-a0b5-4504-a7e5-53ec88446745\" (UID: \"daea5abd-a0b5-4504-a7e5-53ec88446745\") " Nov 28 07:16:42 crc kubenswrapper[4922]: I1128 07:16:42.455866 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b1e1fd22-8937-4a32-bf98-06e3655deb07-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "b1e1fd22-8937-4a32-bf98-06e3655deb07" (UID: "b1e1fd22-8937-4a32-bf98-06e3655deb07"). InnerVolumeSpecName "operator-scripts". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 07:16:42 crc kubenswrapper[4922]: I1128 07:16:42.456015 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lz694\" (UniqueName: \"kubernetes.io/projected/b1e1fd22-8937-4a32-bf98-06e3655deb07-kube-api-access-lz694\") pod \"b1e1fd22-8937-4a32-bf98-06e3655deb07\" (UID: \"b1e1fd22-8937-4a32-bf98-06e3655deb07\") " Nov 28 07:16:42 crc kubenswrapper[4922]: I1128 07:16:42.456041 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/daea5abd-a0b5-4504-a7e5-53ec88446745-operator-scripts\") pod \"daea5abd-a0b5-4504-a7e5-53ec88446745\" (UID: \"daea5abd-a0b5-4504-a7e5-53ec88446745\") " Nov 28 07:16:42 crc kubenswrapper[4922]: I1128 07:16:42.456103 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/932af8b2-8cfa-4807-a6a4-c2cb82e2bf82-operator-scripts\") pod \"932af8b2-8cfa-4807-a6a4-c2cb82e2bf82\" (UID: \"932af8b2-8cfa-4807-a6a4-c2cb82e2bf82\") " Nov 28 07:16:42 crc kubenswrapper[4922]: I1128 07:16:42.456679 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/daea5abd-a0b5-4504-a7e5-53ec88446745-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "daea5abd-a0b5-4504-a7e5-53ec88446745" (UID: "daea5abd-a0b5-4504-a7e5-53ec88446745"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 07:16:42 crc kubenswrapper[4922]: I1128 07:16:42.456893 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/932af8b2-8cfa-4807-a6a4-c2cb82e2bf82-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "932af8b2-8cfa-4807-a6a4-c2cb82e2bf82" (UID: "932af8b2-8cfa-4807-a6a4-c2cb82e2bf82"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 07:16:42 crc kubenswrapper[4922]: I1128 07:16:42.456408 4922 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b1e1fd22-8937-4a32-bf98-06e3655deb07-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 07:16:42 crc kubenswrapper[4922]: I1128 07:16:42.461944 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/daea5abd-a0b5-4504-a7e5-53ec88446745-kube-api-access-l5r5v" (OuterVolumeSpecName: "kube-api-access-l5r5v") pod "daea5abd-a0b5-4504-a7e5-53ec88446745" (UID: "daea5abd-a0b5-4504-a7e5-53ec88446745"). InnerVolumeSpecName "kube-api-access-l5r5v". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 07:16:42 crc kubenswrapper[4922]: I1128 07:16:42.462269 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/932af8b2-8cfa-4807-a6a4-c2cb82e2bf82-kube-api-access-bk9qv" (OuterVolumeSpecName: "kube-api-access-bk9qv") pod "932af8b2-8cfa-4807-a6a4-c2cb82e2bf82" (UID: "932af8b2-8cfa-4807-a6a4-c2cb82e2bf82"). InnerVolumeSpecName "kube-api-access-bk9qv". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 07:16:42 crc kubenswrapper[4922]: I1128 07:16:42.463391 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b1e1fd22-8937-4a32-bf98-06e3655deb07-kube-api-access-lz694" (OuterVolumeSpecName: "kube-api-access-lz694") pod "b1e1fd22-8937-4a32-bf98-06e3655deb07" (UID: "b1e1fd22-8937-4a32-bf98-06e3655deb07"). InnerVolumeSpecName "kube-api-access-lz694". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 07:16:42 crc kubenswrapper[4922]: I1128 07:16:42.559697 4922 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/932af8b2-8cfa-4807-a6a4-c2cb82e2bf82-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 07:16:42 crc kubenswrapper[4922]: I1128 07:16:42.560053 4922 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bk9qv\" (UniqueName: \"kubernetes.io/projected/932af8b2-8cfa-4807-a6a4-c2cb82e2bf82-kube-api-access-bk9qv\") on node \"crc\" DevicePath \"\"" Nov 28 07:16:42 crc kubenswrapper[4922]: I1128 07:16:42.560066 4922 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-l5r5v\" (UniqueName: \"kubernetes.io/projected/daea5abd-a0b5-4504-a7e5-53ec88446745-kube-api-access-l5r5v\") on node \"crc\" DevicePath \"\"" Nov 28 07:16:42 crc kubenswrapper[4922]: I1128 07:16:42.560078 4922 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lz694\" (UniqueName: \"kubernetes.io/projected/b1e1fd22-8937-4a32-bf98-06e3655deb07-kube-api-access-lz694\") on node \"crc\" DevicePath \"\"" Nov 28 07:16:42 crc kubenswrapper[4922]: I1128 07:16:42.560092 4922 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/daea5abd-a0b5-4504-a7e5-53ec88446745-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 07:16:42 crc kubenswrapper[4922]: I1128 07:16:42.837824 4922 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 28 07:16:42 crc kubenswrapper[4922]: I1128 07:16:42.979435 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-32a6-account-create-update-j752d" event={"ID":"932af8b2-8cfa-4807-a6a4-c2cb82e2bf82","Type":"ContainerDied","Data":"0cdd70aa8782ab43628486eaffbf018b2839ea2c8776b9193ff1b1d9b0e5d66e"} Nov 28 07:16:42 crc kubenswrapper[4922]: I1128 07:16:42.979470 4922 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="0cdd70aa8782ab43628486eaffbf018b2839ea2c8776b9193ff1b1d9b0e5d66e" Nov 28 07:16:42 crc kubenswrapper[4922]: I1128 07:16:42.979480 4922 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-32a6-account-create-update-j752d" Nov 28 07:16:42 crc kubenswrapper[4922]: I1128 07:16:42.980759 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-db-create-qp85z" event={"ID":"daea5abd-a0b5-4504-a7e5-53ec88446745","Type":"ContainerDied","Data":"b98c010720474472acf8717c101bfe9dad56d9117573b9c56d77dd32c32c4788"} Nov 28 07:16:42 crc kubenswrapper[4922]: I1128 07:16:42.980779 4922 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="b98c010720474472acf8717c101bfe9dad56d9117573b9c56d77dd32c32c4788" Nov 28 07:16:42 crc kubenswrapper[4922]: I1128 07:16:42.980815 4922 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-db-create-qp85z" Nov 28 07:16:42 crc kubenswrapper[4922]: I1128 07:16:42.982082 4922 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-78f1-account-create-update-chz7p" Nov 28 07:16:42 crc kubenswrapper[4922]: I1128 07:16:42.982191 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-78f1-account-create-update-chz7p" event={"ID":"b1e1fd22-8937-4a32-bf98-06e3655deb07","Type":"ContainerDied","Data":"331f7c32cb174a9fb14d83317d56345d81497b8b76f1a69467099333d1975aca"} Nov 28 07:16:42 crc kubenswrapper[4922]: I1128 07:16:42.982307 4922 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="331f7c32cb174a9fb14d83317d56345d81497b8b76f1a69467099333d1975aca" Nov 28 07:16:42 crc kubenswrapper[4922]: I1128 07:16:42.988106 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"ca306ec1-da8e-4551-8cfe-b5030809c100","Type":"ContainerStarted","Data":"4a00625a58f020d08df019dd1495902c432a469e48284ab1d739b7a8e0cf2e4b"} Nov 28 07:16:43 crc kubenswrapper[4922]: I1128 07:16:43.360929 4922 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 28 07:16:43 crc kubenswrapper[4922]: I1128 07:16:43.361213 4922 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-external-api-0" podUID="adb38287-ea0b-4e02-8bda-8022f86b0d81" containerName="glance-log" containerID="cri-o://b5801fe62bdc725903ec844459b175013f18223c1fb6f02b97215330875f416c" gracePeriod=30 Nov 28 07:16:43 crc kubenswrapper[4922]: I1128 07:16:43.361395 4922 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-external-api-0" podUID="adb38287-ea0b-4e02-8bda-8022f86b0d81" containerName="glance-httpd" containerID="cri-o://fa33872a8c402ef10af3d2c2d8f905a05dc2def6cc7ed70358f37b02f79a3052" gracePeriod=30 Nov 28 07:16:44 crc kubenswrapper[4922]: I1128 07:16:44.003718 4922 generic.go:334] "Generic (PLEG): container finished" podID="adb38287-ea0b-4e02-8bda-8022f86b0d81" containerID="b5801fe62bdc725903ec844459b175013f18223c1fb6f02b97215330875f416c" exitCode=143 Nov 28 07:16:44 crc kubenswrapper[4922]: I1128 07:16:44.003801 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"adb38287-ea0b-4e02-8bda-8022f86b0d81","Type":"ContainerDied","Data":"b5801fe62bdc725903ec844459b175013f18223c1fb6f02b97215330875f416c"} Nov 28 07:16:44 crc kubenswrapper[4922]: I1128 07:16:44.007471 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"ca306ec1-da8e-4551-8cfe-b5030809c100","Type":"ContainerStarted","Data":"146255e849c42fb7d994de3b5de500fa1339cd40ae040fd55aa1b1a572228f15"} Nov 28 07:16:44 crc kubenswrapper[4922]: I1128 07:16:44.007501 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"ca306ec1-da8e-4551-8cfe-b5030809c100","Type":"ContainerStarted","Data":"8c233d204d94ede92a35bb4deb9e2d58dd580257d9c9cab116734656884e10cf"} Nov 28 07:16:45 crc kubenswrapper[4922]: I1128 07:16:45.346057 4922 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 28 07:16:45 crc kubenswrapper[4922]: I1128 07:16:45.347625 4922 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-internal-api-0" 
podUID="6b683f38-454d-454f-8e2f-66270d8b6ad4" containerName="glance-log" containerID="cri-o://f9ae73c6e5820c1e8b8a1b39cbbe3b03d442968a285603ed7dda6482782aad40" gracePeriod=30 Nov 28 07:16:45 crc kubenswrapper[4922]: I1128 07:16:45.347706 4922 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-internal-api-0" podUID="6b683f38-454d-454f-8e2f-66270d8b6ad4" containerName="glance-httpd" containerID="cri-o://e8aebc7852e9b795522004738cf16b3d1f57f80a6770d3e3607cc890203e8877" gracePeriod=30 Nov 28 07:16:46 crc kubenswrapper[4922]: I1128 07:16:46.039206 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"ca306ec1-da8e-4551-8cfe-b5030809c100","Type":"ContainerStarted","Data":"212eecce4deba65fb3c8573614978cbe1b17c325dddc6db6cb3ec4f44cf1941b"} Nov 28 07:16:46 crc kubenswrapper[4922]: I1128 07:16:46.039623 4922 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Nov 28 07:16:46 crc kubenswrapper[4922]: I1128 07:16:46.039400 4922 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="ca306ec1-da8e-4551-8cfe-b5030809c100" containerName="proxy-httpd" containerID="cri-o://212eecce4deba65fb3c8573614978cbe1b17c325dddc6db6cb3ec4f44cf1941b" gracePeriod=30 Nov 28 07:16:46 crc kubenswrapper[4922]: I1128 07:16:46.039353 4922 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="ca306ec1-da8e-4551-8cfe-b5030809c100" containerName="ceilometer-central-agent" containerID="cri-o://4a00625a58f020d08df019dd1495902c432a469e48284ab1d739b7a8e0cf2e4b" gracePeriod=30 Nov 28 07:16:46 crc kubenswrapper[4922]: I1128 07:16:46.039453 4922 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="ca306ec1-da8e-4551-8cfe-b5030809c100" containerName="ceilometer-notification-agent" containerID="cri-o://8c233d204d94ede92a35bb4deb9e2d58dd580257d9c9cab116734656884e10cf" gracePeriod=30 Nov 28 07:16:46 crc kubenswrapper[4922]: I1128 07:16:46.039473 4922 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="ca306ec1-da8e-4551-8cfe-b5030809c100" containerName="sg-core" containerID="cri-o://146255e849c42fb7d994de3b5de500fa1339cd40ae040fd55aa1b1a572228f15" gracePeriod=30 Nov 28 07:16:46 crc kubenswrapper[4922]: I1128 07:16:46.054629 4922 generic.go:334] "Generic (PLEG): container finished" podID="6b683f38-454d-454f-8e2f-66270d8b6ad4" containerID="f9ae73c6e5820c1e8b8a1b39cbbe3b03d442968a285603ed7dda6482782aad40" exitCode=143 Nov 28 07:16:46 crc kubenswrapper[4922]: I1128 07:16:46.054670 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"6b683f38-454d-454f-8e2f-66270d8b6ad4","Type":"ContainerDied","Data":"f9ae73c6e5820c1e8b8a1b39cbbe3b03d442968a285603ed7dda6482782aad40"} Nov 28 07:16:46 crc kubenswrapper[4922]: I1128 07:16:46.794125 4922 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=3.588432932 podStartE2EDuration="7.793374799s" podCreationTimestamp="2025-11-28 07:16:39 +0000 UTC" firstStartedPulling="2025-11-28 07:16:41.055757583 +0000 UTC m=+1445.976153165" lastFinishedPulling="2025-11-28 07:16:45.26069944 +0000 UTC m=+1450.181095032" observedRunningTime="2025-11-28 07:16:46.075317513 +0000 UTC m=+1450.995713105" watchObservedRunningTime="2025-11-28 07:16:46.793374799 +0000 
UTC m=+1451.713770381" Nov 28 07:16:46 crc kubenswrapper[4922]: I1128 07:16:46.806410 4922 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-conductor-db-sync-fr9rg"] Nov 28 07:16:46 crc kubenswrapper[4922]: E1128 07:16:46.806861 4922 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b1e1fd22-8937-4a32-bf98-06e3655deb07" containerName="mariadb-account-create-update" Nov 28 07:16:46 crc kubenswrapper[4922]: I1128 07:16:46.806877 4922 state_mem.go:107] "Deleted CPUSet assignment" podUID="b1e1fd22-8937-4a32-bf98-06e3655deb07" containerName="mariadb-account-create-update" Nov 28 07:16:46 crc kubenswrapper[4922]: E1128 07:16:46.806920 4922 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="daea5abd-a0b5-4504-a7e5-53ec88446745" containerName="mariadb-database-create" Nov 28 07:16:46 crc kubenswrapper[4922]: I1128 07:16:46.806929 4922 state_mem.go:107] "Deleted CPUSet assignment" podUID="daea5abd-a0b5-4504-a7e5-53ec88446745" containerName="mariadb-database-create" Nov 28 07:16:46 crc kubenswrapper[4922]: E1128 07:16:46.806945 4922 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="067b3033-51f7-4b75-a86b-f3e666119c7f" containerName="mariadb-account-create-update" Nov 28 07:16:46 crc kubenswrapper[4922]: I1128 07:16:46.806953 4922 state_mem.go:107] "Deleted CPUSet assignment" podUID="067b3033-51f7-4b75-a86b-f3e666119c7f" containerName="mariadb-account-create-update" Nov 28 07:16:46 crc kubenswrapper[4922]: E1128 07:16:46.806964 4922 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c77ee72a-1c8d-4994-8d07-a0338a55489a" containerName="mariadb-database-create" Nov 28 07:16:46 crc kubenswrapper[4922]: I1128 07:16:46.806972 4922 state_mem.go:107] "Deleted CPUSet assignment" podUID="c77ee72a-1c8d-4994-8d07-a0338a55489a" containerName="mariadb-database-create" Nov 28 07:16:46 crc kubenswrapper[4922]: E1128 07:16:46.806983 4922 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="30134dd7-1b96-4057-8065-c3e59e70baff" containerName="mariadb-database-create" Nov 28 07:16:46 crc kubenswrapper[4922]: I1128 07:16:46.806990 4922 state_mem.go:107] "Deleted CPUSet assignment" podUID="30134dd7-1b96-4057-8065-c3e59e70baff" containerName="mariadb-database-create" Nov 28 07:16:46 crc kubenswrapper[4922]: E1128 07:16:46.807008 4922 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="932af8b2-8cfa-4807-a6a4-c2cb82e2bf82" containerName="mariadb-account-create-update" Nov 28 07:16:46 crc kubenswrapper[4922]: I1128 07:16:46.807016 4922 state_mem.go:107] "Deleted CPUSet assignment" podUID="932af8b2-8cfa-4807-a6a4-c2cb82e2bf82" containerName="mariadb-account-create-update" Nov 28 07:16:46 crc kubenswrapper[4922]: I1128 07:16:46.807255 4922 memory_manager.go:354] "RemoveStaleState removing state" podUID="067b3033-51f7-4b75-a86b-f3e666119c7f" containerName="mariadb-account-create-update" Nov 28 07:16:46 crc kubenswrapper[4922]: I1128 07:16:46.807270 4922 memory_manager.go:354] "RemoveStaleState removing state" podUID="daea5abd-a0b5-4504-a7e5-53ec88446745" containerName="mariadb-database-create" Nov 28 07:16:46 crc kubenswrapper[4922]: I1128 07:16:46.807290 4922 memory_manager.go:354] "RemoveStaleState removing state" podUID="932af8b2-8cfa-4807-a6a4-c2cb82e2bf82" containerName="mariadb-account-create-update" Nov 28 07:16:46 crc kubenswrapper[4922]: I1128 07:16:46.807299 4922 memory_manager.go:354] "RemoveStaleState removing state" podUID="c77ee72a-1c8d-4994-8d07-a0338a55489a" 
containerName="mariadb-database-create" Nov 28 07:16:46 crc kubenswrapper[4922]: I1128 07:16:46.807311 4922 memory_manager.go:354] "RemoveStaleState removing state" podUID="b1e1fd22-8937-4a32-bf98-06e3655deb07" containerName="mariadb-account-create-update" Nov 28 07:16:46 crc kubenswrapper[4922]: I1128 07:16:46.807327 4922 memory_manager.go:354] "RemoveStaleState removing state" podUID="30134dd7-1b96-4057-8065-c3e59e70baff" containerName="mariadb-database-create" Nov 28 07:16:46 crc kubenswrapper[4922]: I1128 07:16:46.808061 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-db-sync-fr9rg" Nov 28 07:16:46 crc kubenswrapper[4922]: I1128 07:16:46.816280 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-conductor-config-data" Nov 28 07:16:46 crc kubenswrapper[4922]: I1128 07:16:46.816334 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-nova-dockercfg-ptpqn" Nov 28 07:16:46 crc kubenswrapper[4922]: I1128 07:16:46.816479 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-conductor-scripts" Nov 28 07:16:46 crc kubenswrapper[4922]: I1128 07:16:46.818552 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-db-sync-fr9rg"] Nov 28 07:16:46 crc kubenswrapper[4922]: I1128 07:16:46.939371 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0a538d8e-6665-46a0-bf05-2957a37bc9a2-config-data\") pod \"nova-cell0-conductor-db-sync-fr9rg\" (UID: \"0a538d8e-6665-46a0-bf05-2957a37bc9a2\") " pod="openstack/nova-cell0-conductor-db-sync-fr9rg" Nov 28 07:16:46 crc kubenswrapper[4922]: I1128 07:16:46.939574 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-987pt\" (UniqueName: \"kubernetes.io/projected/0a538d8e-6665-46a0-bf05-2957a37bc9a2-kube-api-access-987pt\") pod \"nova-cell0-conductor-db-sync-fr9rg\" (UID: \"0a538d8e-6665-46a0-bf05-2957a37bc9a2\") " pod="openstack/nova-cell0-conductor-db-sync-fr9rg" Nov 28 07:16:46 crc kubenswrapper[4922]: I1128 07:16:46.939631 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0a538d8e-6665-46a0-bf05-2957a37bc9a2-scripts\") pod \"nova-cell0-conductor-db-sync-fr9rg\" (UID: \"0a538d8e-6665-46a0-bf05-2957a37bc9a2\") " pod="openstack/nova-cell0-conductor-db-sync-fr9rg" Nov 28 07:16:46 crc kubenswrapper[4922]: I1128 07:16:46.939657 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0a538d8e-6665-46a0-bf05-2957a37bc9a2-combined-ca-bundle\") pod \"nova-cell0-conductor-db-sync-fr9rg\" (UID: \"0a538d8e-6665-46a0-bf05-2957a37bc9a2\") " pod="openstack/nova-cell0-conductor-db-sync-fr9rg" Nov 28 07:16:47 crc kubenswrapper[4922]: I1128 07:16:47.041294 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0a538d8e-6665-46a0-bf05-2957a37bc9a2-config-data\") pod \"nova-cell0-conductor-db-sync-fr9rg\" (UID: \"0a538d8e-6665-46a0-bf05-2957a37bc9a2\") " pod="openstack/nova-cell0-conductor-db-sync-fr9rg" Nov 28 07:16:47 crc kubenswrapper[4922]: I1128 07:16:47.041585 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for 
volume \"kube-api-access-987pt\" (UniqueName: \"kubernetes.io/projected/0a538d8e-6665-46a0-bf05-2957a37bc9a2-kube-api-access-987pt\") pod \"nova-cell0-conductor-db-sync-fr9rg\" (UID: \"0a538d8e-6665-46a0-bf05-2957a37bc9a2\") " pod="openstack/nova-cell0-conductor-db-sync-fr9rg" Nov 28 07:16:47 crc kubenswrapper[4922]: I1128 07:16:47.041607 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0a538d8e-6665-46a0-bf05-2957a37bc9a2-scripts\") pod \"nova-cell0-conductor-db-sync-fr9rg\" (UID: \"0a538d8e-6665-46a0-bf05-2957a37bc9a2\") " pod="openstack/nova-cell0-conductor-db-sync-fr9rg" Nov 28 07:16:47 crc kubenswrapper[4922]: I1128 07:16:47.041621 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0a538d8e-6665-46a0-bf05-2957a37bc9a2-combined-ca-bundle\") pod \"nova-cell0-conductor-db-sync-fr9rg\" (UID: \"0a538d8e-6665-46a0-bf05-2957a37bc9a2\") " pod="openstack/nova-cell0-conductor-db-sync-fr9rg" Nov 28 07:16:47 crc kubenswrapper[4922]: I1128 07:16:47.046835 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0a538d8e-6665-46a0-bf05-2957a37bc9a2-combined-ca-bundle\") pod \"nova-cell0-conductor-db-sync-fr9rg\" (UID: \"0a538d8e-6665-46a0-bf05-2957a37bc9a2\") " pod="openstack/nova-cell0-conductor-db-sync-fr9rg" Nov 28 07:16:47 crc kubenswrapper[4922]: I1128 07:16:47.048215 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0a538d8e-6665-46a0-bf05-2957a37bc9a2-scripts\") pod \"nova-cell0-conductor-db-sync-fr9rg\" (UID: \"0a538d8e-6665-46a0-bf05-2957a37bc9a2\") " pod="openstack/nova-cell0-conductor-db-sync-fr9rg" Nov 28 07:16:47 crc kubenswrapper[4922]: I1128 07:16:47.049289 4922 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 28 07:16:47 crc kubenswrapper[4922]: I1128 07:16:47.049378 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0a538d8e-6665-46a0-bf05-2957a37bc9a2-config-data\") pod \"nova-cell0-conductor-db-sync-fr9rg\" (UID: \"0a538d8e-6665-46a0-bf05-2957a37bc9a2\") " pod="openstack/nova-cell0-conductor-db-sync-fr9rg" Nov 28 07:16:47 crc kubenswrapper[4922]: I1128 07:16:47.056943 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-987pt\" (UniqueName: \"kubernetes.io/projected/0a538d8e-6665-46a0-bf05-2957a37bc9a2-kube-api-access-987pt\") pod \"nova-cell0-conductor-db-sync-fr9rg\" (UID: \"0a538d8e-6665-46a0-bf05-2957a37bc9a2\") " pod="openstack/nova-cell0-conductor-db-sync-fr9rg" Nov 28 07:16:47 crc kubenswrapper[4922]: I1128 07:16:47.063951 4922 generic.go:334] "Generic (PLEG): container finished" podID="adb38287-ea0b-4e02-8bda-8022f86b0d81" containerID="fa33872a8c402ef10af3d2c2d8f905a05dc2def6cc7ed70358f37b02f79a3052" exitCode=0 Nov 28 07:16:47 crc kubenswrapper[4922]: I1128 07:16:47.064082 4922 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 28 07:16:47 crc kubenswrapper[4922]: I1128 07:16:47.064187 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"adb38287-ea0b-4e02-8bda-8022f86b0d81","Type":"ContainerDied","Data":"fa33872a8c402ef10af3d2c2d8f905a05dc2def6cc7ed70358f37b02f79a3052"} Nov 28 07:16:47 crc kubenswrapper[4922]: I1128 07:16:47.064232 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"adb38287-ea0b-4e02-8bda-8022f86b0d81","Type":"ContainerDied","Data":"4a6bcc8bbc7babd809e734ad55eef302113a1ac8d71f673ed0f5c39689263abd"} Nov 28 07:16:47 crc kubenswrapper[4922]: I1128 07:16:47.064249 4922 scope.go:117] "RemoveContainer" containerID="fa33872a8c402ef10af3d2c2d8f905a05dc2def6cc7ed70358f37b02f79a3052" Nov 28 07:16:47 crc kubenswrapper[4922]: I1128 07:16:47.076344 4922 generic.go:334] "Generic (PLEG): container finished" podID="ca306ec1-da8e-4551-8cfe-b5030809c100" containerID="212eecce4deba65fb3c8573614978cbe1b17c325dddc6db6cb3ec4f44cf1941b" exitCode=0 Nov 28 07:16:47 crc kubenswrapper[4922]: I1128 07:16:47.076378 4922 generic.go:334] "Generic (PLEG): container finished" podID="ca306ec1-da8e-4551-8cfe-b5030809c100" containerID="146255e849c42fb7d994de3b5de500fa1339cd40ae040fd55aa1b1a572228f15" exitCode=2 Nov 28 07:16:47 crc kubenswrapper[4922]: I1128 07:16:47.076387 4922 generic.go:334] "Generic (PLEG): container finished" podID="ca306ec1-da8e-4551-8cfe-b5030809c100" containerID="8c233d204d94ede92a35bb4deb9e2d58dd580257d9c9cab116734656884e10cf" exitCode=0 Nov 28 07:16:47 crc kubenswrapper[4922]: I1128 07:16:47.076407 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"ca306ec1-da8e-4551-8cfe-b5030809c100","Type":"ContainerDied","Data":"212eecce4deba65fb3c8573614978cbe1b17c325dddc6db6cb3ec4f44cf1941b"} Nov 28 07:16:47 crc kubenswrapper[4922]: I1128 07:16:47.076437 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"ca306ec1-da8e-4551-8cfe-b5030809c100","Type":"ContainerDied","Data":"146255e849c42fb7d994de3b5de500fa1339cd40ae040fd55aa1b1a572228f15"} Nov 28 07:16:47 crc kubenswrapper[4922]: I1128 07:16:47.076451 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"ca306ec1-da8e-4551-8cfe-b5030809c100","Type":"ContainerDied","Data":"8c233d204d94ede92a35bb4deb9e2d58dd580257d9c9cab116734656884e10cf"} Nov 28 07:16:47 crc kubenswrapper[4922]: I1128 07:16:47.088991 4922 scope.go:117] "RemoveContainer" containerID="b5801fe62bdc725903ec844459b175013f18223c1fb6f02b97215330875f416c" Nov 28 07:16:47 crc kubenswrapper[4922]: I1128 07:16:47.107084 4922 scope.go:117] "RemoveContainer" containerID="fa33872a8c402ef10af3d2c2d8f905a05dc2def6cc7ed70358f37b02f79a3052" Nov 28 07:16:47 crc kubenswrapper[4922]: E1128 07:16:47.107607 4922 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"fa33872a8c402ef10af3d2c2d8f905a05dc2def6cc7ed70358f37b02f79a3052\": container with ID starting with fa33872a8c402ef10af3d2c2d8f905a05dc2def6cc7ed70358f37b02f79a3052 not found: ID does not exist" containerID="fa33872a8c402ef10af3d2c2d8f905a05dc2def6cc7ed70358f37b02f79a3052" Nov 28 07:16:47 crc kubenswrapper[4922]: I1128 07:16:47.107656 4922 pod_container_deletor.go:53] "DeleteContainer returned error" 
containerID={"Type":"cri-o","ID":"fa33872a8c402ef10af3d2c2d8f905a05dc2def6cc7ed70358f37b02f79a3052"} err="failed to get container status \"fa33872a8c402ef10af3d2c2d8f905a05dc2def6cc7ed70358f37b02f79a3052\": rpc error: code = NotFound desc = could not find container \"fa33872a8c402ef10af3d2c2d8f905a05dc2def6cc7ed70358f37b02f79a3052\": container with ID starting with fa33872a8c402ef10af3d2c2d8f905a05dc2def6cc7ed70358f37b02f79a3052 not found: ID does not exist" Nov 28 07:16:47 crc kubenswrapper[4922]: I1128 07:16:47.107683 4922 scope.go:117] "RemoveContainer" containerID="b5801fe62bdc725903ec844459b175013f18223c1fb6f02b97215330875f416c" Nov 28 07:16:47 crc kubenswrapper[4922]: E1128 07:16:47.108297 4922 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b5801fe62bdc725903ec844459b175013f18223c1fb6f02b97215330875f416c\": container with ID starting with b5801fe62bdc725903ec844459b175013f18223c1fb6f02b97215330875f416c not found: ID does not exist" containerID="b5801fe62bdc725903ec844459b175013f18223c1fb6f02b97215330875f416c" Nov 28 07:16:47 crc kubenswrapper[4922]: I1128 07:16:47.108349 4922 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b5801fe62bdc725903ec844459b175013f18223c1fb6f02b97215330875f416c"} err="failed to get container status \"b5801fe62bdc725903ec844459b175013f18223c1fb6f02b97215330875f416c\": rpc error: code = NotFound desc = could not find container \"b5801fe62bdc725903ec844459b175013f18223c1fb6f02b97215330875f416c\": container with ID starting with b5801fe62bdc725903ec844459b175013f18223c1fb6f02b97215330875f416c not found: ID does not exist" Nov 28 07:16:47 crc kubenswrapper[4922]: I1128 07:16:47.136812 4922 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-conductor-db-sync-fr9rg" Nov 28 07:16:47 crc kubenswrapper[4922]: I1128 07:16:47.146580 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/adb38287-ea0b-4e02-8bda-8022f86b0d81-public-tls-certs\") pod \"adb38287-ea0b-4e02-8bda-8022f86b0d81\" (UID: \"adb38287-ea0b-4e02-8bda-8022f86b0d81\") " Nov 28 07:16:47 crc kubenswrapper[4922]: I1128 07:16:47.146652 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/adb38287-ea0b-4e02-8bda-8022f86b0d81-logs\") pod \"adb38287-ea0b-4e02-8bda-8022f86b0d81\" (UID: \"adb38287-ea0b-4e02-8bda-8022f86b0d81\") " Nov 28 07:16:47 crc kubenswrapper[4922]: I1128 07:16:47.146801 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/adb38287-ea0b-4e02-8bda-8022f86b0d81-config-data\") pod \"adb38287-ea0b-4e02-8bda-8022f86b0d81\" (UID: \"adb38287-ea0b-4e02-8bda-8022f86b0d81\") " Nov 28 07:16:47 crc kubenswrapper[4922]: I1128 07:16:47.146829 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/adb38287-ea0b-4e02-8bda-8022f86b0d81-combined-ca-bundle\") pod \"adb38287-ea0b-4e02-8bda-8022f86b0d81\" (UID: \"adb38287-ea0b-4e02-8bda-8022f86b0d81\") " Nov 28 07:16:47 crc kubenswrapper[4922]: I1128 07:16:47.146896 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/adb38287-ea0b-4e02-8bda-8022f86b0d81-scripts\") pod \"adb38287-ea0b-4e02-8bda-8022f86b0d81\" (UID: \"adb38287-ea0b-4e02-8bda-8022f86b0d81\") " Nov 28 07:16:47 crc kubenswrapper[4922]: I1128 07:16:47.146951 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4w7t9\" (UniqueName: \"kubernetes.io/projected/adb38287-ea0b-4e02-8bda-8022f86b0d81-kube-api-access-4w7t9\") pod \"adb38287-ea0b-4e02-8bda-8022f86b0d81\" (UID: \"adb38287-ea0b-4e02-8bda-8022f86b0d81\") " Nov 28 07:16:47 crc kubenswrapper[4922]: I1128 07:16:47.146988 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/adb38287-ea0b-4e02-8bda-8022f86b0d81-httpd-run\") pod \"adb38287-ea0b-4e02-8bda-8022f86b0d81\" (UID: \"adb38287-ea0b-4e02-8bda-8022f86b0d81\") " Nov 28 07:16:47 crc kubenswrapper[4922]: I1128 07:16:47.147028 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") pod \"adb38287-ea0b-4e02-8bda-8022f86b0d81\" (UID: \"adb38287-ea0b-4e02-8bda-8022f86b0d81\") " Nov 28 07:16:47 crc kubenswrapper[4922]: I1128 07:16:47.147730 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/adb38287-ea0b-4e02-8bda-8022f86b0d81-logs" (OuterVolumeSpecName: "logs") pod "adb38287-ea0b-4e02-8bda-8022f86b0d81" (UID: "adb38287-ea0b-4e02-8bda-8022f86b0d81"). InnerVolumeSpecName "logs". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 07:16:47 crc kubenswrapper[4922]: I1128 07:16:47.147747 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/adb38287-ea0b-4e02-8bda-8022f86b0d81-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "adb38287-ea0b-4e02-8bda-8022f86b0d81" (UID: "adb38287-ea0b-4e02-8bda-8022f86b0d81"). InnerVolumeSpecName "httpd-run". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 07:16:47 crc kubenswrapper[4922]: I1128 07:16:47.153668 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage11-crc" (OuterVolumeSpecName: "glance") pod "adb38287-ea0b-4e02-8bda-8022f86b0d81" (UID: "adb38287-ea0b-4e02-8bda-8022f86b0d81"). InnerVolumeSpecName "local-storage11-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Nov 28 07:16:47 crc kubenswrapper[4922]: I1128 07:16:47.156004 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/adb38287-ea0b-4e02-8bda-8022f86b0d81-kube-api-access-4w7t9" (OuterVolumeSpecName: "kube-api-access-4w7t9") pod "adb38287-ea0b-4e02-8bda-8022f86b0d81" (UID: "adb38287-ea0b-4e02-8bda-8022f86b0d81"). InnerVolumeSpecName "kube-api-access-4w7t9". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 07:16:47 crc kubenswrapper[4922]: I1128 07:16:47.156466 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/adb38287-ea0b-4e02-8bda-8022f86b0d81-scripts" (OuterVolumeSpecName: "scripts") pod "adb38287-ea0b-4e02-8bda-8022f86b0d81" (UID: "adb38287-ea0b-4e02-8bda-8022f86b0d81"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 07:16:47 crc kubenswrapper[4922]: I1128 07:16:47.206392 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/adb38287-ea0b-4e02-8bda-8022f86b0d81-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "adb38287-ea0b-4e02-8bda-8022f86b0d81" (UID: "adb38287-ea0b-4e02-8bda-8022f86b0d81"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 07:16:47 crc kubenswrapper[4922]: I1128 07:16:47.249012 4922 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/adb38287-ea0b-4e02-8bda-8022f86b0d81-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 07:16:47 crc kubenswrapper[4922]: I1128 07:16:47.249341 4922 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/adb38287-ea0b-4e02-8bda-8022f86b0d81-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 07:16:47 crc kubenswrapper[4922]: I1128 07:16:47.249354 4922 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4w7t9\" (UniqueName: \"kubernetes.io/projected/adb38287-ea0b-4e02-8bda-8022f86b0d81-kube-api-access-4w7t9\") on node \"crc\" DevicePath \"\"" Nov 28 07:16:47 crc kubenswrapper[4922]: I1128 07:16:47.249364 4922 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/adb38287-ea0b-4e02-8bda-8022f86b0d81-httpd-run\") on node \"crc\" DevicePath \"\"" Nov 28 07:16:47 crc kubenswrapper[4922]: I1128 07:16:47.249400 4922 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") on node \"crc\" " Nov 28 07:16:47 crc kubenswrapper[4922]: I1128 07:16:47.249408 4922 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/adb38287-ea0b-4e02-8bda-8022f86b0d81-logs\") on node \"crc\" DevicePath \"\"" Nov 28 07:16:47 crc kubenswrapper[4922]: I1128 07:16:47.264630 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/adb38287-ea0b-4e02-8bda-8022f86b0d81-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "adb38287-ea0b-4e02-8bda-8022f86b0d81" (UID: "adb38287-ea0b-4e02-8bda-8022f86b0d81"). InnerVolumeSpecName "public-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 07:16:47 crc kubenswrapper[4922]: I1128 07:16:47.311873 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/adb38287-ea0b-4e02-8bda-8022f86b0d81-config-data" (OuterVolumeSpecName: "config-data") pod "adb38287-ea0b-4e02-8bda-8022f86b0d81" (UID: "adb38287-ea0b-4e02-8bda-8022f86b0d81"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 07:16:47 crc kubenswrapper[4922]: I1128 07:16:47.359302 4922 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/adb38287-ea0b-4e02-8bda-8022f86b0d81-public-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 28 07:16:47 crc kubenswrapper[4922]: I1128 07:16:47.359335 4922 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/adb38287-ea0b-4e02-8bda-8022f86b0d81-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 07:16:47 crc kubenswrapper[4922]: I1128 07:16:47.364695 4922 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/swift-proxy-cc5b55cb5-8tgkn" Nov 28 07:16:47 crc kubenswrapper[4922]: I1128 07:16:47.408579 4922 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage11-crc" (UniqueName: "kubernetes.io/local-volume/local-storage11-crc") on node "crc" Nov 28 07:16:47 crc kubenswrapper[4922]: I1128 07:16:47.449841 4922 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 28 07:16:47 crc kubenswrapper[4922]: I1128 07:16:47.461098 4922 reconciler_common.go:293] "Volume detached for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") on node \"crc\" DevicePath \"\"" Nov 28 07:16:47 crc kubenswrapper[4922]: I1128 07:16:47.465947 4922 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 28 07:16:47 crc kubenswrapper[4922]: I1128 07:16:47.475752 4922 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-external-api-0"] Nov 28 07:16:47 crc kubenswrapper[4922]: E1128 07:16:47.476135 4922 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="adb38287-ea0b-4e02-8bda-8022f86b0d81" containerName="glance-httpd" Nov 28 07:16:47 crc kubenswrapper[4922]: I1128 07:16:47.476153 4922 state_mem.go:107] "Deleted CPUSet assignment" podUID="adb38287-ea0b-4e02-8bda-8022f86b0d81" containerName="glance-httpd" Nov 28 07:16:47 crc kubenswrapper[4922]: E1128 07:16:47.476174 4922 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="adb38287-ea0b-4e02-8bda-8022f86b0d81" containerName="glance-log" Nov 28 07:16:47 crc kubenswrapper[4922]: I1128 07:16:47.476181 4922 state_mem.go:107] "Deleted CPUSet assignment" podUID="adb38287-ea0b-4e02-8bda-8022f86b0d81" containerName="glance-log" Nov 28 07:16:47 crc kubenswrapper[4922]: I1128 07:16:47.476388 4922 memory_manager.go:354] "RemoveStaleState removing state" podUID="adb38287-ea0b-4e02-8bda-8022f86b0d81" containerName="glance-log" Nov 28 07:16:47 crc kubenswrapper[4922]: I1128 07:16:47.476406 4922 memory_manager.go:354] "RemoveStaleState removing state" podUID="adb38287-ea0b-4e02-8bda-8022f86b0d81" containerName="glance-httpd" Nov 28 07:16:47 crc kubenswrapper[4922]: I1128 07:16:47.477323 4922 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 28 07:16:47 crc kubenswrapper[4922]: I1128 07:16:47.479918 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-glance-default-public-svc" Nov 28 07:16:47 crc kubenswrapper[4922]: I1128 07:16:47.482743 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-external-config-data" Nov 28 07:16:47 crc kubenswrapper[4922]: I1128 07:16:47.484329 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 28 07:16:47 crc kubenswrapper[4922]: I1128 07:16:47.664182 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") pod \"glance-default-external-api-0\" (UID: \"182970fb-401f-404c-81c1-db0294b02167\") " pod="openstack/glance-default-external-api-0" Nov 28 07:16:47 crc kubenswrapper[4922]: I1128 07:16:47.664258 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/182970fb-401f-404c-81c1-db0294b02167-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"182970fb-401f-404c-81c1-db0294b02167\") " pod="openstack/glance-default-external-api-0" Nov 28 07:16:47 crc kubenswrapper[4922]: I1128 07:16:47.664287 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/182970fb-401f-404c-81c1-db0294b02167-logs\") pod \"glance-default-external-api-0\" (UID: \"182970fb-401f-404c-81c1-db0294b02167\") " pod="openstack/glance-default-external-api-0" Nov 28 07:16:47 crc kubenswrapper[4922]: I1128 07:16:47.664323 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/182970fb-401f-404c-81c1-db0294b02167-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"182970fb-401f-404c-81c1-db0294b02167\") " pod="openstack/glance-default-external-api-0" Nov 28 07:16:47 crc kubenswrapper[4922]: I1128 07:16:47.664367 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/182970fb-401f-404c-81c1-db0294b02167-config-data\") pod \"glance-default-external-api-0\" (UID: \"182970fb-401f-404c-81c1-db0294b02167\") " pod="openstack/glance-default-external-api-0" Nov 28 07:16:47 crc kubenswrapper[4922]: I1128 07:16:47.664393 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/182970fb-401f-404c-81c1-db0294b02167-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"182970fb-401f-404c-81c1-db0294b02167\") " pod="openstack/glance-default-external-api-0" Nov 28 07:16:47 crc kubenswrapper[4922]: I1128 07:16:47.664430 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5rdg5\" (UniqueName: \"kubernetes.io/projected/182970fb-401f-404c-81c1-db0294b02167-kube-api-access-5rdg5\") pod \"glance-default-external-api-0\" (UID: \"182970fb-401f-404c-81c1-db0294b02167\") " pod="openstack/glance-default-external-api-0" Nov 28 07:16:47 crc kubenswrapper[4922]: I1128 07:16:47.664451 4922 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/182970fb-401f-404c-81c1-db0294b02167-scripts\") pod \"glance-default-external-api-0\" (UID: \"182970fb-401f-404c-81c1-db0294b02167\") " pod="openstack/glance-default-external-api-0" Nov 28 07:16:47 crc kubenswrapper[4922]: I1128 07:16:47.718563 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-db-sync-fr9rg"] Nov 28 07:16:47 crc kubenswrapper[4922]: W1128 07:16:47.725167 4922 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod0a538d8e_6665_46a0_bf05_2957a37bc9a2.slice/crio-051ab5eeb0f7418015c018d6b5cd1bef6e5b544b6cb022ec4b13119051c258d4 WatchSource:0}: Error finding container 051ab5eeb0f7418015c018d6b5cd1bef6e5b544b6cb022ec4b13119051c258d4: Status 404 returned error can't find the container with id 051ab5eeb0f7418015c018d6b5cd1bef6e5b544b6cb022ec4b13119051c258d4 Nov 28 07:16:47 crc kubenswrapper[4922]: I1128 07:16:47.766399 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") pod \"glance-default-external-api-0\" (UID: \"182970fb-401f-404c-81c1-db0294b02167\") " pod="openstack/glance-default-external-api-0" Nov 28 07:16:47 crc kubenswrapper[4922]: I1128 07:16:47.766963 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/182970fb-401f-404c-81c1-db0294b02167-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"182970fb-401f-404c-81c1-db0294b02167\") " pod="openstack/glance-default-external-api-0" Nov 28 07:16:47 crc kubenswrapper[4922]: I1128 07:16:47.767073 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/182970fb-401f-404c-81c1-db0294b02167-logs\") pod \"glance-default-external-api-0\" (UID: \"182970fb-401f-404c-81c1-db0294b02167\") " pod="openstack/glance-default-external-api-0" Nov 28 07:16:47 crc kubenswrapper[4922]: I1128 07:16:47.767208 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/182970fb-401f-404c-81c1-db0294b02167-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"182970fb-401f-404c-81c1-db0294b02167\") " pod="openstack/glance-default-external-api-0" Nov 28 07:16:47 crc kubenswrapper[4922]: I1128 07:16:47.767398 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/182970fb-401f-404c-81c1-db0294b02167-config-data\") pod \"glance-default-external-api-0\" (UID: \"182970fb-401f-404c-81c1-db0294b02167\") " pod="openstack/glance-default-external-api-0" Nov 28 07:16:47 crc kubenswrapper[4922]: I1128 07:16:47.767535 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/182970fb-401f-404c-81c1-db0294b02167-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"182970fb-401f-404c-81c1-db0294b02167\") " pod="openstack/glance-default-external-api-0" Nov 28 07:16:47 crc kubenswrapper[4922]: I1128 07:16:47.767661 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5rdg5\" (UniqueName: 
\"kubernetes.io/projected/182970fb-401f-404c-81c1-db0294b02167-kube-api-access-5rdg5\") pod \"glance-default-external-api-0\" (UID: \"182970fb-401f-404c-81c1-db0294b02167\") " pod="openstack/glance-default-external-api-0" Nov 28 07:16:47 crc kubenswrapper[4922]: I1128 07:16:47.768144 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/182970fb-401f-404c-81c1-db0294b02167-scripts\") pod \"glance-default-external-api-0\" (UID: \"182970fb-401f-404c-81c1-db0294b02167\") " pod="openstack/glance-default-external-api-0" Nov 28 07:16:47 crc kubenswrapper[4922]: I1128 07:16:47.768050 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/182970fb-401f-404c-81c1-db0294b02167-logs\") pod \"glance-default-external-api-0\" (UID: \"182970fb-401f-404c-81c1-db0294b02167\") " pod="openstack/glance-default-external-api-0" Nov 28 07:16:47 crc kubenswrapper[4922]: I1128 07:16:47.768140 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/182970fb-401f-404c-81c1-db0294b02167-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"182970fb-401f-404c-81c1-db0294b02167\") " pod="openstack/glance-default-external-api-0" Nov 28 07:16:47 crc kubenswrapper[4922]: I1128 07:16:47.766872 4922 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") pod \"glance-default-external-api-0\" (UID: \"182970fb-401f-404c-81c1-db0294b02167\") device mount path \"/mnt/openstack/pv11\"" pod="openstack/glance-default-external-api-0" Nov 28 07:16:47 crc kubenswrapper[4922]: I1128 07:16:47.771858 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/182970fb-401f-404c-81c1-db0294b02167-scripts\") pod \"glance-default-external-api-0\" (UID: \"182970fb-401f-404c-81c1-db0294b02167\") " pod="openstack/glance-default-external-api-0" Nov 28 07:16:47 crc kubenswrapper[4922]: I1128 07:16:47.772084 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/182970fb-401f-404c-81c1-db0294b02167-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"182970fb-401f-404c-81c1-db0294b02167\") " pod="openstack/glance-default-external-api-0" Nov 28 07:16:47 crc kubenswrapper[4922]: I1128 07:16:47.773097 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/182970fb-401f-404c-81c1-db0294b02167-config-data\") pod \"glance-default-external-api-0\" (UID: \"182970fb-401f-404c-81c1-db0294b02167\") " pod="openstack/glance-default-external-api-0" Nov 28 07:16:47 crc kubenswrapper[4922]: I1128 07:16:47.775966 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/182970fb-401f-404c-81c1-db0294b02167-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"182970fb-401f-404c-81c1-db0294b02167\") " pod="openstack/glance-default-external-api-0" Nov 28 07:16:47 crc kubenswrapper[4922]: I1128 07:16:47.790778 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5rdg5\" (UniqueName: \"kubernetes.io/projected/182970fb-401f-404c-81c1-db0294b02167-kube-api-access-5rdg5\") pod \"glance-default-external-api-0\" 
(UID: \"182970fb-401f-404c-81c1-db0294b02167\") " pod="openstack/glance-default-external-api-0" Nov 28 07:16:47 crc kubenswrapper[4922]: I1128 07:16:47.808242 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") pod \"glance-default-external-api-0\" (UID: \"182970fb-401f-404c-81c1-db0294b02167\") " pod="openstack/glance-default-external-api-0" Nov 28 07:16:48 crc kubenswrapper[4922]: I1128 07:16:48.086461 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-db-sync-fr9rg" event={"ID":"0a538d8e-6665-46a0-bf05-2957a37bc9a2","Type":"ContainerStarted","Data":"051ab5eeb0f7418015c018d6b5cd1bef6e5b544b6cb022ec4b13119051c258d4"} Nov 28 07:16:48 crc kubenswrapper[4922]: I1128 07:16:48.099172 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 28 07:16:48 crc kubenswrapper[4922]: I1128 07:16:48.620260 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 28 07:16:49 crc kubenswrapper[4922]: I1128 07:16:49.040176 4922 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Nov 28 07:16:49 crc kubenswrapper[4922]: I1128 07:16:49.096394 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"182970fb-401f-404c-81c1-db0294b02167","Type":"ContainerStarted","Data":"027019815c473c0d1a21096a0dde1ca6ef8c331aee9f75ce7caae024df518966"} Nov 28 07:16:49 crc kubenswrapper[4922]: I1128 07:16:49.106836 4922 generic.go:334] "Generic (PLEG): container finished" podID="6b683f38-454d-454f-8e2f-66270d8b6ad4" containerID="e8aebc7852e9b795522004738cf16b3d1f57f80a6770d3e3607cc890203e8877" exitCode=0 Nov 28 07:16:49 crc kubenswrapper[4922]: I1128 07:16:49.106878 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"6b683f38-454d-454f-8e2f-66270d8b6ad4","Type":"ContainerDied","Data":"e8aebc7852e9b795522004738cf16b3d1f57f80a6770d3e3607cc890203e8877"} Nov 28 07:16:49 crc kubenswrapper[4922]: I1128 07:16:49.106903 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"6b683f38-454d-454f-8e2f-66270d8b6ad4","Type":"ContainerDied","Data":"55bb3843894dfea8aca4a8c22ee1094563c3f3bcd3ef7a4d98e73b43e555cf5a"} Nov 28 07:16:49 crc kubenswrapper[4922]: I1128 07:16:49.106919 4922 scope.go:117] "RemoveContainer" containerID="e8aebc7852e9b795522004738cf16b3d1f57f80a6770d3e3607cc890203e8877" Nov 28 07:16:49 crc kubenswrapper[4922]: I1128 07:16:49.107068 4922 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-internal-api-0" Nov 28 07:16:49 crc kubenswrapper[4922]: I1128 07:16:49.179359 4922 scope.go:117] "RemoveContainer" containerID="f9ae73c6e5820c1e8b8a1b39cbbe3b03d442968a285603ed7dda6482782aad40" Nov 28 07:16:49 crc kubenswrapper[4922]: I1128 07:16:49.204989 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-scqvh\" (UniqueName: \"kubernetes.io/projected/6b683f38-454d-454f-8e2f-66270d8b6ad4-kube-api-access-scqvh\") pod \"6b683f38-454d-454f-8e2f-66270d8b6ad4\" (UID: \"6b683f38-454d-454f-8e2f-66270d8b6ad4\") " Nov 28 07:16:49 crc kubenswrapper[4922]: I1128 07:16:49.205042 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6b683f38-454d-454f-8e2f-66270d8b6ad4-combined-ca-bundle\") pod \"6b683f38-454d-454f-8e2f-66270d8b6ad4\" (UID: \"6b683f38-454d-454f-8e2f-66270d8b6ad4\") " Nov 28 07:16:49 crc kubenswrapper[4922]: I1128 07:16:49.205097 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/6b683f38-454d-454f-8e2f-66270d8b6ad4-internal-tls-certs\") pod \"6b683f38-454d-454f-8e2f-66270d8b6ad4\" (UID: \"6b683f38-454d-454f-8e2f-66270d8b6ad4\") " Nov 28 07:16:49 crc kubenswrapper[4922]: I1128 07:16:49.205163 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"6b683f38-454d-454f-8e2f-66270d8b6ad4\" (UID: \"6b683f38-454d-454f-8e2f-66270d8b6ad4\") " Nov 28 07:16:49 crc kubenswrapper[4922]: I1128 07:16:49.205263 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/6b683f38-454d-454f-8e2f-66270d8b6ad4-logs\") pod \"6b683f38-454d-454f-8e2f-66270d8b6ad4\" (UID: \"6b683f38-454d-454f-8e2f-66270d8b6ad4\") " Nov 28 07:16:49 crc kubenswrapper[4922]: I1128 07:16:49.205338 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6b683f38-454d-454f-8e2f-66270d8b6ad4-config-data\") pod \"6b683f38-454d-454f-8e2f-66270d8b6ad4\" (UID: \"6b683f38-454d-454f-8e2f-66270d8b6ad4\") " Nov 28 07:16:49 crc kubenswrapper[4922]: I1128 07:16:49.205395 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6b683f38-454d-454f-8e2f-66270d8b6ad4-scripts\") pod \"6b683f38-454d-454f-8e2f-66270d8b6ad4\" (UID: \"6b683f38-454d-454f-8e2f-66270d8b6ad4\") " Nov 28 07:16:49 crc kubenswrapper[4922]: I1128 07:16:49.205450 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/6b683f38-454d-454f-8e2f-66270d8b6ad4-httpd-run\") pod \"6b683f38-454d-454f-8e2f-66270d8b6ad4\" (UID: \"6b683f38-454d-454f-8e2f-66270d8b6ad4\") " Nov 28 07:16:49 crc kubenswrapper[4922]: I1128 07:16:49.206353 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6b683f38-454d-454f-8e2f-66270d8b6ad4-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "6b683f38-454d-454f-8e2f-66270d8b6ad4" (UID: "6b683f38-454d-454f-8e2f-66270d8b6ad4"). InnerVolumeSpecName "httpd-run". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 07:16:49 crc kubenswrapper[4922]: I1128 07:16:49.207363 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6b683f38-454d-454f-8e2f-66270d8b6ad4-logs" (OuterVolumeSpecName: "logs") pod "6b683f38-454d-454f-8e2f-66270d8b6ad4" (UID: "6b683f38-454d-454f-8e2f-66270d8b6ad4"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 07:16:49 crc kubenswrapper[4922]: I1128 07:16:49.213437 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6b683f38-454d-454f-8e2f-66270d8b6ad4-scripts" (OuterVolumeSpecName: "scripts") pod "6b683f38-454d-454f-8e2f-66270d8b6ad4" (UID: "6b683f38-454d-454f-8e2f-66270d8b6ad4"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 07:16:49 crc kubenswrapper[4922]: I1128 07:16:49.213455 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6b683f38-454d-454f-8e2f-66270d8b6ad4-kube-api-access-scqvh" (OuterVolumeSpecName: "kube-api-access-scqvh") pod "6b683f38-454d-454f-8e2f-66270d8b6ad4" (UID: "6b683f38-454d-454f-8e2f-66270d8b6ad4"). InnerVolumeSpecName "kube-api-access-scqvh". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 07:16:49 crc kubenswrapper[4922]: I1128 07:16:49.213610 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage10-crc" (OuterVolumeSpecName: "glance") pod "6b683f38-454d-454f-8e2f-66270d8b6ad4" (UID: "6b683f38-454d-454f-8e2f-66270d8b6ad4"). InnerVolumeSpecName "local-storage10-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Nov 28 07:16:49 crc kubenswrapper[4922]: I1128 07:16:49.228674 4922 scope.go:117] "RemoveContainer" containerID="e8aebc7852e9b795522004738cf16b3d1f57f80a6770d3e3607cc890203e8877" Nov 28 07:16:49 crc kubenswrapper[4922]: E1128 07:16:49.231901 4922 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e8aebc7852e9b795522004738cf16b3d1f57f80a6770d3e3607cc890203e8877\": container with ID starting with e8aebc7852e9b795522004738cf16b3d1f57f80a6770d3e3607cc890203e8877 not found: ID does not exist" containerID="e8aebc7852e9b795522004738cf16b3d1f57f80a6770d3e3607cc890203e8877" Nov 28 07:16:49 crc kubenswrapper[4922]: I1128 07:16:49.232026 4922 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e8aebc7852e9b795522004738cf16b3d1f57f80a6770d3e3607cc890203e8877"} err="failed to get container status \"e8aebc7852e9b795522004738cf16b3d1f57f80a6770d3e3607cc890203e8877\": rpc error: code = NotFound desc = could not find container \"e8aebc7852e9b795522004738cf16b3d1f57f80a6770d3e3607cc890203e8877\": container with ID starting with e8aebc7852e9b795522004738cf16b3d1f57f80a6770d3e3607cc890203e8877 not found: ID does not exist" Nov 28 07:16:49 crc kubenswrapper[4922]: I1128 07:16:49.232103 4922 scope.go:117] "RemoveContainer" containerID="f9ae73c6e5820c1e8b8a1b39cbbe3b03d442968a285603ed7dda6482782aad40" Nov 28 07:16:49 crc kubenswrapper[4922]: E1128 07:16:49.235982 4922 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f9ae73c6e5820c1e8b8a1b39cbbe3b03d442968a285603ed7dda6482782aad40\": container with ID starting with f9ae73c6e5820c1e8b8a1b39cbbe3b03d442968a285603ed7dda6482782aad40 not found: ID does not 
exist" containerID="f9ae73c6e5820c1e8b8a1b39cbbe3b03d442968a285603ed7dda6482782aad40" Nov 28 07:16:49 crc kubenswrapper[4922]: I1128 07:16:49.236011 4922 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f9ae73c6e5820c1e8b8a1b39cbbe3b03d442968a285603ed7dda6482782aad40"} err="failed to get container status \"f9ae73c6e5820c1e8b8a1b39cbbe3b03d442968a285603ed7dda6482782aad40\": rpc error: code = NotFound desc = could not find container \"f9ae73c6e5820c1e8b8a1b39cbbe3b03d442968a285603ed7dda6482782aad40\": container with ID starting with f9ae73c6e5820c1e8b8a1b39cbbe3b03d442968a285603ed7dda6482782aad40 not found: ID does not exist" Nov 28 07:16:49 crc kubenswrapper[4922]: I1128 07:16:49.261558 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6b683f38-454d-454f-8e2f-66270d8b6ad4-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "6b683f38-454d-454f-8e2f-66270d8b6ad4" (UID: "6b683f38-454d-454f-8e2f-66270d8b6ad4"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 07:16:49 crc kubenswrapper[4922]: I1128 07:16:49.279940 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6b683f38-454d-454f-8e2f-66270d8b6ad4-config-data" (OuterVolumeSpecName: "config-data") pod "6b683f38-454d-454f-8e2f-66270d8b6ad4" (UID: "6b683f38-454d-454f-8e2f-66270d8b6ad4"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 07:16:49 crc kubenswrapper[4922]: I1128 07:16:49.281994 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6b683f38-454d-454f-8e2f-66270d8b6ad4-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "6b683f38-454d-454f-8e2f-66270d8b6ad4" (UID: "6b683f38-454d-454f-8e2f-66270d8b6ad4"). InnerVolumeSpecName "internal-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 07:16:49 crc kubenswrapper[4922]: I1128 07:16:49.307264 4922 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/6b683f38-454d-454f-8e2f-66270d8b6ad4-httpd-run\") on node \"crc\" DevicePath \"\"" Nov 28 07:16:49 crc kubenswrapper[4922]: I1128 07:16:49.307292 4922 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-scqvh\" (UniqueName: \"kubernetes.io/projected/6b683f38-454d-454f-8e2f-66270d8b6ad4-kube-api-access-scqvh\") on node \"crc\" DevicePath \"\"" Nov 28 07:16:49 crc kubenswrapper[4922]: I1128 07:16:49.307302 4922 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6b683f38-454d-454f-8e2f-66270d8b6ad4-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 07:16:49 crc kubenswrapper[4922]: I1128 07:16:49.307310 4922 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/6b683f38-454d-454f-8e2f-66270d8b6ad4-internal-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 28 07:16:49 crc kubenswrapper[4922]: I1128 07:16:49.307339 4922 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") on node \"crc\" " Nov 28 07:16:49 crc kubenswrapper[4922]: I1128 07:16:49.307348 4922 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/6b683f38-454d-454f-8e2f-66270d8b6ad4-logs\") on node \"crc\" DevicePath \"\"" Nov 28 07:16:49 crc kubenswrapper[4922]: I1128 07:16:49.307356 4922 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6b683f38-454d-454f-8e2f-66270d8b6ad4-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 07:16:49 crc kubenswrapper[4922]: I1128 07:16:49.307364 4922 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6b683f38-454d-454f-8e2f-66270d8b6ad4-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 07:16:49 crc kubenswrapper[4922]: I1128 07:16:49.340753 4922 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage10-crc" (UniqueName: "kubernetes.io/local-volume/local-storage10-crc") on node "crc" Nov 28 07:16:49 crc kubenswrapper[4922]: I1128 07:16:49.424183 4922 reconciler_common.go:293] "Volume detached for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") on node \"crc\" DevicePath \"\"" Nov 28 07:16:49 crc kubenswrapper[4922]: I1128 07:16:49.437178 4922 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="adb38287-ea0b-4e02-8bda-8022f86b0d81" path="/var/lib/kubelet/pods/adb38287-ea0b-4e02-8bda-8022f86b0d81/volumes" Nov 28 07:16:49 crc kubenswrapper[4922]: I1128 07:16:49.487287 4922 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 28 07:16:49 crc kubenswrapper[4922]: I1128 07:16:49.497683 4922 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 28 07:16:49 crc kubenswrapper[4922]: I1128 07:16:49.507085 4922 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 28 07:16:49 crc kubenswrapper[4922]: E1128 07:16:49.507641 4922 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6b683f38-454d-454f-8e2f-66270d8b6ad4" 
containerName="glance-httpd" Nov 28 07:16:49 crc kubenswrapper[4922]: I1128 07:16:49.507711 4922 state_mem.go:107] "Deleted CPUSet assignment" podUID="6b683f38-454d-454f-8e2f-66270d8b6ad4" containerName="glance-httpd" Nov 28 07:16:49 crc kubenswrapper[4922]: E1128 07:16:49.507770 4922 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6b683f38-454d-454f-8e2f-66270d8b6ad4" containerName="glance-log" Nov 28 07:16:49 crc kubenswrapper[4922]: I1128 07:16:49.507818 4922 state_mem.go:107] "Deleted CPUSet assignment" podUID="6b683f38-454d-454f-8e2f-66270d8b6ad4" containerName="glance-log" Nov 28 07:16:49 crc kubenswrapper[4922]: I1128 07:16:49.508038 4922 memory_manager.go:354] "RemoveStaleState removing state" podUID="6b683f38-454d-454f-8e2f-66270d8b6ad4" containerName="glance-log" Nov 28 07:16:49 crc kubenswrapper[4922]: I1128 07:16:49.508101 4922 memory_manager.go:354] "RemoveStaleState removing state" podUID="6b683f38-454d-454f-8e2f-66270d8b6ad4" containerName="glance-httpd" Nov 28 07:16:49 crc kubenswrapper[4922]: I1128 07:16:49.509015 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Nov 28 07:16:49 crc kubenswrapper[4922]: I1128 07:16:49.511344 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-internal-config-data" Nov 28 07:16:49 crc kubenswrapper[4922]: I1128 07:16:49.511588 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-glance-default-internal-svc" Nov 28 07:16:49 crc kubenswrapper[4922]: I1128 07:16:49.516831 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 28 07:16:49 crc kubenswrapper[4922]: I1128 07:16:49.628250 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/0cd494ee-6c17-4d94-96c2-9e2fcf02b2bc-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"0cd494ee-6c17-4d94-96c2-9e2fcf02b2bc\") " pod="openstack/glance-default-internal-api-0" Nov 28 07:16:49 crc kubenswrapper[4922]: I1128 07:16:49.628917 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/0cd494ee-6c17-4d94-96c2-9e2fcf02b2bc-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"0cd494ee-6c17-4d94-96c2-9e2fcf02b2bc\") " pod="openstack/glance-default-internal-api-0" Nov 28 07:16:49 crc kubenswrapper[4922]: I1128 07:16:49.629030 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0cd494ee-6c17-4d94-96c2-9e2fcf02b2bc-scripts\") pod \"glance-default-internal-api-0\" (UID: \"0cd494ee-6c17-4d94-96c2-9e2fcf02b2bc\") " pod="openstack/glance-default-internal-api-0" Nov 28 07:16:49 crc kubenswrapper[4922]: I1128 07:16:49.629080 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6snvh\" (UniqueName: \"kubernetes.io/projected/0cd494ee-6c17-4d94-96c2-9e2fcf02b2bc-kube-api-access-6snvh\") pod \"glance-default-internal-api-0\" (UID: \"0cd494ee-6c17-4d94-96c2-9e2fcf02b2bc\") " pod="openstack/glance-default-internal-api-0" Nov 28 07:16:49 crc kubenswrapper[4922]: I1128 07:16:49.629119 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/0cd494ee-6c17-4d94-96c2-9e2fcf02b2bc-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"0cd494ee-6c17-4d94-96c2-9e2fcf02b2bc\") " pod="openstack/glance-default-internal-api-0" Nov 28 07:16:49 crc kubenswrapper[4922]: I1128 07:16:49.629162 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0cd494ee-6c17-4d94-96c2-9e2fcf02b2bc-config-data\") pod \"glance-default-internal-api-0\" (UID: \"0cd494ee-6c17-4d94-96c2-9e2fcf02b2bc\") " pod="openstack/glance-default-internal-api-0" Nov 28 07:16:49 crc kubenswrapper[4922]: I1128 07:16:49.629181 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"glance-default-internal-api-0\" (UID: \"0cd494ee-6c17-4d94-96c2-9e2fcf02b2bc\") " pod="openstack/glance-default-internal-api-0" Nov 28 07:16:49 crc kubenswrapper[4922]: I1128 07:16:49.629201 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/0cd494ee-6c17-4d94-96c2-9e2fcf02b2bc-logs\") pod \"glance-default-internal-api-0\" (UID: \"0cd494ee-6c17-4d94-96c2-9e2fcf02b2bc\") " pod="openstack/glance-default-internal-api-0" Nov 28 07:16:49 crc kubenswrapper[4922]: I1128 07:16:49.730476 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/0cd494ee-6c17-4d94-96c2-9e2fcf02b2bc-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"0cd494ee-6c17-4d94-96c2-9e2fcf02b2bc\") " pod="openstack/glance-default-internal-api-0" Nov 28 07:16:49 crc kubenswrapper[4922]: I1128 07:16:49.730580 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0cd494ee-6c17-4d94-96c2-9e2fcf02b2bc-scripts\") pod \"glance-default-internal-api-0\" (UID: \"0cd494ee-6c17-4d94-96c2-9e2fcf02b2bc\") " pod="openstack/glance-default-internal-api-0" Nov 28 07:16:49 crc kubenswrapper[4922]: I1128 07:16:49.730610 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6snvh\" (UniqueName: \"kubernetes.io/projected/0cd494ee-6c17-4d94-96c2-9e2fcf02b2bc-kube-api-access-6snvh\") pod \"glance-default-internal-api-0\" (UID: \"0cd494ee-6c17-4d94-96c2-9e2fcf02b2bc\") " pod="openstack/glance-default-internal-api-0" Nov 28 07:16:49 crc kubenswrapper[4922]: I1128 07:16:49.730653 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0cd494ee-6c17-4d94-96c2-9e2fcf02b2bc-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"0cd494ee-6c17-4d94-96c2-9e2fcf02b2bc\") " pod="openstack/glance-default-internal-api-0" Nov 28 07:16:49 crc kubenswrapper[4922]: I1128 07:16:49.730677 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0cd494ee-6c17-4d94-96c2-9e2fcf02b2bc-config-data\") pod \"glance-default-internal-api-0\" (UID: \"0cd494ee-6c17-4d94-96c2-9e2fcf02b2bc\") " pod="openstack/glance-default-internal-api-0" Nov 28 07:16:49 crc kubenswrapper[4922]: I1128 07:16:49.730698 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage10-crc\" (UniqueName: 
\"kubernetes.io/local-volume/local-storage10-crc\") pod \"glance-default-internal-api-0\" (UID: \"0cd494ee-6c17-4d94-96c2-9e2fcf02b2bc\") " pod="openstack/glance-default-internal-api-0" Nov 28 07:16:49 crc kubenswrapper[4922]: I1128 07:16:49.730721 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/0cd494ee-6c17-4d94-96c2-9e2fcf02b2bc-logs\") pod \"glance-default-internal-api-0\" (UID: \"0cd494ee-6c17-4d94-96c2-9e2fcf02b2bc\") " pod="openstack/glance-default-internal-api-0" Nov 28 07:16:49 crc kubenswrapper[4922]: I1128 07:16:49.730749 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/0cd494ee-6c17-4d94-96c2-9e2fcf02b2bc-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"0cd494ee-6c17-4d94-96c2-9e2fcf02b2bc\") " pod="openstack/glance-default-internal-api-0" Nov 28 07:16:49 crc kubenswrapper[4922]: I1128 07:16:49.731862 4922 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"glance-default-internal-api-0\" (UID: \"0cd494ee-6c17-4d94-96c2-9e2fcf02b2bc\") device mount path \"/mnt/openstack/pv10\"" pod="openstack/glance-default-internal-api-0" Nov 28 07:16:49 crc kubenswrapper[4922]: I1128 07:16:49.733249 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/0cd494ee-6c17-4d94-96c2-9e2fcf02b2bc-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"0cd494ee-6c17-4d94-96c2-9e2fcf02b2bc\") " pod="openstack/glance-default-internal-api-0" Nov 28 07:16:49 crc kubenswrapper[4922]: I1128 07:16:49.733557 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/0cd494ee-6c17-4d94-96c2-9e2fcf02b2bc-logs\") pod \"glance-default-internal-api-0\" (UID: \"0cd494ee-6c17-4d94-96c2-9e2fcf02b2bc\") " pod="openstack/glance-default-internal-api-0" Nov 28 07:16:49 crc kubenswrapper[4922]: I1128 07:16:49.735339 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0cd494ee-6c17-4d94-96c2-9e2fcf02b2bc-scripts\") pod \"glance-default-internal-api-0\" (UID: \"0cd494ee-6c17-4d94-96c2-9e2fcf02b2bc\") " pod="openstack/glance-default-internal-api-0" Nov 28 07:16:49 crc kubenswrapper[4922]: I1128 07:16:49.736436 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0cd494ee-6c17-4d94-96c2-9e2fcf02b2bc-config-data\") pod \"glance-default-internal-api-0\" (UID: \"0cd494ee-6c17-4d94-96c2-9e2fcf02b2bc\") " pod="openstack/glance-default-internal-api-0" Nov 28 07:16:49 crc kubenswrapper[4922]: I1128 07:16:49.737305 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/0cd494ee-6c17-4d94-96c2-9e2fcf02b2bc-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"0cd494ee-6c17-4d94-96c2-9e2fcf02b2bc\") " pod="openstack/glance-default-internal-api-0" Nov 28 07:16:49 crc kubenswrapper[4922]: I1128 07:16:49.738939 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0cd494ee-6c17-4d94-96c2-9e2fcf02b2bc-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"0cd494ee-6c17-4d94-96c2-9e2fcf02b2bc\") " 
pod="openstack/glance-default-internal-api-0" Nov 28 07:16:49 crc kubenswrapper[4922]: I1128 07:16:49.757018 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6snvh\" (UniqueName: \"kubernetes.io/projected/0cd494ee-6c17-4d94-96c2-9e2fcf02b2bc-kube-api-access-6snvh\") pod \"glance-default-internal-api-0\" (UID: \"0cd494ee-6c17-4d94-96c2-9e2fcf02b2bc\") " pod="openstack/glance-default-internal-api-0" Nov 28 07:16:49 crc kubenswrapper[4922]: I1128 07:16:49.783468 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"glance-default-internal-api-0\" (UID: \"0cd494ee-6c17-4d94-96c2-9e2fcf02b2bc\") " pod="openstack/glance-default-internal-api-0" Nov 28 07:16:49 crc kubenswrapper[4922]: I1128 07:16:49.839825 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Nov 28 07:16:50 crc kubenswrapper[4922]: I1128 07:16:50.125155 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"182970fb-401f-404c-81c1-db0294b02167","Type":"ContainerStarted","Data":"d454c9cde22b3235276b93c623bcd54e057f380c3dc05277d2f60b55df5ae160"} Nov 28 07:16:50 crc kubenswrapper[4922]: I1128 07:16:50.125569 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"182970fb-401f-404c-81c1-db0294b02167","Type":"ContainerStarted","Data":"f4286284a20c6edd0e0aa4a65c4e2c67716484e715938e31801e5192f3451cb4"} Nov 28 07:16:50 crc kubenswrapper[4922]: I1128 07:16:50.162707 4922 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-external-api-0" podStartSLOduration=3.162440736 podStartE2EDuration="3.162440736s" podCreationTimestamp="2025-11-28 07:16:47 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 07:16:50.153077607 +0000 UTC m=+1455.073473219" watchObservedRunningTime="2025-11-28 07:16:50.162440736 +0000 UTC m=+1455.082836318" Nov 28 07:16:50 crc kubenswrapper[4922]: I1128 07:16:50.405425 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 28 07:16:50 crc kubenswrapper[4922]: W1128 07:16:50.423939 4922 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod0cd494ee_6c17_4d94_96c2_9e2fcf02b2bc.slice/crio-5e99700beb17dd192eb2195ba9c3daa3841a73599e60487affa058d659a1cad7 WatchSource:0}: Error finding container 5e99700beb17dd192eb2195ba9c3daa3841a73599e60487affa058d659a1cad7: Status 404 returned error can't find the container with id 5e99700beb17dd192eb2195ba9c3daa3841a73599e60487affa058d659a1cad7 Nov 28 07:16:51 crc kubenswrapper[4922]: I1128 07:16:51.149137 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"0cd494ee-6c17-4d94-96c2-9e2fcf02b2bc","Type":"ContainerStarted","Data":"c0b119f326caea92368ebddce52cea98327f9c2b47b9496a3050507d1378df2b"} Nov 28 07:16:51 crc kubenswrapper[4922]: I1128 07:16:51.149527 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"0cd494ee-6c17-4d94-96c2-9e2fcf02b2bc","Type":"ContainerStarted","Data":"5e99700beb17dd192eb2195ba9c3daa3841a73599e60487affa058d659a1cad7"} Nov 28 07:16:51 crc 
Nov 28 07:16:51 crc kubenswrapper[4922]: I1128 07:16:51.153279 4922 generic.go:334] "Generic (PLEG): container finished" podID="ca306ec1-da8e-4551-8cfe-b5030809c100" containerID="4a00625a58f020d08df019dd1495902c432a469e48284ab1d739b7a8e0cf2e4b" exitCode=0
Nov 28 07:16:51 crc kubenswrapper[4922]: I1128 07:16:51.153345 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"ca306ec1-da8e-4551-8cfe-b5030809c100","Type":"ContainerDied","Data":"4a00625a58f020d08df019dd1495902c432a469e48284ab1d739b7a8e0cf2e4b"}
Nov 28 07:16:51 crc kubenswrapper[4922]: I1128 07:16:51.229570 4922 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0"
Nov 28 07:16:51 crc kubenswrapper[4922]: I1128 07:16:51.366254 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ca306ec1-da8e-4551-8cfe-b5030809c100-scripts\") pod \"ca306ec1-da8e-4551-8cfe-b5030809c100\" (UID: \"ca306ec1-da8e-4551-8cfe-b5030809c100\") "
Nov 28 07:16:51 crc kubenswrapper[4922]: I1128 07:16:51.366371 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ca306ec1-da8e-4551-8cfe-b5030809c100-combined-ca-bundle\") pod \"ca306ec1-da8e-4551-8cfe-b5030809c100\" (UID: \"ca306ec1-da8e-4551-8cfe-b5030809c100\") "
Nov 28 07:16:51 crc kubenswrapper[4922]: I1128 07:16:51.366407 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ca306ec1-da8e-4551-8cfe-b5030809c100-config-data\") pod \"ca306ec1-da8e-4551-8cfe-b5030809c100\" (UID: \"ca306ec1-da8e-4551-8cfe-b5030809c100\") "
Nov 28 07:16:51 crc kubenswrapper[4922]: I1128 07:16:51.366465 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/ca306ec1-da8e-4551-8cfe-b5030809c100-log-httpd\") pod \"ca306ec1-da8e-4551-8cfe-b5030809c100\" (UID: \"ca306ec1-da8e-4551-8cfe-b5030809c100\") "
Nov 28 07:16:51 crc kubenswrapper[4922]: I1128 07:16:51.366489 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cdzg9\" (UniqueName: \"kubernetes.io/projected/ca306ec1-da8e-4551-8cfe-b5030809c100-kube-api-access-cdzg9\") pod \"ca306ec1-da8e-4551-8cfe-b5030809c100\" (UID: \"ca306ec1-da8e-4551-8cfe-b5030809c100\") "
Nov 28 07:16:51 crc kubenswrapper[4922]: I1128 07:16:51.366507 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/ca306ec1-da8e-4551-8cfe-b5030809c100-sg-core-conf-yaml\") pod \"ca306ec1-da8e-4551-8cfe-b5030809c100\" (UID: \"ca306ec1-da8e-4551-8cfe-b5030809c100\") "
Nov 28 07:16:51 crc kubenswrapper[4922]: I1128 07:16:51.366525 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/ca306ec1-da8e-4551-8cfe-b5030809c100-run-httpd\") pod \"ca306ec1-da8e-4551-8cfe-b5030809c100\" (UID: \"ca306ec1-da8e-4551-8cfe-b5030809c100\") "
Nov 28 07:16:51 crc kubenswrapper[4922]: I1128 07:16:51.367143 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ca306ec1-da8e-4551-8cfe-b5030809c100-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "ca306ec1-da8e-4551-8cfe-b5030809c100" (UID: "ca306ec1-da8e-4551-8cfe-b5030809c100"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 28 07:16:51 crc kubenswrapper[4922]: I1128 07:16:51.367958 4922 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/ca306ec1-da8e-4551-8cfe-b5030809c100-log-httpd\") on node \"crc\" DevicePath \"\""
Nov 28 07:16:51 crc kubenswrapper[4922]: I1128 07:16:51.367392 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ca306ec1-da8e-4551-8cfe-b5030809c100-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "ca306ec1-da8e-4551-8cfe-b5030809c100" (UID: "ca306ec1-da8e-4551-8cfe-b5030809c100"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 28 07:16:51 crc kubenswrapper[4922]: I1128 07:16:51.386368 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ca306ec1-da8e-4551-8cfe-b5030809c100-kube-api-access-cdzg9" (OuterVolumeSpecName: "kube-api-access-cdzg9") pod "ca306ec1-da8e-4551-8cfe-b5030809c100" (UID: "ca306ec1-da8e-4551-8cfe-b5030809c100"). InnerVolumeSpecName "kube-api-access-cdzg9". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 28 07:16:51 crc kubenswrapper[4922]: I1128 07:16:51.388361 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ca306ec1-da8e-4551-8cfe-b5030809c100-scripts" (OuterVolumeSpecName: "scripts") pod "ca306ec1-da8e-4551-8cfe-b5030809c100" (UID: "ca306ec1-da8e-4551-8cfe-b5030809c100"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 07:16:51 crc kubenswrapper[4922]: I1128 07:16:51.411297 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ca306ec1-da8e-4551-8cfe-b5030809c100-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "ca306ec1-da8e-4551-8cfe-b5030809c100" (UID: "ca306ec1-da8e-4551-8cfe-b5030809c100"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 07:16:51 crc kubenswrapper[4922]: I1128 07:16:51.412127 4922 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6b683f38-454d-454f-8e2f-66270d8b6ad4" path="/var/lib/kubelet/pods/6b683f38-454d-454f-8e2f-66270d8b6ad4/volumes"
Nov 28 07:16:51 crc kubenswrapper[4922]: I1128 07:16:51.461373 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ca306ec1-da8e-4551-8cfe-b5030809c100-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "ca306ec1-da8e-4551-8cfe-b5030809c100" (UID: "ca306ec1-da8e-4551-8cfe-b5030809c100"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 07:16:51 crc kubenswrapper[4922]: I1128 07:16:51.469115 4922 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ca306ec1-da8e-4551-8cfe-b5030809c100-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Nov 28 07:16:51 crc kubenswrapper[4922]: I1128 07:16:51.469144 4922 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cdzg9\" (UniqueName: \"kubernetes.io/projected/ca306ec1-da8e-4551-8cfe-b5030809c100-kube-api-access-cdzg9\") on node \"crc\" DevicePath \"\""
Nov 28 07:16:51 crc kubenswrapper[4922]: I1128 07:16:51.469154 4922 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/ca306ec1-da8e-4551-8cfe-b5030809c100-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\""
Nov 28 07:16:51 crc kubenswrapper[4922]: I1128 07:16:51.469162 4922 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/ca306ec1-da8e-4551-8cfe-b5030809c100-run-httpd\") on node \"crc\" DevicePath \"\""
Nov 28 07:16:51 crc kubenswrapper[4922]: I1128 07:16:51.469171 4922 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ca306ec1-da8e-4551-8cfe-b5030809c100-scripts\") on node \"crc\" DevicePath \"\""
Nov 28 07:16:51 crc kubenswrapper[4922]: I1128 07:16:51.481432 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ca306ec1-da8e-4551-8cfe-b5030809c100-config-data" (OuterVolumeSpecName: "config-data") pod "ca306ec1-da8e-4551-8cfe-b5030809c100" (UID: "ca306ec1-da8e-4551-8cfe-b5030809c100"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 07:16:51 crc kubenswrapper[4922]: I1128 07:16:51.582327 4922 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ca306ec1-da8e-4551-8cfe-b5030809c100-config-data\") on node \"crc\" DevicePath \"\""
Nov 28 07:16:52 crc kubenswrapper[4922]: I1128 07:16:52.163207 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"ca306ec1-da8e-4551-8cfe-b5030809c100","Type":"ContainerDied","Data":"f420fa3ec3f7dae78434c1f846e451b7433d72afb4f833dd2d5434c9f7795d2a"}
Nov 28 07:16:52 crc kubenswrapper[4922]: I1128 07:16:52.163284 4922 scope.go:117] "RemoveContainer" containerID="212eecce4deba65fb3c8573614978cbe1b17c325dddc6db6cb3ec4f44cf1941b"
Nov 28 07:16:52 crc kubenswrapper[4922]: I1128 07:16:52.163431 4922 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0"
Nov 28 07:16:52 crc kubenswrapper[4922]: I1128 07:16:52.178113 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"0cd494ee-6c17-4d94-96c2-9e2fcf02b2bc","Type":"ContainerStarted","Data":"b462e55e4b1d1673313c6dfd787beceb0503ca800228d804b043cfabe37c1295"}
Nov 28 07:16:52 crc kubenswrapper[4922]: I1128 07:16:52.212017 4922 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"]
Nov 28 07:16:52 crc kubenswrapper[4922]: I1128 07:16:52.227461 4922 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"]
Nov 28 07:16:52 crc kubenswrapper[4922]: I1128 07:16:52.249829 4922 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"]
Nov 28 07:16:52 crc kubenswrapper[4922]: E1128 07:16:52.250279 4922 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ca306ec1-da8e-4551-8cfe-b5030809c100" containerName="ceilometer-central-agent"
Nov 28 07:16:52 crc kubenswrapper[4922]: I1128 07:16:52.250298 4922 state_mem.go:107] "Deleted CPUSet assignment" podUID="ca306ec1-da8e-4551-8cfe-b5030809c100" containerName="ceilometer-central-agent"
Nov 28 07:16:52 crc kubenswrapper[4922]: E1128 07:16:52.250332 4922 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ca306ec1-da8e-4551-8cfe-b5030809c100" containerName="proxy-httpd"
Nov 28 07:16:52 crc kubenswrapper[4922]: I1128 07:16:52.250342 4922 state_mem.go:107] "Deleted CPUSet assignment" podUID="ca306ec1-da8e-4551-8cfe-b5030809c100" containerName="proxy-httpd"
Nov 28 07:16:52 crc kubenswrapper[4922]: E1128 07:16:52.250357 4922 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ca306ec1-da8e-4551-8cfe-b5030809c100" containerName="ceilometer-notification-agent"
Nov 28 07:16:52 crc kubenswrapper[4922]: I1128 07:16:52.250363 4922 state_mem.go:107] "Deleted CPUSet assignment" podUID="ca306ec1-da8e-4551-8cfe-b5030809c100" containerName="ceilometer-notification-agent"
Nov 28 07:16:52 crc kubenswrapper[4922]: E1128 07:16:52.250373 4922 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ca306ec1-da8e-4551-8cfe-b5030809c100" containerName="sg-core"
Nov 28 07:16:52 crc kubenswrapper[4922]: I1128 07:16:52.250379 4922 state_mem.go:107] "Deleted CPUSet assignment" podUID="ca306ec1-da8e-4551-8cfe-b5030809c100" containerName="sg-core"
Nov 28 07:16:52 crc kubenswrapper[4922]: I1128 07:16:52.250548 4922 memory_manager.go:354] "RemoveStaleState removing state" podUID="ca306ec1-da8e-4551-8cfe-b5030809c100" containerName="ceilometer-central-agent"
Nov 28 07:16:52 crc kubenswrapper[4922]: I1128 07:16:52.250571 4922 memory_manager.go:354] "RemoveStaleState removing state" podUID="ca306ec1-da8e-4551-8cfe-b5030809c100" containerName="proxy-httpd"
Nov 28 07:16:52 crc kubenswrapper[4922]: I1128 07:16:52.250587 4922 memory_manager.go:354] "RemoveStaleState removing state" podUID="ca306ec1-da8e-4551-8cfe-b5030809c100" containerName="sg-core"
Nov 28 07:16:52 crc kubenswrapper[4922]: I1128 07:16:52.250598 4922 memory_manager.go:354] "RemoveStaleState removing state" podUID="ca306ec1-da8e-4551-8cfe-b5030809c100" containerName="ceilometer-notification-agent"
Nov 28 07:16:52 crc kubenswrapper[4922]: I1128 07:16:52.252149 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0"
Nov 28 07:16:52 crc kubenswrapper[4922]: I1128 07:16:52.254281 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts"
Nov 28 07:16:52 crc kubenswrapper[4922]: I1128 07:16:52.254466 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data"
Nov 28 07:16:52 crc kubenswrapper[4922]: I1128 07:16:52.259145 4922 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-internal-api-0" podStartSLOduration=3.259127161 podStartE2EDuration="3.259127161s" podCreationTimestamp="2025-11-28 07:16:49 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 07:16:52.231868306 +0000 UTC m=+1457.152263908" watchObservedRunningTime="2025-11-28 07:16:52.259127161 +0000 UTC m=+1457.179522743"
Nov 28 07:16:52 crc kubenswrapper[4922]: I1128 07:16:52.279130 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"]
Nov 28 07:16:52 crc kubenswrapper[4922]: I1128 07:16:52.396427 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xgrhz\" (UniqueName: \"kubernetes.io/projected/f5d7ad45-a1fd-45f7-b292-cf6ae5b4e6ef-kube-api-access-xgrhz\") pod \"ceilometer-0\" (UID: \"f5d7ad45-a1fd-45f7-b292-cf6ae5b4e6ef\") " pod="openstack/ceilometer-0"
Nov 28 07:16:52 crc kubenswrapper[4922]: I1128 07:16:52.396485 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/f5d7ad45-a1fd-45f7-b292-cf6ae5b4e6ef-run-httpd\") pod \"ceilometer-0\" (UID: \"f5d7ad45-a1fd-45f7-b292-cf6ae5b4e6ef\") " pod="openstack/ceilometer-0"
Nov 28 07:16:52 crc kubenswrapper[4922]: I1128 07:16:52.396541 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f5d7ad45-a1fd-45f7-b292-cf6ae5b4e6ef-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"f5d7ad45-a1fd-45f7-b292-cf6ae5b4e6ef\") " pod="openstack/ceilometer-0"
Nov 28 07:16:52 crc kubenswrapper[4922]: I1128 07:16:52.396600 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/f5d7ad45-a1fd-45f7-b292-cf6ae5b4e6ef-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"f5d7ad45-a1fd-45f7-b292-cf6ae5b4e6ef\") " pod="openstack/ceilometer-0"
Nov 28 07:16:52 crc kubenswrapper[4922]: I1128 07:16:52.396641 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/f5d7ad45-a1fd-45f7-b292-cf6ae5b4e6ef-log-httpd\") pod \"ceilometer-0\" (UID: \"f5d7ad45-a1fd-45f7-b292-cf6ae5b4e6ef\") " pod="openstack/ceilometer-0"
Nov 28 07:16:52 crc kubenswrapper[4922]: I1128 07:16:52.396662 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f5d7ad45-a1fd-45f7-b292-cf6ae5b4e6ef-scripts\") pod \"ceilometer-0\" (UID: \"f5d7ad45-a1fd-45f7-b292-cf6ae5b4e6ef\") " pod="openstack/ceilometer-0"
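The paired cpu_manager/state_mem and memory_manager lines above run when the pod is re-added under a new UID: RemoveStaleState purges per-container resource assignments that belong to pod UIDs no longer present, so the new incarnation starts from a clean slate. A toy sketch of that sweep, with a string standing in for a CPUSet (not the kubelet's actual state types):

    package main

    import "fmt"

    type key struct{ podUID, container string }

    type stateStore map[key]string // value stands in for a CPUSet

    // removeStale drops assignments whose pod UID is no longer live,
    // mirroring the "Deleted CPUSet assignment" lines above.
    func (s stateStore) removeStale(livePods map[string]bool) {
        for k := range s {
            if !livePods[k.podUID] {
                fmt.Printf("Deleted CPUSet assignment podUID=%q containerName=%q\n", k.podUID, k.container)
                delete(s, k)
            }
        }
    }

    func main() {
        s := stateStore{
            {"ca306ec1-da8e-4551-8cfe-b5030809c100", "sg-core"}: "0-1",
        }
        s.removeStale(map[string]bool{}) // the old ceilometer UID is gone
    }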
\"kubernetes.io/secret/f5d7ad45-a1fd-45f7-b292-cf6ae5b4e6ef-config-data\") pod \"ceilometer-0\" (UID: \"f5d7ad45-a1fd-45f7-b292-cf6ae5b4e6ef\") " pod="openstack/ceilometer-0" Nov 28 07:16:52 crc kubenswrapper[4922]: I1128 07:16:52.498744 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f5d7ad45-a1fd-45f7-b292-cf6ae5b4e6ef-config-data\") pod \"ceilometer-0\" (UID: \"f5d7ad45-a1fd-45f7-b292-cf6ae5b4e6ef\") " pod="openstack/ceilometer-0" Nov 28 07:16:52 crc kubenswrapper[4922]: I1128 07:16:52.498918 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xgrhz\" (UniqueName: \"kubernetes.io/projected/f5d7ad45-a1fd-45f7-b292-cf6ae5b4e6ef-kube-api-access-xgrhz\") pod \"ceilometer-0\" (UID: \"f5d7ad45-a1fd-45f7-b292-cf6ae5b4e6ef\") " pod="openstack/ceilometer-0" Nov 28 07:16:52 crc kubenswrapper[4922]: I1128 07:16:52.498964 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/f5d7ad45-a1fd-45f7-b292-cf6ae5b4e6ef-run-httpd\") pod \"ceilometer-0\" (UID: \"f5d7ad45-a1fd-45f7-b292-cf6ae5b4e6ef\") " pod="openstack/ceilometer-0" Nov 28 07:16:52 crc kubenswrapper[4922]: I1128 07:16:52.498993 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f5d7ad45-a1fd-45f7-b292-cf6ae5b4e6ef-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"f5d7ad45-a1fd-45f7-b292-cf6ae5b4e6ef\") " pod="openstack/ceilometer-0" Nov 28 07:16:52 crc kubenswrapper[4922]: I1128 07:16:52.499065 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/f5d7ad45-a1fd-45f7-b292-cf6ae5b4e6ef-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"f5d7ad45-a1fd-45f7-b292-cf6ae5b4e6ef\") " pod="openstack/ceilometer-0" Nov 28 07:16:52 crc kubenswrapper[4922]: I1128 07:16:52.499108 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/f5d7ad45-a1fd-45f7-b292-cf6ae5b4e6ef-log-httpd\") pod \"ceilometer-0\" (UID: \"f5d7ad45-a1fd-45f7-b292-cf6ae5b4e6ef\") " pod="openstack/ceilometer-0" Nov 28 07:16:52 crc kubenswrapper[4922]: I1128 07:16:52.499130 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f5d7ad45-a1fd-45f7-b292-cf6ae5b4e6ef-scripts\") pod \"ceilometer-0\" (UID: \"f5d7ad45-a1fd-45f7-b292-cf6ae5b4e6ef\") " pod="openstack/ceilometer-0" Nov 28 07:16:52 crc kubenswrapper[4922]: I1128 07:16:52.500102 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/f5d7ad45-a1fd-45f7-b292-cf6ae5b4e6ef-run-httpd\") pod \"ceilometer-0\" (UID: \"f5d7ad45-a1fd-45f7-b292-cf6ae5b4e6ef\") " pod="openstack/ceilometer-0" Nov 28 07:16:52 crc kubenswrapper[4922]: I1128 07:16:52.500545 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/f5d7ad45-a1fd-45f7-b292-cf6ae5b4e6ef-log-httpd\") pod \"ceilometer-0\" (UID: \"f5d7ad45-a1fd-45f7-b292-cf6ae5b4e6ef\") " pod="openstack/ceilometer-0" Nov 28 07:16:52 crc kubenswrapper[4922]: I1128 07:16:52.506806 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: 
\"kubernetes.io/secret/f5d7ad45-a1fd-45f7-b292-cf6ae5b4e6ef-config-data\") pod \"ceilometer-0\" (UID: \"f5d7ad45-a1fd-45f7-b292-cf6ae5b4e6ef\") " pod="openstack/ceilometer-0" Nov 28 07:16:52 crc kubenswrapper[4922]: I1128 07:16:52.506901 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f5d7ad45-a1fd-45f7-b292-cf6ae5b4e6ef-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"f5d7ad45-a1fd-45f7-b292-cf6ae5b4e6ef\") " pod="openstack/ceilometer-0" Nov 28 07:16:52 crc kubenswrapper[4922]: I1128 07:16:52.510992 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f5d7ad45-a1fd-45f7-b292-cf6ae5b4e6ef-scripts\") pod \"ceilometer-0\" (UID: \"f5d7ad45-a1fd-45f7-b292-cf6ae5b4e6ef\") " pod="openstack/ceilometer-0" Nov 28 07:16:52 crc kubenswrapper[4922]: I1128 07:16:52.515319 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/f5d7ad45-a1fd-45f7-b292-cf6ae5b4e6ef-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"f5d7ad45-a1fd-45f7-b292-cf6ae5b4e6ef\") " pod="openstack/ceilometer-0" Nov 28 07:16:52 crc kubenswrapper[4922]: I1128 07:16:52.525985 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xgrhz\" (UniqueName: \"kubernetes.io/projected/f5d7ad45-a1fd-45f7-b292-cf6ae5b4e6ef-kube-api-access-xgrhz\") pod \"ceilometer-0\" (UID: \"f5d7ad45-a1fd-45f7-b292-cf6ae5b4e6ef\") " pod="openstack/ceilometer-0" Nov 28 07:16:52 crc kubenswrapper[4922]: I1128 07:16:52.571310 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 28 07:16:53 crc kubenswrapper[4922]: I1128 07:16:53.408789 4922 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ca306ec1-da8e-4551-8cfe-b5030809c100" path="/var/lib/kubelet/pods/ca306ec1-da8e-4551-8cfe-b5030809c100/volumes" Nov 28 07:16:56 crc kubenswrapper[4922]: I1128 07:16:56.010096 4922 scope.go:117] "RemoveContainer" containerID="146255e849c42fb7d994de3b5de500fa1339cd40ae040fd55aa1b1a572228f15" Nov 28 07:16:56 crc kubenswrapper[4922]: I1128 07:16:56.065809 4922 scope.go:117] "RemoveContainer" containerID="8c233d204d94ede92a35bb4deb9e2d58dd580257d9c9cab116734656884e10cf" Nov 28 07:16:56 crc kubenswrapper[4922]: I1128 07:16:56.261859 4922 scope.go:117] "RemoveContainer" containerID="4a00625a58f020d08df019dd1495902c432a469e48284ab1d739b7a8e0cf2e4b" Nov 28 07:16:56 crc kubenswrapper[4922]: I1128 07:16:56.366984 4922 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 28 07:16:56 crc kubenswrapper[4922]: I1128 07:16:56.519113 4922 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 28 07:16:57 crc kubenswrapper[4922]: I1128 07:16:57.232058 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-db-sync-fr9rg" event={"ID":"0a538d8e-6665-46a0-bf05-2957a37bc9a2","Type":"ContainerStarted","Data":"9af11d45a9ec4a7c6386d4f9531d1d6d3337db4ee65cf969fcc2b73d76c44a51"} Nov 28 07:16:57 crc kubenswrapper[4922]: I1128 07:16:57.235596 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"f5d7ad45-a1fd-45f7-b292-cf6ae5b4e6ef","Type":"ContainerStarted","Data":"392276c92161f45ebef1c64c95d958a265042ba78e3d04118076d372f900f0f2"} Nov 28 07:16:57 crc kubenswrapper[4922]: I1128 07:16:57.246747 4922 pod_startup_latency_tracker.go:104] 
"Observed pod startup duration" pod="openstack/nova-cell0-conductor-db-sync-fr9rg" podStartSLOduration=2.893909554 podStartE2EDuration="11.246731211s" podCreationTimestamp="2025-11-28 07:16:46 +0000 UTC" firstStartedPulling="2025-11-28 07:16:47.727713977 +0000 UTC m=+1452.648109559" lastFinishedPulling="2025-11-28 07:16:56.080535634 +0000 UTC m=+1461.000931216" observedRunningTime="2025-11-28 07:16:57.246202057 +0000 UTC m=+1462.166597649" watchObservedRunningTime="2025-11-28 07:16:57.246731211 +0000 UTC m=+1462.167126793" Nov 28 07:16:58 crc kubenswrapper[4922]: I1128 07:16:58.099812 4922 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-external-api-0" Nov 28 07:16:58 crc kubenswrapper[4922]: I1128 07:16:58.100174 4922 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-external-api-0" Nov 28 07:16:58 crc kubenswrapper[4922]: I1128 07:16:58.170569 4922 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-external-api-0" Nov 28 07:16:58 crc kubenswrapper[4922]: I1128 07:16:58.207279 4922 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-external-api-0" Nov 28 07:16:58 crc kubenswrapper[4922]: I1128 07:16:58.264541 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"f5d7ad45-a1fd-45f7-b292-cf6ae5b4e6ef","Type":"ContainerStarted","Data":"33d4cde3f8b9b25d6fd6c1b90335deeebb339da57acfdb261aea3204194810df"} Nov 28 07:16:58 crc kubenswrapper[4922]: I1128 07:16:58.264590 4922 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-external-api-0" Nov 28 07:16:58 crc kubenswrapper[4922]: I1128 07:16:58.265298 4922 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-external-api-0" Nov 28 07:16:59 crc kubenswrapper[4922]: I1128 07:16:59.274814 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"f5d7ad45-a1fd-45f7-b292-cf6ae5b4e6ef","Type":"ContainerStarted","Data":"cc9025e3612f4604cec5ca69a6c7b1d7f12cc576c6679b92199a2550378bba5d"} Nov 28 07:16:59 crc kubenswrapper[4922]: I1128 07:16:59.840956 4922 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-internal-api-0" Nov 28 07:16:59 crc kubenswrapper[4922]: I1128 07:16:59.841282 4922 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-internal-api-0" Nov 28 07:16:59 crc kubenswrapper[4922]: I1128 07:16:59.890632 4922 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-internal-api-0" Nov 28 07:16:59 crc kubenswrapper[4922]: I1128 07:16:59.901987 4922 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-internal-api-0" Nov 28 07:17:00 crc kubenswrapper[4922]: I1128 07:17:00.186926 4922 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-external-api-0" Nov 28 07:17:00 crc kubenswrapper[4922]: I1128 07:17:00.288683 4922 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Nov 28 07:17:00 crc kubenswrapper[4922]: I1128 07:17:00.289577 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" 
event={"ID":"f5d7ad45-a1fd-45f7-b292-cf6ae5b4e6ef","Type":"ContainerStarted","Data":"100f2d0df96a65f044858959fc9d2c7023b44648f46d4f40df493c10716b2975"} Nov 28 07:17:00 crc kubenswrapper[4922]: I1128 07:17:00.290530 4922 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-internal-api-0" Nov 28 07:17:00 crc kubenswrapper[4922]: I1128 07:17:00.290559 4922 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-internal-api-0" Nov 28 07:17:00 crc kubenswrapper[4922]: I1128 07:17:00.415311 4922 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-external-api-0" Nov 28 07:17:02 crc kubenswrapper[4922]: I1128 07:17:02.311327 4922 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Nov 28 07:17:02 crc kubenswrapper[4922]: I1128 07:17:02.311802 4922 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Nov 28 07:17:02 crc kubenswrapper[4922]: I1128 07:17:02.603742 4922 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-internal-api-0" Nov 28 07:17:02 crc kubenswrapper[4922]: I1128 07:17:02.704979 4922 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-internal-api-0" Nov 28 07:17:03 crc kubenswrapper[4922]: I1128 07:17:03.322061 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"f5d7ad45-a1fd-45f7-b292-cf6ae5b4e6ef","Type":"ContainerStarted","Data":"acae5e00122e766c9eed7f3e79b75508af0500661d76036ba5bb8430f55cd335"} Nov 28 07:17:03 crc kubenswrapper[4922]: I1128 07:17:03.322501 4922 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Nov 28 07:17:03 crc kubenswrapper[4922]: I1128 07:17:03.322251 4922 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="f5d7ad45-a1fd-45f7-b292-cf6ae5b4e6ef" containerName="ceilometer-notification-agent" containerID="cri-o://cc9025e3612f4604cec5ca69a6c7b1d7f12cc576c6679b92199a2550378bba5d" gracePeriod=30 Nov 28 07:17:03 crc kubenswrapper[4922]: I1128 07:17:03.322549 4922 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="f5d7ad45-a1fd-45f7-b292-cf6ae5b4e6ef" containerName="proxy-httpd" containerID="cri-o://acae5e00122e766c9eed7f3e79b75508af0500661d76036ba5bb8430f55cd335" gracePeriod=30 Nov 28 07:17:03 crc kubenswrapper[4922]: I1128 07:17:03.322302 4922 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="f5d7ad45-a1fd-45f7-b292-cf6ae5b4e6ef" containerName="sg-core" containerID="cri-o://100f2d0df96a65f044858959fc9d2c7023b44648f46d4f40df493c10716b2975" gracePeriod=30 Nov 28 07:17:03 crc kubenswrapper[4922]: I1128 07:17:03.322156 4922 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="f5d7ad45-a1fd-45f7-b292-cf6ae5b4e6ef" containerName="ceilometer-central-agent" containerID="cri-o://33d4cde3f8b9b25d6fd6c1b90335deeebb339da57acfdb261aea3204194810df" gracePeriod=30 Nov 28 07:17:03 crc kubenswrapper[4922]: I1128 07:17:03.353621 4922 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=5.650946996 podStartE2EDuration="11.353600141s" podCreationTimestamp="2025-11-28 07:16:52 +0000 UTC" firstStartedPulling="2025-11-28 07:16:56.530009062 +0000 UTC 
m=+1461.450404644" lastFinishedPulling="2025-11-28 07:17:02.232662177 +0000 UTC m=+1467.153057789" observedRunningTime="2025-11-28 07:17:03.346118871 +0000 UTC m=+1468.266514483" watchObservedRunningTime="2025-11-28 07:17:03.353600141 +0000 UTC m=+1468.273995723" Nov 28 07:17:04 crc kubenswrapper[4922]: I1128 07:17:04.338794 4922 generic.go:334] "Generic (PLEG): container finished" podID="f5d7ad45-a1fd-45f7-b292-cf6ae5b4e6ef" containerID="acae5e00122e766c9eed7f3e79b75508af0500661d76036ba5bb8430f55cd335" exitCode=0 Nov 28 07:17:04 crc kubenswrapper[4922]: I1128 07:17:04.338836 4922 generic.go:334] "Generic (PLEG): container finished" podID="f5d7ad45-a1fd-45f7-b292-cf6ae5b4e6ef" containerID="100f2d0df96a65f044858959fc9d2c7023b44648f46d4f40df493c10716b2975" exitCode=2 Nov 28 07:17:04 crc kubenswrapper[4922]: I1128 07:17:04.338848 4922 generic.go:334] "Generic (PLEG): container finished" podID="f5d7ad45-a1fd-45f7-b292-cf6ae5b4e6ef" containerID="cc9025e3612f4604cec5ca69a6c7b1d7f12cc576c6679b92199a2550378bba5d" exitCode=0 Nov 28 07:17:04 crc kubenswrapper[4922]: I1128 07:17:04.340031 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"f5d7ad45-a1fd-45f7-b292-cf6ae5b4e6ef","Type":"ContainerDied","Data":"acae5e00122e766c9eed7f3e79b75508af0500661d76036ba5bb8430f55cd335"} Nov 28 07:17:04 crc kubenswrapper[4922]: I1128 07:17:04.340361 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"f5d7ad45-a1fd-45f7-b292-cf6ae5b4e6ef","Type":"ContainerDied","Data":"100f2d0df96a65f044858959fc9d2c7023b44648f46d4f40df493c10716b2975"} Nov 28 07:17:04 crc kubenswrapper[4922]: I1128 07:17:04.340384 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"f5d7ad45-a1fd-45f7-b292-cf6ae5b4e6ef","Type":"ContainerDied","Data":"cc9025e3612f4604cec5ca69a6c7b1d7f12cc576c6679b92199a2550378bba5d"} Nov 28 07:17:09 crc kubenswrapper[4922]: I1128 07:17:09.132457 4922 util.go:48] "No ready sandbox for pod can be found. 
Nov 28 07:17:09 crc kubenswrapper[4922]: I1128 07:17:09.132457 4922 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0"
Nov 28 07:17:09 crc kubenswrapper[4922]: I1128 07:17:09.256952 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/f5d7ad45-a1fd-45f7-b292-cf6ae5b4e6ef-log-httpd\") pod \"f5d7ad45-a1fd-45f7-b292-cf6ae5b4e6ef\" (UID: \"f5d7ad45-a1fd-45f7-b292-cf6ae5b4e6ef\") "
Nov 28 07:17:09 crc kubenswrapper[4922]: I1128 07:17:09.257410 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xgrhz\" (UniqueName: \"kubernetes.io/projected/f5d7ad45-a1fd-45f7-b292-cf6ae5b4e6ef-kube-api-access-xgrhz\") pod \"f5d7ad45-a1fd-45f7-b292-cf6ae5b4e6ef\" (UID: \"f5d7ad45-a1fd-45f7-b292-cf6ae5b4e6ef\") "
Nov 28 07:17:09 crc kubenswrapper[4922]: I1128 07:17:09.257448 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f5d7ad45-a1fd-45f7-b292-cf6ae5b4e6ef-combined-ca-bundle\") pod \"f5d7ad45-a1fd-45f7-b292-cf6ae5b4e6ef\" (UID: \"f5d7ad45-a1fd-45f7-b292-cf6ae5b4e6ef\") "
Nov 28 07:17:09 crc kubenswrapper[4922]: I1128 07:17:09.257504 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/f5d7ad45-a1fd-45f7-b292-cf6ae5b4e6ef-run-httpd\") pod \"f5d7ad45-a1fd-45f7-b292-cf6ae5b4e6ef\" (UID: \"f5d7ad45-a1fd-45f7-b292-cf6ae5b4e6ef\") "
Nov 28 07:17:09 crc kubenswrapper[4922]: I1128 07:17:09.257507 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f5d7ad45-a1fd-45f7-b292-cf6ae5b4e6ef-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "f5d7ad45-a1fd-45f7-b292-cf6ae5b4e6ef" (UID: "f5d7ad45-a1fd-45f7-b292-cf6ae5b4e6ef"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 28 07:17:09 crc kubenswrapper[4922]: I1128 07:17:09.257543 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/f5d7ad45-a1fd-45f7-b292-cf6ae5b4e6ef-sg-core-conf-yaml\") pod \"f5d7ad45-a1fd-45f7-b292-cf6ae5b4e6ef\" (UID: \"f5d7ad45-a1fd-45f7-b292-cf6ae5b4e6ef\") "
Nov 28 07:17:09 crc kubenswrapper[4922]: I1128 07:17:09.257604 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f5d7ad45-a1fd-45f7-b292-cf6ae5b4e6ef-scripts\") pod \"f5d7ad45-a1fd-45f7-b292-cf6ae5b4e6ef\" (UID: \"f5d7ad45-a1fd-45f7-b292-cf6ae5b4e6ef\") "
Nov 28 07:17:09 crc kubenswrapper[4922]: I1128 07:17:09.257844 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f5d7ad45-a1fd-45f7-b292-cf6ae5b4e6ef-config-data\") pod \"f5d7ad45-a1fd-45f7-b292-cf6ae5b4e6ef\" (UID: \"f5d7ad45-a1fd-45f7-b292-cf6ae5b4e6ef\") "
Nov 28 07:17:09 crc kubenswrapper[4922]: I1128 07:17:09.258458 4922 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/f5d7ad45-a1fd-45f7-b292-cf6ae5b4e6ef-log-httpd\") on node \"crc\" DevicePath \"\""
Nov 28 07:17:09 crc kubenswrapper[4922]: I1128 07:17:09.258652 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f5d7ad45-a1fd-45f7-b292-cf6ae5b4e6ef-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "f5d7ad45-a1fd-45f7-b292-cf6ae5b4e6ef" (UID: "f5d7ad45-a1fd-45f7-b292-cf6ae5b4e6ef"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 28 07:17:09 crc kubenswrapper[4922]: I1128 07:17:09.267474 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f5d7ad45-a1fd-45f7-b292-cf6ae5b4e6ef-kube-api-access-xgrhz" (OuterVolumeSpecName: "kube-api-access-xgrhz") pod "f5d7ad45-a1fd-45f7-b292-cf6ae5b4e6ef" (UID: "f5d7ad45-a1fd-45f7-b292-cf6ae5b4e6ef"). InnerVolumeSpecName "kube-api-access-xgrhz". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 28 07:17:09 crc kubenswrapper[4922]: I1128 07:17:09.275369 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f5d7ad45-a1fd-45f7-b292-cf6ae5b4e6ef-scripts" (OuterVolumeSpecName: "scripts") pod "f5d7ad45-a1fd-45f7-b292-cf6ae5b4e6ef" (UID: "f5d7ad45-a1fd-45f7-b292-cf6ae5b4e6ef"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 07:17:09 crc kubenswrapper[4922]: I1128 07:17:09.328577 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f5d7ad45-a1fd-45f7-b292-cf6ae5b4e6ef-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "f5d7ad45-a1fd-45f7-b292-cf6ae5b4e6ef" (UID: "f5d7ad45-a1fd-45f7-b292-cf6ae5b4e6ef"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 07:17:09 crc kubenswrapper[4922]: I1128 07:17:09.359847 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f5d7ad45-a1fd-45f7-b292-cf6ae5b4e6ef-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "f5d7ad45-a1fd-45f7-b292-cf6ae5b4e6ef" (UID: "f5d7ad45-a1fd-45f7-b292-cf6ae5b4e6ef"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 07:17:09 crc kubenswrapper[4922]: I1128 07:17:09.360260 4922 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f5d7ad45-a1fd-45f7-b292-cf6ae5b4e6ef-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Nov 28 07:17:09 crc kubenswrapper[4922]: I1128 07:17:09.360316 4922 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xgrhz\" (UniqueName: \"kubernetes.io/projected/f5d7ad45-a1fd-45f7-b292-cf6ae5b4e6ef-kube-api-access-xgrhz\") on node \"crc\" DevicePath \"\""
Nov 28 07:17:09 crc kubenswrapper[4922]: I1128 07:17:09.360338 4922 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/f5d7ad45-a1fd-45f7-b292-cf6ae5b4e6ef-run-httpd\") on node \"crc\" DevicePath \"\""
Nov 28 07:17:09 crc kubenswrapper[4922]: I1128 07:17:09.360357 4922 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/f5d7ad45-a1fd-45f7-b292-cf6ae5b4e6ef-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\""
Nov 28 07:17:09 crc kubenswrapper[4922]: I1128 07:17:09.360377 4922 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f5d7ad45-a1fd-45f7-b292-cf6ae5b4e6ef-scripts\") on node \"crc\" DevicePath \"\""
Nov 28 07:17:09 crc kubenswrapper[4922]: I1128 07:17:09.439739 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f5d7ad45-a1fd-45f7-b292-cf6ae5b4e6ef-config-data" (OuterVolumeSpecName: "config-data") pod "f5d7ad45-a1fd-45f7-b292-cf6ae5b4e6ef" (UID: "f5d7ad45-a1fd-45f7-b292-cf6ae5b4e6ef"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 07:17:09 crc kubenswrapper[4922]: I1128 07:17:09.451928 4922 generic.go:334] "Generic (PLEG): container finished" podID="f5d7ad45-a1fd-45f7-b292-cf6ae5b4e6ef" containerID="33d4cde3f8b9b25d6fd6c1b90335deeebb339da57acfdb261aea3204194810df" exitCode=0
Nov 28 07:17:09 crc kubenswrapper[4922]: I1128 07:17:09.452001 4922 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0"
Nov 28 07:17:09 crc kubenswrapper[4922]: I1128 07:17:09.452029 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"f5d7ad45-a1fd-45f7-b292-cf6ae5b4e6ef","Type":"ContainerDied","Data":"33d4cde3f8b9b25d6fd6c1b90335deeebb339da57acfdb261aea3204194810df"}
Nov 28 07:17:09 crc kubenswrapper[4922]: I1128 07:17:09.452492 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"f5d7ad45-a1fd-45f7-b292-cf6ae5b4e6ef","Type":"ContainerDied","Data":"392276c92161f45ebef1c64c95d958a265042ba78e3d04118076d372f900f0f2"}
Nov 28 07:17:09 crc kubenswrapper[4922]: I1128 07:17:09.452517 4922 scope.go:117] "RemoveContainer" containerID="acae5e00122e766c9eed7f3e79b75508af0500661d76036ba5bb8430f55cd335"
Nov 28 07:17:09 crc kubenswrapper[4922]: I1128 07:17:09.462409 4922 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f5d7ad45-a1fd-45f7-b292-cf6ae5b4e6ef-config-data\") on node \"crc\" DevicePath \"\""
Nov 28 07:17:09 crc kubenswrapper[4922]: I1128 07:17:09.539353 4922 scope.go:117] "RemoveContainer" containerID="100f2d0df96a65f044858959fc9d2c7023b44648f46d4f40df493c10716b2975"
Nov 28 07:17:09 crc kubenswrapper[4922]: I1128 07:17:09.543459 4922 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"]
Nov 28 07:17:09 crc kubenswrapper[4922]: I1128 07:17:09.553137 4922 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"]
Nov 28 07:17:09 crc kubenswrapper[4922]: I1128 07:17:09.561008 4922 scope.go:117] "RemoveContainer" containerID="cc9025e3612f4604cec5ca69a6c7b1d7f12cc576c6679b92199a2550378bba5d"
Nov 28 07:17:09 crc kubenswrapper[4922]: I1128 07:17:09.579968 4922 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"]
Nov 28 07:17:09 crc kubenswrapper[4922]: E1128 07:17:09.580366 4922 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f5d7ad45-a1fd-45f7-b292-cf6ae5b4e6ef" containerName="proxy-httpd"
Nov 28 07:17:09 crc kubenswrapper[4922]: I1128 07:17:09.580379 4922 state_mem.go:107] "Deleted CPUSet assignment" podUID="f5d7ad45-a1fd-45f7-b292-cf6ae5b4e6ef" containerName="proxy-httpd"
Nov 28 07:17:09 crc kubenswrapper[4922]: E1128 07:17:09.580414 4922 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f5d7ad45-a1fd-45f7-b292-cf6ae5b4e6ef" containerName="ceilometer-central-agent"
Nov 28 07:17:09 crc kubenswrapper[4922]: I1128 07:17:09.580420 4922 state_mem.go:107] "Deleted CPUSet assignment" podUID="f5d7ad45-a1fd-45f7-b292-cf6ae5b4e6ef" containerName="ceilometer-central-agent"
Nov 28 07:17:09 crc kubenswrapper[4922]: E1128 07:17:09.580434 4922 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f5d7ad45-a1fd-45f7-b292-cf6ae5b4e6ef" containerName="ceilometer-notification-agent"
containerName="ceilometer-notification-agent" Nov 28 07:17:09 crc kubenswrapper[4922]: E1128 07:17:09.580450 4922 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f5d7ad45-a1fd-45f7-b292-cf6ae5b4e6ef" containerName="sg-core" Nov 28 07:17:09 crc kubenswrapper[4922]: I1128 07:17:09.580455 4922 state_mem.go:107] "Deleted CPUSet assignment" podUID="f5d7ad45-a1fd-45f7-b292-cf6ae5b4e6ef" containerName="sg-core" Nov 28 07:17:09 crc kubenswrapper[4922]: I1128 07:17:09.580674 4922 memory_manager.go:354] "RemoveStaleState removing state" podUID="f5d7ad45-a1fd-45f7-b292-cf6ae5b4e6ef" containerName="ceilometer-notification-agent" Nov 28 07:17:09 crc kubenswrapper[4922]: I1128 07:17:09.580698 4922 memory_manager.go:354] "RemoveStaleState removing state" podUID="f5d7ad45-a1fd-45f7-b292-cf6ae5b4e6ef" containerName="proxy-httpd" Nov 28 07:17:09 crc kubenswrapper[4922]: I1128 07:17:09.580731 4922 memory_manager.go:354] "RemoveStaleState removing state" podUID="f5d7ad45-a1fd-45f7-b292-cf6ae5b4e6ef" containerName="sg-core" Nov 28 07:17:09 crc kubenswrapper[4922]: I1128 07:17:09.580741 4922 memory_manager.go:354] "RemoveStaleState removing state" podUID="f5d7ad45-a1fd-45f7-b292-cf6ae5b4e6ef" containerName="ceilometer-central-agent" Nov 28 07:17:09 crc kubenswrapper[4922]: I1128 07:17:09.582730 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 28 07:17:09 crc kubenswrapper[4922]: I1128 07:17:09.586632 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Nov 28 07:17:09 crc kubenswrapper[4922]: I1128 07:17:09.587240 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Nov 28 07:17:09 crc kubenswrapper[4922]: I1128 07:17:09.592528 4922 scope.go:117] "RemoveContainer" containerID="33d4cde3f8b9b25d6fd6c1b90335deeebb339da57acfdb261aea3204194810df" Nov 28 07:17:09 crc kubenswrapper[4922]: I1128 07:17:09.603380 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 28 07:17:09 crc kubenswrapper[4922]: I1128 07:17:09.615405 4922 scope.go:117] "RemoveContainer" containerID="acae5e00122e766c9eed7f3e79b75508af0500661d76036ba5bb8430f55cd335" Nov 28 07:17:09 crc kubenswrapper[4922]: E1128 07:17:09.616004 4922 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"acae5e00122e766c9eed7f3e79b75508af0500661d76036ba5bb8430f55cd335\": container with ID starting with acae5e00122e766c9eed7f3e79b75508af0500661d76036ba5bb8430f55cd335 not found: ID does not exist" containerID="acae5e00122e766c9eed7f3e79b75508af0500661d76036ba5bb8430f55cd335" Nov 28 07:17:09 crc kubenswrapper[4922]: I1128 07:17:09.616097 4922 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"acae5e00122e766c9eed7f3e79b75508af0500661d76036ba5bb8430f55cd335"} err="failed to get container status \"acae5e00122e766c9eed7f3e79b75508af0500661d76036ba5bb8430f55cd335\": rpc error: code = NotFound desc = could not find container \"acae5e00122e766c9eed7f3e79b75508af0500661d76036ba5bb8430f55cd335\": container with ID starting with acae5e00122e766c9eed7f3e79b75508af0500661d76036ba5bb8430f55cd335 not found: ID does not exist" Nov 28 07:17:09 crc kubenswrapper[4922]: I1128 07:17:09.616198 4922 scope.go:117] "RemoveContainer" containerID="100f2d0df96a65f044858959fc9d2c7023b44648f46d4f40df493c10716b2975" Nov 28 07:17:09 crc kubenswrapper[4922]: E1128 07:17:09.616591 
4922 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"100f2d0df96a65f044858959fc9d2c7023b44648f46d4f40df493c10716b2975\": container with ID starting with 100f2d0df96a65f044858959fc9d2c7023b44648f46d4f40df493c10716b2975 not found: ID does not exist" containerID="100f2d0df96a65f044858959fc9d2c7023b44648f46d4f40df493c10716b2975" Nov 28 07:17:09 crc kubenswrapper[4922]: I1128 07:17:09.616665 4922 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"100f2d0df96a65f044858959fc9d2c7023b44648f46d4f40df493c10716b2975"} err="failed to get container status \"100f2d0df96a65f044858959fc9d2c7023b44648f46d4f40df493c10716b2975\": rpc error: code = NotFound desc = could not find container \"100f2d0df96a65f044858959fc9d2c7023b44648f46d4f40df493c10716b2975\": container with ID starting with 100f2d0df96a65f044858959fc9d2c7023b44648f46d4f40df493c10716b2975 not found: ID does not exist" Nov 28 07:17:09 crc kubenswrapper[4922]: I1128 07:17:09.616737 4922 scope.go:117] "RemoveContainer" containerID="cc9025e3612f4604cec5ca69a6c7b1d7f12cc576c6679b92199a2550378bba5d" Nov 28 07:17:09 crc kubenswrapper[4922]: E1128 07:17:09.617014 4922 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"cc9025e3612f4604cec5ca69a6c7b1d7f12cc576c6679b92199a2550378bba5d\": container with ID starting with cc9025e3612f4604cec5ca69a6c7b1d7f12cc576c6679b92199a2550378bba5d not found: ID does not exist" containerID="cc9025e3612f4604cec5ca69a6c7b1d7f12cc576c6679b92199a2550378bba5d" Nov 28 07:17:09 crc kubenswrapper[4922]: I1128 07:17:09.617092 4922 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"cc9025e3612f4604cec5ca69a6c7b1d7f12cc576c6679b92199a2550378bba5d"} err="failed to get container status \"cc9025e3612f4604cec5ca69a6c7b1d7f12cc576c6679b92199a2550378bba5d\": rpc error: code = NotFound desc = could not find container \"cc9025e3612f4604cec5ca69a6c7b1d7f12cc576c6679b92199a2550378bba5d\": container with ID starting with cc9025e3612f4604cec5ca69a6c7b1d7f12cc576c6679b92199a2550378bba5d not found: ID does not exist" Nov 28 07:17:09 crc kubenswrapper[4922]: I1128 07:17:09.617159 4922 scope.go:117] "RemoveContainer" containerID="33d4cde3f8b9b25d6fd6c1b90335deeebb339da57acfdb261aea3204194810df" Nov 28 07:17:09 crc kubenswrapper[4922]: E1128 07:17:09.617459 4922 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"33d4cde3f8b9b25d6fd6c1b90335deeebb339da57acfdb261aea3204194810df\": container with ID starting with 33d4cde3f8b9b25d6fd6c1b90335deeebb339da57acfdb261aea3204194810df not found: ID does not exist" containerID="33d4cde3f8b9b25d6fd6c1b90335deeebb339da57acfdb261aea3204194810df" Nov 28 07:17:09 crc kubenswrapper[4922]: I1128 07:17:09.617551 4922 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"33d4cde3f8b9b25d6fd6c1b90335deeebb339da57acfdb261aea3204194810df"} err="failed to get container status \"33d4cde3f8b9b25d6fd6c1b90335deeebb339da57acfdb261aea3204194810df\": rpc error: code = NotFound desc = could not find container \"33d4cde3f8b9b25d6fd6c1b90335deeebb339da57acfdb261aea3204194810df\": container with ID starting with 33d4cde3f8b9b25d6fd6c1b90335deeebb339da57acfdb261aea3204194810df not found: ID does not exist" Nov 28 07:17:09 crc kubenswrapper[4922]: I1128 07:17:09.666176 4922 
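The "ContainerStatus from runtime service failed ... code = NotFound" errors above are the benign case: the containers were already removed from CRI-O, so the status lookup during deletion comes back as gRPC NotFound and the kubelet logs the error but moves on. Any client of a CRI-style gRPC API can make the same distinction with the standard status package:

    package main

    import (
        "fmt"

        "google.golang.org/grpc/codes"
        "google.golang.org/grpc/status"
    )

    // alreadyGone reports whether a gRPC error means the object no longer
    // exists, the one error code a deleter should swallow.
    func alreadyGone(err error) bool {
        return status.Code(err) == codes.NotFound
    }

    func main() {
        err := status.Error(codes.NotFound, "could not find container")
        if alreadyGone(err) {
            fmt.Println("container already deleted; nothing to do")
        }
    }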
Nov 28 07:17:09 crc kubenswrapper[4922]: I1128 07:17:09.666176 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/f7109c3e-fe30-4320-bc5b-77e8d8d9fcd5-run-httpd\") pod \"ceilometer-0\" (UID: \"f7109c3e-fe30-4320-bc5b-77e8d8d9fcd5\") " pod="openstack/ceilometer-0"
Nov 28 07:17:09 crc kubenswrapper[4922]: I1128 07:17:09.666470 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7f9pt\" (UniqueName: \"kubernetes.io/projected/f7109c3e-fe30-4320-bc5b-77e8d8d9fcd5-kube-api-access-7f9pt\") pod \"ceilometer-0\" (UID: \"f7109c3e-fe30-4320-bc5b-77e8d8d9fcd5\") " pod="openstack/ceilometer-0"
Nov 28 07:17:09 crc kubenswrapper[4922]: I1128 07:17:09.666611 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f7109c3e-fe30-4320-bc5b-77e8d8d9fcd5-config-data\") pod \"ceilometer-0\" (UID: \"f7109c3e-fe30-4320-bc5b-77e8d8d9fcd5\") " pod="openstack/ceilometer-0"
Nov 28 07:17:09 crc kubenswrapper[4922]: I1128 07:17:09.666711 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f7109c3e-fe30-4320-bc5b-77e8d8d9fcd5-scripts\") pod \"ceilometer-0\" (UID: \"f7109c3e-fe30-4320-bc5b-77e8d8d9fcd5\") " pod="openstack/ceilometer-0"
Nov 28 07:17:09 crc kubenswrapper[4922]: I1128 07:17:09.666793 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/f7109c3e-fe30-4320-bc5b-77e8d8d9fcd5-log-httpd\") pod \"ceilometer-0\" (UID: \"f7109c3e-fe30-4320-bc5b-77e8d8d9fcd5\") " pod="openstack/ceilometer-0"
Nov 28 07:17:09 crc kubenswrapper[4922]: I1128 07:17:09.666878 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/f7109c3e-fe30-4320-bc5b-77e8d8d9fcd5-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"f7109c3e-fe30-4320-bc5b-77e8d8d9fcd5\") " pod="openstack/ceilometer-0"
Nov 28 07:17:09 crc kubenswrapper[4922]: I1128 07:17:09.667003 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f7109c3e-fe30-4320-bc5b-77e8d8d9fcd5-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"f7109c3e-fe30-4320-bc5b-77e8d8d9fcd5\") " pod="openstack/ceilometer-0"
Nov 28 07:17:09 crc kubenswrapper[4922]: I1128 07:17:09.768946 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f7109c3e-fe30-4320-bc5b-77e8d8d9fcd5-config-data\") pod \"ceilometer-0\" (UID: \"f7109c3e-fe30-4320-bc5b-77e8d8d9fcd5\") " pod="openstack/ceilometer-0"
Nov 28 07:17:09 crc kubenswrapper[4922]: I1128 07:17:09.769029 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f7109c3e-fe30-4320-bc5b-77e8d8d9fcd5-scripts\") pod \"ceilometer-0\" (UID: \"f7109c3e-fe30-4320-bc5b-77e8d8d9fcd5\") " pod="openstack/ceilometer-0"
Nov 28 07:17:09 crc kubenswrapper[4922]: I1128 07:17:09.769059 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/f7109c3e-fe30-4320-bc5b-77e8d8d9fcd5-log-httpd\") pod \"ceilometer-0\" (UID: \"f7109c3e-fe30-4320-bc5b-77e8d8d9fcd5\") " pod="openstack/ceilometer-0"
Nov 28 07:17:09 crc kubenswrapper[4922]: I1128 07:17:09.769096 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/f7109c3e-fe30-4320-bc5b-77e8d8d9fcd5-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"f7109c3e-fe30-4320-bc5b-77e8d8d9fcd5\") " pod="openstack/ceilometer-0"
Nov 28 07:17:09 crc kubenswrapper[4922]: I1128 07:17:09.769166 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f7109c3e-fe30-4320-bc5b-77e8d8d9fcd5-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"f7109c3e-fe30-4320-bc5b-77e8d8d9fcd5\") " pod="openstack/ceilometer-0"
Nov 28 07:17:09 crc kubenswrapper[4922]: I1128 07:17:09.769197 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/f7109c3e-fe30-4320-bc5b-77e8d8d9fcd5-run-httpd\") pod \"ceilometer-0\" (UID: \"f7109c3e-fe30-4320-bc5b-77e8d8d9fcd5\") " pod="openstack/ceilometer-0"
Nov 28 07:17:09 crc kubenswrapper[4922]: I1128 07:17:09.769246 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7f9pt\" (UniqueName: \"kubernetes.io/projected/f7109c3e-fe30-4320-bc5b-77e8d8d9fcd5-kube-api-access-7f9pt\") pod \"ceilometer-0\" (UID: \"f7109c3e-fe30-4320-bc5b-77e8d8d9fcd5\") " pod="openstack/ceilometer-0"
Nov 28 07:17:09 crc kubenswrapper[4922]: I1128 07:17:09.770762 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/f7109c3e-fe30-4320-bc5b-77e8d8d9fcd5-run-httpd\") pod \"ceilometer-0\" (UID: \"f7109c3e-fe30-4320-bc5b-77e8d8d9fcd5\") " pod="openstack/ceilometer-0"
Nov 28 07:17:09 crc kubenswrapper[4922]: I1128 07:17:09.770806 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/f7109c3e-fe30-4320-bc5b-77e8d8d9fcd5-log-httpd\") pod \"ceilometer-0\" (UID: \"f7109c3e-fe30-4320-bc5b-77e8d8d9fcd5\") " pod="openstack/ceilometer-0"
Nov 28 07:17:09 crc kubenswrapper[4922]: I1128 07:17:09.775649 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f7109c3e-fe30-4320-bc5b-77e8d8d9fcd5-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"f7109c3e-fe30-4320-bc5b-77e8d8d9fcd5\") " pod="openstack/ceilometer-0"
Nov 28 07:17:09 crc kubenswrapper[4922]: I1128 07:17:09.776746 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f7109c3e-fe30-4320-bc5b-77e8d8d9fcd5-scripts\") pod \"ceilometer-0\" (UID: \"f7109c3e-fe30-4320-bc5b-77e8d8d9fcd5\") " pod="openstack/ceilometer-0"
Nov 28 07:17:09 crc kubenswrapper[4922]: I1128 07:17:09.778631 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f7109c3e-fe30-4320-bc5b-77e8d8d9fcd5-config-data\") pod \"ceilometer-0\" (UID: \"f7109c3e-fe30-4320-bc5b-77e8d8d9fcd5\") " pod="openstack/ceilometer-0"
Nov 28 07:17:09 crc kubenswrapper[4922]: I1128 07:17:09.779099 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/f7109c3e-fe30-4320-bc5b-77e8d8d9fcd5-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"f7109c3e-fe30-4320-bc5b-77e8d8d9fcd5\") " pod="openstack/ceilometer-0"
Nov 28 07:17:09 crc kubenswrapper[4922]: I1128 07:17:09.788389 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7f9pt\" (UniqueName: \"kubernetes.io/projected/f7109c3e-fe30-4320-bc5b-77e8d8d9fcd5-kube-api-access-7f9pt\") pod \"ceilometer-0\" (UID: \"f7109c3e-fe30-4320-bc5b-77e8d8d9fcd5\") " pod="openstack/ceilometer-0"
Nov 28 07:17:09 crc kubenswrapper[4922]: I1128 07:17:09.916962 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0"
Nov 28 07:17:10 crc kubenswrapper[4922]: I1128 07:17:10.411816 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"]
Nov 28 07:17:10 crc kubenswrapper[4922]: W1128 07:17:10.414433 4922 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podf7109c3e_fe30_4320_bc5b_77e8d8d9fcd5.slice/crio-9cd35fa71c7f03b885e0f3947217209e0484a820b584921951bfeed938b4d971 WatchSource:0}: Error finding container 9cd35fa71c7f03b885e0f3947217209e0484a820b584921951bfeed938b4d971: Status 404 returned error can't find the container with id 9cd35fa71c7f03b885e0f3947217209e0484a820b584921951bfeed938b4d971
Nov 28 07:17:10 crc kubenswrapper[4922]: I1128 07:17:10.486147 4922 generic.go:334] "Generic (PLEG): container finished" podID="0a538d8e-6665-46a0-bf05-2957a37bc9a2" containerID="9af11d45a9ec4a7c6386d4f9531d1d6d3337db4ee65cf969fcc2b73d76c44a51" exitCode=0
Nov 28 07:17:10 crc kubenswrapper[4922]: I1128 07:17:10.486267 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-db-sync-fr9rg" event={"ID":"0a538d8e-6665-46a0-bf05-2957a37bc9a2","Type":"ContainerDied","Data":"9af11d45a9ec4a7c6386d4f9531d1d6d3337db4ee65cf969fcc2b73d76c44a51"}
Nov 28 07:17:10 crc kubenswrapper[4922]: I1128 07:17:10.490552 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"f7109c3e-fe30-4320-bc5b-77e8d8d9fcd5","Type":"ContainerStarted","Data":"9cd35fa71c7f03b885e0f3947217209e0484a820b584921951bfeed938b4d971"}
Nov 28 07:17:11 crc kubenswrapper[4922]: I1128 07:17:11.417201 4922 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f5d7ad45-a1fd-45f7-b292-cf6ae5b4e6ef" path="/var/lib/kubelet/pods/f5d7ad45-a1fd-45f7-b292-cf6ae5b4e6ef/volumes"
Nov 28 07:17:11 crc kubenswrapper[4922]: I1128 07:17:11.506439 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"f7109c3e-fe30-4320-bc5b-77e8d8d9fcd5","Type":"ContainerStarted","Data":"24a754b3e28e220f702cbe423f7a8548e10bca4d828b90b098c741f027d4ae2e"}
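Above, the nova-cell0-conductor-db-sync-fr9rg container exits 0, and below the API adds nova-cell0-conductor-0: the conductor deployment is gated on the database sync finishing. A hedged sketch of such a gate, assuming the controller keys off standard batch/v1 Job status (the OpenStack operator's actual reconcile logic is not visible in this log):

    package main

    import (
        "fmt"

        batchv1 "k8s.io/api/batch/v1"
    )

    // dbSyncDone: a Job is complete once at least one pod succeeded; a
    // controller would only then create the dependent conductor workload.
    func dbSyncDone(job *batchv1.Job) bool {
        return job.Status.Succeeded > 0
    }

    func main() {
        job := &batchv1.Job{Status: batchv1.JobStatus{Succeeded: 1}}
        if dbSyncDone(job) {
            fmt.Println("db sync complete; safe to create nova-cell0-conductor-0")
        }
    }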
Need to start a new one" pod="openstack/nova-cell0-conductor-db-sync-fr9rg" Nov 28 07:17:12 crc kubenswrapper[4922]: I1128 07:17:12.012670 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0a538d8e-6665-46a0-bf05-2957a37bc9a2-config-data\") pod \"0a538d8e-6665-46a0-bf05-2957a37bc9a2\" (UID: \"0a538d8e-6665-46a0-bf05-2957a37bc9a2\") " Nov 28 07:17:12 crc kubenswrapper[4922]: I1128 07:17:12.012842 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-987pt\" (UniqueName: \"kubernetes.io/projected/0a538d8e-6665-46a0-bf05-2957a37bc9a2-kube-api-access-987pt\") pod \"0a538d8e-6665-46a0-bf05-2957a37bc9a2\" (UID: \"0a538d8e-6665-46a0-bf05-2957a37bc9a2\") " Nov 28 07:17:12 crc kubenswrapper[4922]: I1128 07:17:12.012940 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0a538d8e-6665-46a0-bf05-2957a37bc9a2-combined-ca-bundle\") pod \"0a538d8e-6665-46a0-bf05-2957a37bc9a2\" (UID: \"0a538d8e-6665-46a0-bf05-2957a37bc9a2\") " Nov 28 07:17:12 crc kubenswrapper[4922]: I1128 07:17:12.013048 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0a538d8e-6665-46a0-bf05-2957a37bc9a2-scripts\") pod \"0a538d8e-6665-46a0-bf05-2957a37bc9a2\" (UID: \"0a538d8e-6665-46a0-bf05-2957a37bc9a2\") " Nov 28 07:17:12 crc kubenswrapper[4922]: I1128 07:17:12.030977 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0a538d8e-6665-46a0-bf05-2957a37bc9a2-scripts" (OuterVolumeSpecName: "scripts") pod "0a538d8e-6665-46a0-bf05-2957a37bc9a2" (UID: "0a538d8e-6665-46a0-bf05-2957a37bc9a2"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 07:17:12 crc kubenswrapper[4922]: I1128 07:17:12.031836 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0a538d8e-6665-46a0-bf05-2957a37bc9a2-kube-api-access-987pt" (OuterVolumeSpecName: "kube-api-access-987pt") pod "0a538d8e-6665-46a0-bf05-2957a37bc9a2" (UID: "0a538d8e-6665-46a0-bf05-2957a37bc9a2"). InnerVolumeSpecName "kube-api-access-987pt". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 07:17:12 crc kubenswrapper[4922]: I1128 07:17:12.081179 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0a538d8e-6665-46a0-bf05-2957a37bc9a2-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "0a538d8e-6665-46a0-bf05-2957a37bc9a2" (UID: "0a538d8e-6665-46a0-bf05-2957a37bc9a2"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 07:17:12 crc kubenswrapper[4922]: I1128 07:17:12.093885 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0a538d8e-6665-46a0-bf05-2957a37bc9a2-config-data" (OuterVolumeSpecName: "config-data") pod "0a538d8e-6665-46a0-bf05-2957a37bc9a2" (UID: "0a538d8e-6665-46a0-bf05-2957a37bc9a2"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 07:17:12 crc kubenswrapper[4922]: I1128 07:17:12.114738 4922 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0a538d8e-6665-46a0-bf05-2957a37bc9a2-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 07:17:12 crc kubenswrapper[4922]: I1128 07:17:12.114771 4922 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0a538d8e-6665-46a0-bf05-2957a37bc9a2-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 07:17:12 crc kubenswrapper[4922]: I1128 07:17:12.114781 4922 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0a538d8e-6665-46a0-bf05-2957a37bc9a2-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 07:17:12 crc kubenswrapper[4922]: I1128 07:17:12.114792 4922 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-987pt\" (UniqueName: \"kubernetes.io/projected/0a538d8e-6665-46a0-bf05-2957a37bc9a2-kube-api-access-987pt\") on node \"crc\" DevicePath \"\"" Nov 28 07:17:12 crc kubenswrapper[4922]: I1128 07:17:12.525912 4922 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-db-sync-fr9rg" Nov 28 07:17:12 crc kubenswrapper[4922]: I1128 07:17:12.525905 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-db-sync-fr9rg" event={"ID":"0a538d8e-6665-46a0-bf05-2957a37bc9a2","Type":"ContainerDied","Data":"051ab5eeb0f7418015c018d6b5cd1bef6e5b544b6cb022ec4b13119051c258d4"} Nov 28 07:17:12 crc kubenswrapper[4922]: I1128 07:17:12.526405 4922 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="051ab5eeb0f7418015c018d6b5cd1bef6e5b544b6cb022ec4b13119051c258d4" Nov 28 07:17:12 crc kubenswrapper[4922]: I1128 07:17:12.529947 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"f7109c3e-fe30-4320-bc5b-77e8d8d9fcd5","Type":"ContainerStarted","Data":"df96a23508edaf2f3aa425171422774275166b45b04e08150bfd011989c9be93"} Nov 28 07:17:12 crc kubenswrapper[4922]: I1128 07:17:12.620695 4922 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-conductor-0"] Nov 28 07:17:12 crc kubenswrapper[4922]: E1128 07:17:12.621168 4922 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0a538d8e-6665-46a0-bf05-2957a37bc9a2" containerName="nova-cell0-conductor-db-sync" Nov 28 07:17:12 crc kubenswrapper[4922]: I1128 07:17:12.621181 4922 state_mem.go:107] "Deleted CPUSet assignment" podUID="0a538d8e-6665-46a0-bf05-2957a37bc9a2" containerName="nova-cell0-conductor-db-sync" Nov 28 07:17:12 crc kubenswrapper[4922]: I1128 07:17:12.621403 4922 memory_manager.go:354] "RemoveStaleState removing state" podUID="0a538d8e-6665-46a0-bf05-2957a37bc9a2" containerName="nova-cell0-conductor-db-sync" Nov 28 07:17:12 crc kubenswrapper[4922]: I1128 07:17:12.622047 4922 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-conductor-0" Nov 28 07:17:12 crc kubenswrapper[4922]: I1128 07:17:12.624778 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-nova-dockercfg-ptpqn" Nov 28 07:17:12 crc kubenswrapper[4922]: I1128 07:17:12.625019 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-conductor-config-data" Nov 28 07:17:12 crc kubenswrapper[4922]: I1128 07:17:12.654480 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-0"] Nov 28 07:17:12 crc kubenswrapper[4922]: I1128 07:17:12.728315 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7dfc2e52-b959-4718-8f85-5bcec1a8ad10-combined-ca-bundle\") pod \"nova-cell0-conductor-0\" (UID: \"7dfc2e52-b959-4718-8f85-5bcec1a8ad10\") " pod="openstack/nova-cell0-conductor-0" Nov 28 07:17:12 crc kubenswrapper[4922]: I1128 07:17:12.728374 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7dfc2e52-b959-4718-8f85-5bcec1a8ad10-config-data\") pod \"nova-cell0-conductor-0\" (UID: \"7dfc2e52-b959-4718-8f85-5bcec1a8ad10\") " pod="openstack/nova-cell0-conductor-0" Nov 28 07:17:12 crc kubenswrapper[4922]: I1128 07:17:12.728491 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9rtbd\" (UniqueName: \"kubernetes.io/projected/7dfc2e52-b959-4718-8f85-5bcec1a8ad10-kube-api-access-9rtbd\") pod \"nova-cell0-conductor-0\" (UID: \"7dfc2e52-b959-4718-8f85-5bcec1a8ad10\") " pod="openstack/nova-cell0-conductor-0" Nov 28 07:17:12 crc kubenswrapper[4922]: I1128 07:17:12.830977 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9rtbd\" (UniqueName: \"kubernetes.io/projected/7dfc2e52-b959-4718-8f85-5bcec1a8ad10-kube-api-access-9rtbd\") pod \"nova-cell0-conductor-0\" (UID: \"7dfc2e52-b959-4718-8f85-5bcec1a8ad10\") " pod="openstack/nova-cell0-conductor-0" Nov 28 07:17:12 crc kubenswrapper[4922]: I1128 07:17:12.831629 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7dfc2e52-b959-4718-8f85-5bcec1a8ad10-combined-ca-bundle\") pod \"nova-cell0-conductor-0\" (UID: \"7dfc2e52-b959-4718-8f85-5bcec1a8ad10\") " pod="openstack/nova-cell0-conductor-0" Nov 28 07:17:12 crc kubenswrapper[4922]: I1128 07:17:12.831672 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7dfc2e52-b959-4718-8f85-5bcec1a8ad10-config-data\") pod \"nova-cell0-conductor-0\" (UID: \"7dfc2e52-b959-4718-8f85-5bcec1a8ad10\") " pod="openstack/nova-cell0-conductor-0" Nov 28 07:17:12 crc kubenswrapper[4922]: I1128 07:17:12.836948 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7dfc2e52-b959-4718-8f85-5bcec1a8ad10-combined-ca-bundle\") pod \"nova-cell0-conductor-0\" (UID: \"7dfc2e52-b959-4718-8f85-5bcec1a8ad10\") " pod="openstack/nova-cell0-conductor-0" Nov 28 07:17:12 crc kubenswrapper[4922]: I1128 07:17:12.836954 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7dfc2e52-b959-4718-8f85-5bcec1a8ad10-config-data\") pod \"nova-cell0-conductor-0\" 
(UID: \"7dfc2e52-b959-4718-8f85-5bcec1a8ad10\") " pod="openstack/nova-cell0-conductor-0" Nov 28 07:17:12 crc kubenswrapper[4922]: I1128 07:17:12.847950 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9rtbd\" (UniqueName: \"kubernetes.io/projected/7dfc2e52-b959-4718-8f85-5bcec1a8ad10-kube-api-access-9rtbd\") pod \"nova-cell0-conductor-0\" (UID: \"7dfc2e52-b959-4718-8f85-5bcec1a8ad10\") " pod="openstack/nova-cell0-conductor-0" Nov 28 07:17:12 crc kubenswrapper[4922]: I1128 07:17:12.959069 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-0" Nov 28 07:17:13 crc kubenswrapper[4922]: I1128 07:17:13.499111 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-0"] Nov 28 07:17:13 crc kubenswrapper[4922]: W1128 07:17:13.507875 4922 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod7dfc2e52_b959_4718_8f85_5bcec1a8ad10.slice/crio-46ede5c07bbe0eb6357b27c8a8cafeb5d767760d0bf355ecdd0cb6a1b85fbe13 WatchSource:0}: Error finding container 46ede5c07bbe0eb6357b27c8a8cafeb5d767760d0bf355ecdd0cb6a1b85fbe13: Status 404 returned error can't find the container with id 46ede5c07bbe0eb6357b27c8a8cafeb5d767760d0bf355ecdd0cb6a1b85fbe13 Nov 28 07:17:13 crc kubenswrapper[4922]: I1128 07:17:13.540582 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-0" event={"ID":"7dfc2e52-b959-4718-8f85-5bcec1a8ad10","Type":"ContainerStarted","Data":"46ede5c07bbe0eb6357b27c8a8cafeb5d767760d0bf355ecdd0cb6a1b85fbe13"} Nov 28 07:17:13 crc kubenswrapper[4922]: I1128 07:17:13.542061 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"f7109c3e-fe30-4320-bc5b-77e8d8d9fcd5","Type":"ContainerStarted","Data":"3eeece16ff3dc32b9e585c6aac40d9542164997b870f99a0f6eb03fdfeb4385d"} Nov 28 07:17:14 crc kubenswrapper[4922]: I1128 07:17:14.553097 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-0" event={"ID":"7dfc2e52-b959-4718-8f85-5bcec1a8ad10","Type":"ContainerStarted","Data":"e49fd2c5ff7e7c88e4e82dd56690d77b902a8ad478e8bdadcc87b5a3e2af26de"} Nov 28 07:17:14 crc kubenswrapper[4922]: I1128 07:17:14.556598 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"f7109c3e-fe30-4320-bc5b-77e8d8d9fcd5","Type":"ContainerStarted","Data":"10ff0b42fda9ed0c0ed9344fff7081aed39e685a880052b47458eacb9c0a4858"} Nov 28 07:17:14 crc kubenswrapper[4922]: I1128 07:17:14.557786 4922 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Nov 28 07:17:14 crc kubenswrapper[4922]: I1128 07:17:14.594931 4922 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell0-conductor-0" podStartSLOduration=2.594911018 podStartE2EDuration="2.594911018s" podCreationTimestamp="2025-11-28 07:17:12 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 07:17:14.587710946 +0000 UTC m=+1479.508106578" watchObservedRunningTime="2025-11-28 07:17:14.594911018 +0000 UTC m=+1479.515306610" Nov 28 07:17:14 crc kubenswrapper[4922]: I1128 07:17:14.640963 4922 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=2.230113383 podStartE2EDuration="5.640941602s" podCreationTimestamp="2025-11-28 
07:17:09 +0000 UTC" firstStartedPulling="2025-11-28 07:17:10.418252903 +0000 UTC m=+1475.338648495" lastFinishedPulling="2025-11-28 07:17:13.829081121 +0000 UTC m=+1478.749476714" observedRunningTime="2025-11-28 07:17:14.619974925 +0000 UTC m=+1479.540370547" watchObservedRunningTime="2025-11-28 07:17:14.640941602 +0000 UTC m=+1479.561337194" Nov 28 07:17:15 crc kubenswrapper[4922]: I1128 07:17:15.567569 4922 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-cell0-conductor-0" Nov 28 07:17:23 crc kubenswrapper[4922]: I1128 07:17:23.009536 4922 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-cell0-conductor-0" Nov 28 07:17:23 crc kubenswrapper[4922]: I1128 07:17:23.492552 4922 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-cell-mapping-m8jjn"] Nov 28 07:17:23 crc kubenswrapper[4922]: I1128 07:17:23.494255 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-cell-mapping-m8jjn" Nov 28 07:17:23 crc kubenswrapper[4922]: I1128 07:17:23.496158 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-manage-config-data" Nov 28 07:17:23 crc kubenswrapper[4922]: I1128 07:17:23.501581 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-manage-scripts" Nov 28 07:17:23 crc kubenswrapper[4922]: I1128 07:17:23.514341 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-cell-mapping-m8jjn"] Nov 28 07:17:23 crc kubenswrapper[4922]: I1128 07:17:23.559657 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zklj7\" (UniqueName: \"kubernetes.io/projected/97450035-755a-422c-ade7-b3bd97b917cd-kube-api-access-zklj7\") pod \"nova-cell0-cell-mapping-m8jjn\" (UID: \"97450035-755a-422c-ade7-b3bd97b917cd\") " pod="openstack/nova-cell0-cell-mapping-m8jjn" Nov 28 07:17:23 crc kubenswrapper[4922]: I1128 07:17:23.559774 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/97450035-755a-422c-ade7-b3bd97b917cd-config-data\") pod \"nova-cell0-cell-mapping-m8jjn\" (UID: \"97450035-755a-422c-ade7-b3bd97b917cd\") " pod="openstack/nova-cell0-cell-mapping-m8jjn" Nov 28 07:17:23 crc kubenswrapper[4922]: I1128 07:17:23.559851 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/97450035-755a-422c-ade7-b3bd97b917cd-combined-ca-bundle\") pod \"nova-cell0-cell-mapping-m8jjn\" (UID: \"97450035-755a-422c-ade7-b3bd97b917cd\") " pod="openstack/nova-cell0-cell-mapping-m8jjn" Nov 28 07:17:23 crc kubenswrapper[4922]: I1128 07:17:23.559965 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/97450035-755a-422c-ade7-b3bd97b917cd-scripts\") pod \"nova-cell0-cell-mapping-m8jjn\" (UID: \"97450035-755a-422c-ade7-b3bd97b917cd\") " pod="openstack/nova-cell0-cell-mapping-m8jjn" Nov 28 07:17:23 crc kubenswrapper[4922]: I1128 07:17:23.661511 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/97450035-755a-422c-ade7-b3bd97b917cd-scripts\") pod \"nova-cell0-cell-mapping-m8jjn\" (UID: \"97450035-755a-422c-ade7-b3bd97b917cd\") " pod="openstack/nova-cell0-cell-mapping-m8jjn" 
Nov 28 07:17:23 crc kubenswrapper[4922]: I1128 07:17:23.661656 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zklj7\" (UniqueName: \"kubernetes.io/projected/97450035-755a-422c-ade7-b3bd97b917cd-kube-api-access-zklj7\") pod \"nova-cell0-cell-mapping-m8jjn\" (UID: \"97450035-755a-422c-ade7-b3bd97b917cd\") " pod="openstack/nova-cell0-cell-mapping-m8jjn" Nov 28 07:17:23 crc kubenswrapper[4922]: I1128 07:17:23.661710 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/97450035-755a-422c-ade7-b3bd97b917cd-config-data\") pod \"nova-cell0-cell-mapping-m8jjn\" (UID: \"97450035-755a-422c-ade7-b3bd97b917cd\") " pod="openstack/nova-cell0-cell-mapping-m8jjn" Nov 28 07:17:23 crc kubenswrapper[4922]: I1128 07:17:23.661748 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/97450035-755a-422c-ade7-b3bd97b917cd-combined-ca-bundle\") pod \"nova-cell0-cell-mapping-m8jjn\" (UID: \"97450035-755a-422c-ade7-b3bd97b917cd\") " pod="openstack/nova-cell0-cell-mapping-m8jjn" Nov 28 07:17:23 crc kubenswrapper[4922]: I1128 07:17:23.662964 4922 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-scheduler-0"] Nov 28 07:17:23 crc kubenswrapper[4922]: I1128 07:17:23.664104 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Nov 28 07:17:23 crc kubenswrapper[4922]: I1128 07:17:23.668891 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-scheduler-config-data" Nov 28 07:17:23 crc kubenswrapper[4922]: I1128 07:17:23.671002 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/97450035-755a-422c-ade7-b3bd97b917cd-scripts\") pod \"nova-cell0-cell-mapping-m8jjn\" (UID: \"97450035-755a-422c-ade7-b3bd97b917cd\") " pod="openstack/nova-cell0-cell-mapping-m8jjn" Nov 28 07:17:23 crc kubenswrapper[4922]: I1128 07:17:23.675016 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/97450035-755a-422c-ade7-b3bd97b917cd-combined-ca-bundle\") pod \"nova-cell0-cell-mapping-m8jjn\" (UID: \"97450035-755a-422c-ade7-b3bd97b917cd\") " pod="openstack/nova-cell0-cell-mapping-m8jjn" Nov 28 07:17:23 crc kubenswrapper[4922]: I1128 07:17:23.675598 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/97450035-755a-422c-ade7-b3bd97b917cd-config-data\") pod \"nova-cell0-cell-mapping-m8jjn\" (UID: \"97450035-755a-422c-ade7-b3bd97b917cd\") " pod="openstack/nova-cell0-cell-mapping-m8jjn" Nov 28 07:17:23 crc kubenswrapper[4922]: I1128 07:17:23.685557 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zklj7\" (UniqueName: \"kubernetes.io/projected/97450035-755a-422c-ade7-b3bd97b917cd-kube-api-access-zklj7\") pod \"nova-cell0-cell-mapping-m8jjn\" (UID: \"97450035-755a-422c-ade7-b3bd97b917cd\") " pod="openstack/nova-cell0-cell-mapping-m8jjn" Nov 28 07:17:23 crc kubenswrapper[4922]: I1128 07:17:23.692277 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Nov 28 07:17:23 crc kubenswrapper[4922]: I1128 07:17:23.710562 4922 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-metadata-0"] Nov 28 07:17:23 crc kubenswrapper[4922]: I1128 
07:17:23.712188 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Nov 28 07:17:23 crc kubenswrapper[4922]: I1128 07:17:23.718302 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-config-data" Nov 28 07:17:23 crc kubenswrapper[4922]: I1128 07:17:23.731895 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Nov 28 07:17:23 crc kubenswrapper[4922]: I1128 07:17:23.763356 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ws7mm\" (UniqueName: \"kubernetes.io/projected/b9239175-af51-46ed-a36b-a5b98a1bb790-kube-api-access-ws7mm\") pod \"nova-metadata-0\" (UID: \"b9239175-af51-46ed-a36b-a5b98a1bb790\") " pod="openstack/nova-metadata-0" Nov 28 07:17:23 crc kubenswrapper[4922]: I1128 07:17:23.763442 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b9239175-af51-46ed-a36b-a5b98a1bb790-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"b9239175-af51-46ed-a36b-a5b98a1bb790\") " pod="openstack/nova-metadata-0" Nov 28 07:17:23 crc kubenswrapper[4922]: I1128 07:17:23.763524 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b9239175-af51-46ed-a36b-a5b98a1bb790-logs\") pod \"nova-metadata-0\" (UID: \"b9239175-af51-46ed-a36b-a5b98a1bb790\") " pod="openstack/nova-metadata-0" Nov 28 07:17:23 crc kubenswrapper[4922]: I1128 07:17:23.763579 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/57109065-ce83-4dea-96c1-48c94d854c36-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"57109065-ce83-4dea-96c1-48c94d854c36\") " pod="openstack/nova-scheduler-0" Nov 28 07:17:23 crc kubenswrapper[4922]: I1128 07:17:23.763600 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b9239175-af51-46ed-a36b-a5b98a1bb790-config-data\") pod \"nova-metadata-0\" (UID: \"b9239175-af51-46ed-a36b-a5b98a1bb790\") " pod="openstack/nova-metadata-0" Nov 28 07:17:23 crc kubenswrapper[4922]: I1128 07:17:23.763648 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-75xpd\" (UniqueName: \"kubernetes.io/projected/57109065-ce83-4dea-96c1-48c94d854c36-kube-api-access-75xpd\") pod \"nova-scheduler-0\" (UID: \"57109065-ce83-4dea-96c1-48c94d854c36\") " pod="openstack/nova-scheduler-0" Nov 28 07:17:23 crc kubenswrapper[4922]: I1128 07:17:23.763721 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/57109065-ce83-4dea-96c1-48c94d854c36-config-data\") pod \"nova-scheduler-0\" (UID: \"57109065-ce83-4dea-96c1-48c94d854c36\") " pod="openstack/nova-scheduler-0" Nov 28 07:17:23 crc kubenswrapper[4922]: I1128 07:17:23.811566 4922 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-cell-mapping-m8jjn" Nov 28 07:17:23 crc kubenswrapper[4922]: I1128 07:17:23.862184 4922 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-0"] Nov 28 07:17:23 crc kubenswrapper[4922]: I1128 07:17:23.868059 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Nov 28 07:17:23 crc kubenswrapper[4922]: I1128 07:17:23.866268 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ws7mm\" (UniqueName: \"kubernetes.io/projected/b9239175-af51-46ed-a36b-a5b98a1bb790-kube-api-access-ws7mm\") pod \"nova-metadata-0\" (UID: \"b9239175-af51-46ed-a36b-a5b98a1bb790\") " pod="openstack/nova-metadata-0" Nov 28 07:17:23 crc kubenswrapper[4922]: I1128 07:17:23.868825 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b9239175-af51-46ed-a36b-a5b98a1bb790-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"b9239175-af51-46ed-a36b-a5b98a1bb790\") " pod="openstack/nova-metadata-0" Nov 28 07:17:23 crc kubenswrapper[4922]: I1128 07:17:23.871365 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b9239175-af51-46ed-a36b-a5b98a1bb790-logs\") pod \"nova-metadata-0\" (UID: \"b9239175-af51-46ed-a36b-a5b98a1bb790\") " pod="openstack/nova-metadata-0" Nov 28 07:17:23 crc kubenswrapper[4922]: I1128 07:17:23.871579 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/57109065-ce83-4dea-96c1-48c94d854c36-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"57109065-ce83-4dea-96c1-48c94d854c36\") " pod="openstack/nova-scheduler-0" Nov 28 07:17:23 crc kubenswrapper[4922]: I1128 07:17:23.871765 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b9239175-af51-46ed-a36b-a5b98a1bb790-config-data\") pod \"nova-metadata-0\" (UID: \"b9239175-af51-46ed-a36b-a5b98a1bb790\") " pod="openstack/nova-metadata-0" Nov 28 07:17:23 crc kubenswrapper[4922]: I1128 07:17:23.871852 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-75xpd\" (UniqueName: \"kubernetes.io/projected/57109065-ce83-4dea-96c1-48c94d854c36-kube-api-access-75xpd\") pod \"nova-scheduler-0\" (UID: \"57109065-ce83-4dea-96c1-48c94d854c36\") " pod="openstack/nova-scheduler-0" Nov 28 07:17:23 crc kubenswrapper[4922]: I1128 07:17:23.871918 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/57109065-ce83-4dea-96c1-48c94d854c36-config-data\") pod \"nova-scheduler-0\" (UID: \"57109065-ce83-4dea-96c1-48c94d854c36\") " pod="openstack/nova-scheduler-0" Nov 28 07:17:23 crc kubenswrapper[4922]: I1128 07:17:23.872391 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b9239175-af51-46ed-a36b-a5b98a1bb790-logs\") pod \"nova-metadata-0\" (UID: \"b9239175-af51-46ed-a36b-a5b98a1bb790\") " pod="openstack/nova-metadata-0" Nov 28 07:17:23 crc kubenswrapper[4922]: I1128 07:17:23.875880 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-config-data" Nov 28 07:17:23 crc kubenswrapper[4922]: I1128 07:17:23.878824 4922 operation_generator.go:637] "MountVolume.SetUp succeeded 
for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/57109065-ce83-4dea-96c1-48c94d854c36-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"57109065-ce83-4dea-96c1-48c94d854c36\") " pod="openstack/nova-scheduler-0" Nov 28 07:17:23 crc kubenswrapper[4922]: I1128 07:17:23.883171 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b9239175-af51-46ed-a36b-a5b98a1bb790-config-data\") pod \"nova-metadata-0\" (UID: \"b9239175-af51-46ed-a36b-a5b98a1bb790\") " pod="openstack/nova-metadata-0" Nov 28 07:17:23 crc kubenswrapper[4922]: I1128 07:17:23.887942 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b9239175-af51-46ed-a36b-a5b98a1bb790-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"b9239175-af51-46ed-a36b-a5b98a1bb790\") " pod="openstack/nova-metadata-0" Nov 28 07:17:23 crc kubenswrapper[4922]: I1128 07:17:23.888287 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Nov 28 07:17:23 crc kubenswrapper[4922]: I1128 07:17:23.899788 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/57109065-ce83-4dea-96c1-48c94d854c36-config-data\") pod \"nova-scheduler-0\" (UID: \"57109065-ce83-4dea-96c1-48c94d854c36\") " pod="openstack/nova-scheduler-0" Nov 28 07:17:23 crc kubenswrapper[4922]: I1128 07:17:23.913738 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ws7mm\" (UniqueName: \"kubernetes.io/projected/b9239175-af51-46ed-a36b-a5b98a1bb790-kube-api-access-ws7mm\") pod \"nova-metadata-0\" (UID: \"b9239175-af51-46ed-a36b-a5b98a1bb790\") " pod="openstack/nova-metadata-0" Nov 28 07:17:23 crc kubenswrapper[4922]: I1128 07:17:23.914581 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-75xpd\" (UniqueName: \"kubernetes.io/projected/57109065-ce83-4dea-96c1-48c94d854c36-kube-api-access-75xpd\") pod \"nova-scheduler-0\" (UID: \"57109065-ce83-4dea-96c1-48c94d854c36\") " pod="openstack/nova-scheduler-0" Nov 28 07:17:23 crc kubenswrapper[4922]: I1128 07:17:23.918261 4922 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Nov 28 07:17:23 crc kubenswrapper[4922]: I1128 07:17:23.923959 4922 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Nov 28 07:17:23 crc kubenswrapper[4922]: I1128 07:17:23.935876 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-novncproxy-config-data" Nov 28 07:17:23 crc kubenswrapper[4922]: I1128 07:17:23.941005 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Nov 28 07:17:23 crc kubenswrapper[4922]: I1128 07:17:23.975775 4922 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-75df6cf455-8zvjq"] Nov 28 07:17:23 crc kubenswrapper[4922]: I1128 07:17:23.976359 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/56dcaf4a-8a1b-471b-9f04-c372f15b8fa5-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"56dcaf4a-8a1b-471b-9f04-c372f15b8fa5\") " pod="openstack/nova-cell1-novncproxy-0" Nov 28 07:17:23 crc kubenswrapper[4922]: I1128 07:17:23.976528 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/56dcaf4a-8a1b-471b-9f04-c372f15b8fa5-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"56dcaf4a-8a1b-471b-9f04-c372f15b8fa5\") " pod="openstack/nova-cell1-novncproxy-0" Nov 28 07:17:23 crc kubenswrapper[4922]: I1128 07:17:23.976690 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/7216d230-a69f-4954-96aa-cc4b95403298-logs\") pod \"nova-api-0\" (UID: \"7216d230-a69f-4954-96aa-cc4b95403298\") " pod="openstack/nova-api-0" Nov 28 07:17:23 crc kubenswrapper[4922]: I1128 07:17:23.976766 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-h92b7\" (UniqueName: \"kubernetes.io/projected/7216d230-a69f-4954-96aa-cc4b95403298-kube-api-access-h92b7\") pod \"nova-api-0\" (UID: \"7216d230-a69f-4954-96aa-cc4b95403298\") " pod="openstack/nova-api-0" Nov 28 07:17:23 crc kubenswrapper[4922]: I1128 07:17:23.976894 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-t9hc2\" (UniqueName: \"kubernetes.io/projected/56dcaf4a-8a1b-471b-9f04-c372f15b8fa5-kube-api-access-t9hc2\") pod \"nova-cell1-novncproxy-0\" (UID: \"56dcaf4a-8a1b-471b-9f04-c372f15b8fa5\") " pod="openstack/nova-cell1-novncproxy-0" Nov 28 07:17:23 crc kubenswrapper[4922]: I1128 07:17:23.977004 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7216d230-a69f-4954-96aa-cc4b95403298-config-data\") pod \"nova-api-0\" (UID: \"7216d230-a69f-4954-96aa-cc4b95403298\") " pod="openstack/nova-api-0" Nov 28 07:17:23 crc kubenswrapper[4922]: I1128 07:17:23.977108 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7216d230-a69f-4954-96aa-cc4b95403298-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"7216d230-a69f-4954-96aa-cc4b95403298\") " pod="openstack/nova-api-0" Nov 28 07:17:23 crc kubenswrapper[4922]: I1128 07:17:23.977907 4922 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-75df6cf455-8zvjq" Nov 28 07:17:24 crc kubenswrapper[4922]: I1128 07:17:24.001989 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-75df6cf455-8zvjq"] Nov 28 07:17:24 crc kubenswrapper[4922]: I1128 07:17:24.081280 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/56dcaf4a-8a1b-471b-9f04-c372f15b8fa5-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"56dcaf4a-8a1b-471b-9f04-c372f15b8fa5\") " pod="openstack/nova-cell1-novncproxy-0" Nov 28 07:17:24 crc kubenswrapper[4922]: I1128 07:17:24.081350 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-znqxk\" (UniqueName: \"kubernetes.io/projected/54142b32-7142-41ee-af4b-4a020911c136-kube-api-access-znqxk\") pod \"dnsmasq-dns-75df6cf455-8zvjq\" (UID: \"54142b32-7142-41ee-af4b-4a020911c136\") " pod="openstack/dnsmasq-dns-75df6cf455-8zvjq" Nov 28 07:17:24 crc kubenswrapper[4922]: I1128 07:17:24.081380 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/7216d230-a69f-4954-96aa-cc4b95403298-logs\") pod \"nova-api-0\" (UID: \"7216d230-a69f-4954-96aa-cc4b95403298\") " pod="openstack/nova-api-0" Nov 28 07:17:24 crc kubenswrapper[4922]: I1128 07:17:24.081401 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-h92b7\" (UniqueName: \"kubernetes.io/projected/7216d230-a69f-4954-96aa-cc4b95403298-kube-api-access-h92b7\") pod \"nova-api-0\" (UID: \"7216d230-a69f-4954-96aa-cc4b95403298\") " pod="openstack/nova-api-0" Nov 28 07:17:24 crc kubenswrapper[4922]: I1128 07:17:24.081441 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/54142b32-7142-41ee-af4b-4a020911c136-dns-swift-storage-0\") pod \"dnsmasq-dns-75df6cf455-8zvjq\" (UID: \"54142b32-7142-41ee-af4b-4a020911c136\") " pod="openstack/dnsmasq-dns-75df6cf455-8zvjq" Nov 28 07:17:24 crc kubenswrapper[4922]: I1128 07:17:24.084708 4922 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-scheduler-0" Nov 28 07:17:24 crc kubenswrapper[4922]: I1128 07:17:24.086359 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-t9hc2\" (UniqueName: \"kubernetes.io/projected/56dcaf4a-8a1b-471b-9f04-c372f15b8fa5-kube-api-access-t9hc2\") pod \"nova-cell1-novncproxy-0\" (UID: \"56dcaf4a-8a1b-471b-9f04-c372f15b8fa5\") " pod="openstack/nova-cell1-novncproxy-0" Nov 28 07:17:24 crc kubenswrapper[4922]: I1128 07:17:24.086453 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7216d230-a69f-4954-96aa-cc4b95403298-config-data\") pod \"nova-api-0\" (UID: \"7216d230-a69f-4954-96aa-cc4b95403298\") " pod="openstack/nova-api-0" Nov 28 07:17:24 crc kubenswrapper[4922]: I1128 07:17:24.086522 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/54142b32-7142-41ee-af4b-4a020911c136-ovsdbserver-nb\") pod \"dnsmasq-dns-75df6cf455-8zvjq\" (UID: \"54142b32-7142-41ee-af4b-4a020911c136\") " pod="openstack/dnsmasq-dns-75df6cf455-8zvjq" Nov 28 07:17:24 crc kubenswrapper[4922]: I1128 07:17:24.086548 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/54142b32-7142-41ee-af4b-4a020911c136-dns-svc\") pod \"dnsmasq-dns-75df6cf455-8zvjq\" (UID: \"54142b32-7142-41ee-af4b-4a020911c136\") " pod="openstack/dnsmasq-dns-75df6cf455-8zvjq" Nov 28 07:17:24 crc kubenswrapper[4922]: I1128 07:17:24.086566 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7216d230-a69f-4954-96aa-cc4b95403298-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"7216d230-a69f-4954-96aa-cc4b95403298\") " pod="openstack/nova-api-0" Nov 28 07:17:24 crc kubenswrapper[4922]: I1128 07:17:24.086591 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/54142b32-7142-41ee-af4b-4a020911c136-ovsdbserver-sb\") pod \"dnsmasq-dns-75df6cf455-8zvjq\" (UID: \"54142b32-7142-41ee-af4b-4a020911c136\") " pod="openstack/dnsmasq-dns-75df6cf455-8zvjq" Nov 28 07:17:24 crc kubenswrapper[4922]: I1128 07:17:24.086639 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/56dcaf4a-8a1b-471b-9f04-c372f15b8fa5-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"56dcaf4a-8a1b-471b-9f04-c372f15b8fa5\") " pod="openstack/nova-cell1-novncproxy-0" Nov 28 07:17:24 crc kubenswrapper[4922]: I1128 07:17:24.086672 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/54142b32-7142-41ee-af4b-4a020911c136-config\") pod \"dnsmasq-dns-75df6cf455-8zvjq\" (UID: \"54142b32-7142-41ee-af4b-4a020911c136\") " pod="openstack/dnsmasq-dns-75df6cf455-8zvjq" Nov 28 07:17:24 crc kubenswrapper[4922]: I1128 07:17:24.087461 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/7216d230-a69f-4954-96aa-cc4b95403298-logs\") pod \"nova-api-0\" (UID: \"7216d230-a69f-4954-96aa-cc4b95403298\") " pod="openstack/nova-api-0" Nov 28 07:17:24 crc kubenswrapper[4922]: I1128 07:17:24.087673 4922 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/56dcaf4a-8a1b-471b-9f04-c372f15b8fa5-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"56dcaf4a-8a1b-471b-9f04-c372f15b8fa5\") " pod="openstack/nova-cell1-novncproxy-0" Nov 28 07:17:24 crc kubenswrapper[4922]: I1128 07:17:24.093241 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Nov 28 07:17:24 crc kubenswrapper[4922]: I1128 07:17:24.096850 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7216d230-a69f-4954-96aa-cc4b95403298-config-data\") pod \"nova-api-0\" (UID: \"7216d230-a69f-4954-96aa-cc4b95403298\") " pod="openstack/nova-api-0" Nov 28 07:17:24 crc kubenswrapper[4922]: I1128 07:17:24.098001 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7216d230-a69f-4954-96aa-cc4b95403298-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"7216d230-a69f-4954-96aa-cc4b95403298\") " pod="openstack/nova-api-0" Nov 28 07:17:24 crc kubenswrapper[4922]: I1128 07:17:24.103191 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/56dcaf4a-8a1b-471b-9f04-c372f15b8fa5-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"56dcaf4a-8a1b-471b-9f04-c372f15b8fa5\") " pod="openstack/nova-cell1-novncproxy-0" Nov 28 07:17:24 crc kubenswrapper[4922]: I1128 07:17:24.109058 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-h92b7\" (UniqueName: \"kubernetes.io/projected/7216d230-a69f-4954-96aa-cc4b95403298-kube-api-access-h92b7\") pod \"nova-api-0\" (UID: \"7216d230-a69f-4954-96aa-cc4b95403298\") " pod="openstack/nova-api-0" Nov 28 07:17:24 crc kubenswrapper[4922]: I1128 07:17:24.109872 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-t9hc2\" (UniqueName: \"kubernetes.io/projected/56dcaf4a-8a1b-471b-9f04-c372f15b8fa5-kube-api-access-t9hc2\") pod \"nova-cell1-novncproxy-0\" (UID: \"56dcaf4a-8a1b-471b-9f04-c372f15b8fa5\") " pod="openstack/nova-cell1-novncproxy-0" Nov 28 07:17:24 crc kubenswrapper[4922]: I1128 07:17:24.189207 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/54142b32-7142-41ee-af4b-4a020911c136-ovsdbserver-nb\") pod \"dnsmasq-dns-75df6cf455-8zvjq\" (UID: \"54142b32-7142-41ee-af4b-4a020911c136\") " pod="openstack/dnsmasq-dns-75df6cf455-8zvjq" Nov 28 07:17:24 crc kubenswrapper[4922]: I1128 07:17:24.189265 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/54142b32-7142-41ee-af4b-4a020911c136-dns-svc\") pod \"dnsmasq-dns-75df6cf455-8zvjq\" (UID: \"54142b32-7142-41ee-af4b-4a020911c136\") " pod="openstack/dnsmasq-dns-75df6cf455-8zvjq" Nov 28 07:17:24 crc kubenswrapper[4922]: I1128 07:17:24.189293 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/54142b32-7142-41ee-af4b-4a020911c136-ovsdbserver-sb\") pod \"dnsmasq-dns-75df6cf455-8zvjq\" (UID: \"54142b32-7142-41ee-af4b-4a020911c136\") " pod="openstack/dnsmasq-dns-75df6cf455-8zvjq" Nov 28 07:17:24 crc kubenswrapper[4922]: I1128 07:17:24.189326 4922 reconciler_common.go:218] "operationExecutor.MountVolume 
started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/54142b32-7142-41ee-af4b-4a020911c136-config\") pod \"dnsmasq-dns-75df6cf455-8zvjq\" (UID: \"54142b32-7142-41ee-af4b-4a020911c136\") " pod="openstack/dnsmasq-dns-75df6cf455-8zvjq" Nov 28 07:17:24 crc kubenswrapper[4922]: I1128 07:17:24.189384 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-znqxk\" (UniqueName: \"kubernetes.io/projected/54142b32-7142-41ee-af4b-4a020911c136-kube-api-access-znqxk\") pod \"dnsmasq-dns-75df6cf455-8zvjq\" (UID: \"54142b32-7142-41ee-af4b-4a020911c136\") " pod="openstack/dnsmasq-dns-75df6cf455-8zvjq" Nov 28 07:17:24 crc kubenswrapper[4922]: I1128 07:17:24.189595 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/54142b32-7142-41ee-af4b-4a020911c136-dns-swift-storage-0\") pod \"dnsmasq-dns-75df6cf455-8zvjq\" (UID: \"54142b32-7142-41ee-af4b-4a020911c136\") " pod="openstack/dnsmasq-dns-75df6cf455-8zvjq" Nov 28 07:17:24 crc kubenswrapper[4922]: I1128 07:17:24.190148 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/54142b32-7142-41ee-af4b-4a020911c136-ovsdbserver-nb\") pod \"dnsmasq-dns-75df6cf455-8zvjq\" (UID: \"54142b32-7142-41ee-af4b-4a020911c136\") " pod="openstack/dnsmasq-dns-75df6cf455-8zvjq" Nov 28 07:17:24 crc kubenswrapper[4922]: I1128 07:17:24.190338 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/54142b32-7142-41ee-af4b-4a020911c136-dns-swift-storage-0\") pod \"dnsmasq-dns-75df6cf455-8zvjq\" (UID: \"54142b32-7142-41ee-af4b-4a020911c136\") " pod="openstack/dnsmasq-dns-75df6cf455-8zvjq" Nov 28 07:17:24 crc kubenswrapper[4922]: I1128 07:17:24.190795 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/54142b32-7142-41ee-af4b-4a020911c136-config\") pod \"dnsmasq-dns-75df6cf455-8zvjq\" (UID: \"54142b32-7142-41ee-af4b-4a020911c136\") " pod="openstack/dnsmasq-dns-75df6cf455-8zvjq" Nov 28 07:17:24 crc kubenswrapper[4922]: I1128 07:17:24.191063 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/54142b32-7142-41ee-af4b-4a020911c136-ovsdbserver-sb\") pod \"dnsmasq-dns-75df6cf455-8zvjq\" (UID: \"54142b32-7142-41ee-af4b-4a020911c136\") " pod="openstack/dnsmasq-dns-75df6cf455-8zvjq" Nov 28 07:17:24 crc kubenswrapper[4922]: I1128 07:17:24.191163 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/54142b32-7142-41ee-af4b-4a020911c136-dns-svc\") pod \"dnsmasq-dns-75df6cf455-8zvjq\" (UID: \"54142b32-7142-41ee-af4b-4a020911c136\") " pod="openstack/dnsmasq-dns-75df6cf455-8zvjq" Nov 28 07:17:24 crc kubenswrapper[4922]: I1128 07:17:24.204830 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-znqxk\" (UniqueName: \"kubernetes.io/projected/54142b32-7142-41ee-af4b-4a020911c136-kube-api-access-znqxk\") pod \"dnsmasq-dns-75df6cf455-8zvjq\" (UID: \"54142b32-7142-41ee-af4b-4a020911c136\") " pod="openstack/dnsmasq-dns-75df6cf455-8zvjq" Nov 28 07:17:24 crc kubenswrapper[4922]: I1128 07:17:24.316954 4922 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Nov 28 07:17:24 crc kubenswrapper[4922]: I1128 07:17:24.356478 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Nov 28 07:17:24 crc kubenswrapper[4922]: I1128 07:17:24.358053 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-75df6cf455-8zvjq" Nov 28 07:17:24 crc kubenswrapper[4922]: I1128 07:17:24.481687 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-cell-mapping-m8jjn"] Nov 28 07:17:24 crc kubenswrapper[4922]: I1128 07:17:24.718373 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-cell-mapping-m8jjn" event={"ID":"97450035-755a-422c-ade7-b3bd97b917cd","Type":"ContainerStarted","Data":"772905e0e25ce6c6fc6050f18ff5bd4d99d527ddda0058cc0a0b5e92a75f5195"} Nov 28 07:17:24 crc kubenswrapper[4922]: I1128 07:17:24.750673 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Nov 28 07:17:24 crc kubenswrapper[4922]: W1128 07:17:24.767972 4922 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod57109065_ce83_4dea_96c1_48c94d854c36.slice/crio-1dcead371a0ef9c9fcaa760ac003c2f71b770c1de11c466a5d00ef3a7bbbffc8 WatchSource:0}: Error finding container 1dcead371a0ef9c9fcaa760ac003c2f71b770c1de11c466a5d00ef3a7bbbffc8: Status 404 returned error can't find the container with id 1dcead371a0ef9c9fcaa760ac003c2f71b770c1de11c466a5d00ef3a7bbbffc8 Nov 28 07:17:24 crc kubenswrapper[4922]: I1128 07:17:24.893685 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Nov 28 07:17:25 crc kubenswrapper[4922]: I1128 07:17:25.004332 4922 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-conductor-db-sync-ncg57"] Nov 28 07:17:25 crc kubenswrapper[4922]: I1128 07:17:25.005814 4922 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-conductor-db-sync-ncg57" Nov 28 07:17:25 crc kubenswrapper[4922]: I1128 07:17:25.007766 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-conductor-config-data" Nov 28 07:17:25 crc kubenswrapper[4922]: I1128 07:17:25.007936 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-conductor-scripts" Nov 28 07:17:25 crc kubenswrapper[4922]: I1128 07:17:25.011934 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-conductor-db-sync-ncg57"] Nov 28 07:17:25 crc kubenswrapper[4922]: I1128 07:17:25.079327 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Nov 28 07:17:25 crc kubenswrapper[4922]: W1128 07:17:25.093828 4922 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod56dcaf4a_8a1b_471b_9f04_c372f15b8fa5.slice/crio-db559aaf5565335b2b892cbd01715b061dc29e55a215040eb5b0cdf095ce107a WatchSource:0}: Error finding container db559aaf5565335b2b892cbd01715b061dc29e55a215040eb5b0cdf095ce107a: Status 404 returned error can't find the container with id db559aaf5565335b2b892cbd01715b061dc29e55a215040eb5b0cdf095ce107a Nov 28 07:17:25 crc kubenswrapper[4922]: I1128 07:17:25.096079 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Nov 28 07:17:25 crc kubenswrapper[4922]: I1128 07:17:25.120025 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rthl7\" (UniqueName: \"kubernetes.io/projected/2844ae42-cefd-4863-b8f4-95d253b1a5b4-kube-api-access-rthl7\") pod \"nova-cell1-conductor-db-sync-ncg57\" (UID: \"2844ae42-cefd-4863-b8f4-95d253b1a5b4\") " pod="openstack/nova-cell1-conductor-db-sync-ncg57" Nov 28 07:17:25 crc kubenswrapper[4922]: I1128 07:17:25.120312 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/2844ae42-cefd-4863-b8f4-95d253b1a5b4-scripts\") pod \"nova-cell1-conductor-db-sync-ncg57\" (UID: \"2844ae42-cefd-4863-b8f4-95d253b1a5b4\") " pod="openstack/nova-cell1-conductor-db-sync-ncg57" Nov 28 07:17:25 crc kubenswrapper[4922]: I1128 07:17:25.120380 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2844ae42-cefd-4863-b8f4-95d253b1a5b4-config-data\") pod \"nova-cell1-conductor-db-sync-ncg57\" (UID: \"2844ae42-cefd-4863-b8f4-95d253b1a5b4\") " pod="openstack/nova-cell1-conductor-db-sync-ncg57" Nov 28 07:17:25 crc kubenswrapper[4922]: I1128 07:17:25.120414 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2844ae42-cefd-4863-b8f4-95d253b1a5b4-combined-ca-bundle\") pod \"nova-cell1-conductor-db-sync-ncg57\" (UID: \"2844ae42-cefd-4863-b8f4-95d253b1a5b4\") " pod="openstack/nova-cell1-conductor-db-sync-ncg57" Nov 28 07:17:25 crc kubenswrapper[4922]: I1128 07:17:25.203734 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-75df6cf455-8zvjq"] Nov 28 07:17:25 crc kubenswrapper[4922]: I1128 07:17:25.222198 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2844ae42-cefd-4863-b8f4-95d253b1a5b4-config-data\") pod 
\"nova-cell1-conductor-db-sync-ncg57\" (UID: \"2844ae42-cefd-4863-b8f4-95d253b1a5b4\") " pod="openstack/nova-cell1-conductor-db-sync-ncg57" Nov 28 07:17:25 crc kubenswrapper[4922]: I1128 07:17:25.222270 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2844ae42-cefd-4863-b8f4-95d253b1a5b4-combined-ca-bundle\") pod \"nova-cell1-conductor-db-sync-ncg57\" (UID: \"2844ae42-cefd-4863-b8f4-95d253b1a5b4\") " pod="openstack/nova-cell1-conductor-db-sync-ncg57" Nov 28 07:17:25 crc kubenswrapper[4922]: I1128 07:17:25.222348 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rthl7\" (UniqueName: \"kubernetes.io/projected/2844ae42-cefd-4863-b8f4-95d253b1a5b4-kube-api-access-rthl7\") pod \"nova-cell1-conductor-db-sync-ncg57\" (UID: \"2844ae42-cefd-4863-b8f4-95d253b1a5b4\") " pod="openstack/nova-cell1-conductor-db-sync-ncg57" Nov 28 07:17:25 crc kubenswrapper[4922]: I1128 07:17:25.222386 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/2844ae42-cefd-4863-b8f4-95d253b1a5b4-scripts\") pod \"nova-cell1-conductor-db-sync-ncg57\" (UID: \"2844ae42-cefd-4863-b8f4-95d253b1a5b4\") " pod="openstack/nova-cell1-conductor-db-sync-ncg57" Nov 28 07:17:25 crc kubenswrapper[4922]: I1128 07:17:25.226071 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2844ae42-cefd-4863-b8f4-95d253b1a5b4-combined-ca-bundle\") pod \"nova-cell1-conductor-db-sync-ncg57\" (UID: \"2844ae42-cefd-4863-b8f4-95d253b1a5b4\") " pod="openstack/nova-cell1-conductor-db-sync-ncg57" Nov 28 07:17:25 crc kubenswrapper[4922]: I1128 07:17:25.226897 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2844ae42-cefd-4863-b8f4-95d253b1a5b4-config-data\") pod \"nova-cell1-conductor-db-sync-ncg57\" (UID: \"2844ae42-cefd-4863-b8f4-95d253b1a5b4\") " pod="openstack/nova-cell1-conductor-db-sync-ncg57" Nov 28 07:17:25 crc kubenswrapper[4922]: I1128 07:17:25.227307 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/2844ae42-cefd-4863-b8f4-95d253b1a5b4-scripts\") pod \"nova-cell1-conductor-db-sync-ncg57\" (UID: \"2844ae42-cefd-4863-b8f4-95d253b1a5b4\") " pod="openstack/nova-cell1-conductor-db-sync-ncg57" Nov 28 07:17:25 crc kubenswrapper[4922]: I1128 07:17:25.242437 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rthl7\" (UniqueName: \"kubernetes.io/projected/2844ae42-cefd-4863-b8f4-95d253b1a5b4-kube-api-access-rthl7\") pod \"nova-cell1-conductor-db-sync-ncg57\" (UID: \"2844ae42-cefd-4863-b8f4-95d253b1a5b4\") " pod="openstack/nova-cell1-conductor-db-sync-ncg57" Nov 28 07:17:25 crc kubenswrapper[4922]: I1128 07:17:25.329963 4922 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-conductor-db-sync-ncg57"
Nov 28 07:17:25 crc kubenswrapper[4922]: I1128 07:17:25.736938 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"56dcaf4a-8a1b-471b-9f04-c372f15b8fa5","Type":"ContainerStarted","Data":"db559aaf5565335b2b892cbd01715b061dc29e55a215040eb5b0cdf095ce107a"}
Nov 28 07:17:25 crc kubenswrapper[4922]: I1128 07:17:25.738949 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"b9239175-af51-46ed-a36b-a5b98a1bb790","Type":"ContainerStarted","Data":"79cd883bce84774776051ed457e1ffa2c8c667293281c4a9b7a6818a9fd6c448"}
Nov 28 07:17:25 crc kubenswrapper[4922]: I1128 07:17:25.740803 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"7216d230-a69f-4954-96aa-cc4b95403298","Type":"ContainerStarted","Data":"ad08734605bf274f3a0270035886d0ad3dfec740c0d5babfd0bbdf2a68b37d8f"}
Nov 28 07:17:25 crc kubenswrapper[4922]: I1128 07:17:25.741910 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"57109065-ce83-4dea-96c1-48c94d854c36","Type":"ContainerStarted","Data":"1dcead371a0ef9c9fcaa760ac003c2f71b770c1de11c466a5d00ef3a7bbbffc8"}
Nov 28 07:17:25 crc kubenswrapper[4922]: I1128 07:17:25.746299 4922 generic.go:334] "Generic (PLEG): container finished" podID="54142b32-7142-41ee-af4b-4a020911c136" containerID="7483003f20a270bbf5574501f5b8ee700ddb926515c72515339c39eeb42a40e0" exitCode=0
Nov 28 07:17:25 crc kubenswrapper[4922]: I1128 07:17:25.746343 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-75df6cf455-8zvjq" event={"ID":"54142b32-7142-41ee-af4b-4a020911c136","Type":"ContainerDied","Data":"7483003f20a270bbf5574501f5b8ee700ddb926515c72515339c39eeb42a40e0"}
Nov 28 07:17:25 crc kubenswrapper[4922]: I1128 07:17:25.746422 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-75df6cf455-8zvjq" event={"ID":"54142b32-7142-41ee-af4b-4a020911c136","Type":"ContainerStarted","Data":"401a537fd44921f6a08993dca9060a5101e75e0c84f05f3336a75fae2f033bf7"}
Nov 28 07:17:25 crc kubenswrapper[4922]: I1128 07:17:25.750009 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-cell-mapping-m8jjn" event={"ID":"97450035-755a-422c-ade7-b3bd97b917cd","Type":"ContainerStarted","Data":"0c67e228e5c4031baab99deaaa17d2e3b356c3de5d17e6c1f5a5e4b5ad468777"}
Nov 28 07:17:25 crc kubenswrapper[4922]: I1128 07:17:25.802005 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-conductor-db-sync-ncg57"]
Nov 28 07:17:25 crc kubenswrapper[4922]: I1128 07:17:25.808873 4922 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell0-cell-mapping-m8jjn" podStartSLOduration=2.808752174 podStartE2EDuration="2.808752174s" podCreationTimestamp="2025-11-28 07:17:23 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 07:17:25.793540889 +0000 UTC m=+1490.713936471" watchObservedRunningTime="2025-11-28 07:17:25.808752174 +0000 UTC m=+1490.729147756"
Nov 28 07:17:25 crc kubenswrapper[4922]: W1128 07:17:25.810315 4922 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod2844ae42_cefd_4863_b8f4_95d253b1a5b4.slice/crio-aac55a923e9705e44ddffe83cdd0f791d921cd7b8ab2eeddae2f7d55ed15a910 WatchSource:0}: Error finding container aac55a923e9705e44ddffe83cdd0f791d921cd7b8ab2eeddae2f7d55ed15a910: Status 404 returned error can't find the container with id aac55a923e9705e44ddffe83cdd0f791d921cd7b8ab2eeddae2f7d55ed15a910
Nov 28 07:17:26 crc kubenswrapper[4922]: I1128 07:17:26.786961 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-db-sync-ncg57" event={"ID":"2844ae42-cefd-4863-b8f4-95d253b1a5b4","Type":"ContainerStarted","Data":"aac55a923e9705e44ddffe83cdd0f791d921cd7b8ab2eeddae2f7d55ed15a910"}
Nov 28 07:17:26 crc kubenswrapper[4922]: I1128 07:17:26.792333 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-75df6cf455-8zvjq" event={"ID":"54142b32-7142-41ee-af4b-4a020911c136","Type":"ContainerStarted","Data":"481401725c47439fb318ffd00b81bd3da87b04044342d823c60fbcdefada5137"}
Nov 28 07:17:26 crc kubenswrapper[4922]: I1128 07:17:26.792409 4922 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-75df6cf455-8zvjq"
Nov 28 07:17:26 crc kubenswrapper[4922]: I1128 07:17:26.810470 4922 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-75df6cf455-8zvjq" podStartSLOduration=3.810452205 podStartE2EDuration="3.810452205s" podCreationTimestamp="2025-11-28 07:17:23 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 07:17:26.808432362 +0000 UTC m=+1491.728827944" watchObservedRunningTime="2025-11-28 07:17:26.810452205 +0000 UTC m=+1491.730847787"
Nov 28 07:17:27 crc kubenswrapper[4922]: I1128 07:17:27.536207 4922 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-novncproxy-0"]
Nov 28 07:17:27 crc kubenswrapper[4922]: I1128 07:17:27.548995 4922 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"]
Nov 28 07:17:27 crc kubenswrapper[4922]: I1128 07:17:27.802259 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-db-sync-ncg57" event={"ID":"2844ae42-cefd-4863-b8f4-95d253b1a5b4","Type":"ContainerStarted","Data":"2b64afb60c87216a1524a5214f7e0fc6872a12dbbbbe2a4ea9c5c09709f0b040"}
Nov 28 07:17:27 crc kubenswrapper[4922]: I1128 07:17:27.821261 4922 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-conductor-db-sync-ncg57" podStartSLOduration=3.821203567 podStartE2EDuration="3.821203567s" podCreationTimestamp="2025-11-28 07:17:24 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 07:17:27.816209484 +0000 UTC m=+1492.736605066" watchObservedRunningTime="2025-11-28 07:17:27.821203567 +0000 UTC m=+1492.741599149"
Nov 28 07:17:28 crc kubenswrapper[4922]: I1128 07:17:28.812380 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"57109065-ce83-4dea-96c1-48c94d854c36","Type":"ContainerStarted","Data":"4e0c07a0ee25c791acbccef9889e9bd19a58b0f338022e8f11c9040d8c7194b4"}
Nov 28 07:17:28 crc kubenswrapper[4922]: I1128 07:17:28.815649 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"56dcaf4a-8a1b-471b-9f04-c372f15b8fa5","Type":"ContainerStarted","Data":"a009cbf707531870a158c461b6eb3d1c1b0328701de2624ac27d511eb95f3f7f"}
Nov 28 07:17:28 crc kubenswrapper[4922]: I1128 07:17:28.816184 4922 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-cell1-novncproxy-0" podUID="56dcaf4a-8a1b-471b-9f04-c372f15b8fa5" containerName="nova-cell1-novncproxy-novncproxy" containerID="cri-o://a009cbf707531870a158c461b6eb3d1c1b0328701de2624ac27d511eb95f3f7f" gracePeriod=30
Nov 28 07:17:28 crc kubenswrapper[4922]: I1128 07:17:28.818797 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"b9239175-af51-46ed-a36b-a5b98a1bb790","Type":"ContainerStarted","Data":"a7ce397f63e2c6437b00b9fae1a00e0f14b52fe35434a7c615e78de81dc1cdf2"}
Nov 28 07:17:28 crc kubenswrapper[4922]: I1128 07:17:28.818823 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"b9239175-af51-46ed-a36b-a5b98a1bb790","Type":"ContainerStarted","Data":"6f611dbe3a4e388935f6d106d42d124fc15b557a46cdb2ff260bd9f2f857e234"}
Nov 28 07:17:28 crc kubenswrapper[4922]: I1128 07:17:28.819006 4922 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="b9239175-af51-46ed-a36b-a5b98a1bb790" containerName="nova-metadata-log" containerID="cri-o://6f611dbe3a4e388935f6d106d42d124fc15b557a46cdb2ff260bd9f2f857e234" gracePeriod=30
Nov 28 07:17:28 crc kubenswrapper[4922]: I1128 07:17:28.819188 4922 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="b9239175-af51-46ed-a36b-a5b98a1bb790" containerName="nova-metadata-metadata" containerID="cri-o://a7ce397f63e2c6437b00b9fae1a00e0f14b52fe35434a7c615e78de81dc1cdf2" gracePeriod=30
Nov 28 07:17:28 crc kubenswrapper[4922]: I1128 07:17:28.824095 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"7216d230-a69f-4954-96aa-cc4b95403298","Type":"ContainerStarted","Data":"7e0fd255ec314f2db264c78401893b89111b326a63b7e3b4f3817ab89a0ea5ce"}
Nov 28 07:17:28 crc kubenswrapper[4922]: I1128 07:17:28.824296 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"7216d230-a69f-4954-96aa-cc4b95403298","Type":"ContainerStarted","Data":"0d1296f5ee0a8d9c56566a276bb8294e6675fb669dbbb93010c62a5e2f07fc92"}
Nov 28 07:17:28 crc kubenswrapper[4922]: I1128 07:17:28.843655 4922 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-scheduler-0" podStartSLOduration=2.496625269 podStartE2EDuration="5.84363384s" podCreationTimestamp="2025-11-28 07:17:23 +0000 UTC" firstStartedPulling="2025-11-28 07:17:24.778133533 +0000 UTC m=+1489.698529115" lastFinishedPulling="2025-11-28 07:17:28.125142104 +0000 UTC m=+1493.045537686" observedRunningTime="2025-11-28 07:17:28.828614091 +0000 UTC m=+1493.749009693" watchObservedRunningTime="2025-11-28 07:17:28.84363384 +0000 UTC m=+1493.764029432"
Nov 28 07:17:28 crc kubenswrapper[4922]: I1128 07:17:28.855099 4922 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-0" podStartSLOduration=2.792328577 podStartE2EDuration="5.855062064s" podCreationTimestamp="2025-11-28 07:17:23 +0000 UTC" firstStartedPulling="2025-11-28 07:17:25.066081025 +0000 UTC m=+1489.986476607" lastFinishedPulling="2025-11-28 07:17:28.128814522 +0000 UTC m=+1493.049210094" observedRunningTime="2025-11-28 07:17:28.849582138 +0000 UTC m=+1493.769977730" watchObservedRunningTime="2025-11-28 07:17:28.855062064 +0000 UTC m=+1493.775457646"
Nov 28 07:17:28 crc kubenswrapper[4922]: I1128 07:17:28.881346 4922 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-novncproxy-0" podStartSLOduration=2.853153526 podStartE2EDuration="5.881328084s" podCreationTimestamp="2025-11-28 07:17:23 +0000 UTC" firstStartedPulling="2025-11-28 07:17:25.099415862 +0000 UTC m=+1490.019811444" lastFinishedPulling="2025-11-28 07:17:28.12759042 +0000 UTC m=+1493.047986002" observedRunningTime="2025-11-28 07:17:28.874110871 +0000 UTC m=+1493.794506453" watchObservedRunningTime="2025-11-28 07:17:28.881328084 +0000 UTC m=+1493.801723676"
Nov 28 07:17:28 crc kubenswrapper[4922]: I1128 07:17:28.897752 4922 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-metadata-0" podStartSLOduration=2.665891582 podStartE2EDuration="5.897735239s" podCreationTimestamp="2025-11-28 07:17:23 +0000 UTC" firstStartedPulling="2025-11-28 07:17:24.896846291 +0000 UTC m=+1489.817241873" lastFinishedPulling="2025-11-28 07:17:28.128689908 +0000 UTC m=+1493.049085530" observedRunningTime="2025-11-28 07:17:28.895991804 +0000 UTC m=+1493.816387406" watchObservedRunningTime="2025-11-28 07:17:28.897735239 +0000 UTC m=+1493.818130821"
Nov 28 07:17:29 crc kubenswrapper[4922]: I1128 07:17:29.087303 4922 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-scheduler-0"
Nov 28 07:17:29 crc kubenswrapper[4922]: I1128 07:17:29.094406 4922 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0"
Nov 28 07:17:29 crc kubenswrapper[4922]: I1128 07:17:29.094457 4922 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0"
Nov 28 07:17:29 crc kubenswrapper[4922]: I1128 07:17:29.357726 4922 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-cell1-novncproxy-0"
Nov 28 07:17:29 crc kubenswrapper[4922]: I1128 07:17:29.860785 4922 generic.go:334] "Generic (PLEG): container finished" podID="b9239175-af51-46ed-a36b-a5b98a1bb790" containerID="6f611dbe3a4e388935f6d106d42d124fc15b557a46cdb2ff260bd9f2f857e234" exitCode=143
Nov 28 07:17:29 crc kubenswrapper[4922]: I1128 07:17:29.861420 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"b9239175-af51-46ed-a36b-a5b98a1bb790","Type":"ContainerDied","Data":"6f611dbe3a4e388935f6d106d42d124fc15b557a46cdb2ff260bd9f2f857e234"}
Nov 28 07:17:32 crc kubenswrapper[4922]: I1128 07:17:32.893009 4922 generic.go:334] "Generic (PLEG): container finished" podID="97450035-755a-422c-ade7-b3bd97b917cd" containerID="0c67e228e5c4031baab99deaaa17d2e3b356c3de5d17e6c1f5a5e4b5ad468777" exitCode=0
Nov 28 07:17:32 crc kubenswrapper[4922]: I1128 07:17:32.893074 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-cell-mapping-m8jjn" event={"ID":"97450035-755a-422c-ade7-b3bd97b917cd","Type":"ContainerDied","Data":"0c67e228e5c4031baab99deaaa17d2e3b356c3de5d17e6c1f5a5e4b5ad468777"}
Nov 28 07:17:33 crc kubenswrapper[4922]: I1128 07:17:33.909197 4922 generic.go:334] "Generic (PLEG): container finished" podID="2844ae42-cefd-4863-b8f4-95d253b1a5b4" containerID="2b64afb60c87216a1524a5214f7e0fc6872a12dbbbbe2a4ea9c5c09709f0b040" exitCode=0
Nov 28 07:17:33 crc kubenswrapper[4922]: I1128 07:17:33.909282 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-db-sync-ncg57" event={"ID":"2844ae42-cefd-4863-b8f4-95d253b1a5b4","Type":"ContainerDied","Data":"2b64afb60c87216a1524a5214f7e0fc6872a12dbbbbe2a4ea9c5c09709f0b040"}
Nov 28 07:17:34 crc kubenswrapper[4922]: I1128 07:17:34.088297 4922 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-scheduler-0"
Nov 28 07:17:34 crc kubenswrapper[4922]: I1128 07:17:34.120625 4922 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-scheduler-0"
Nov 28 07:17:34 crc kubenswrapper[4922]: I1128 07:17:34.317672 4922 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0"
Nov 28 07:17:34 crc kubenswrapper[4922]: I1128 07:17:34.318159 4922 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0"
Nov 28 07:17:34 crc kubenswrapper[4922]: I1128 07:17:34.357816 4922 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-cell-mapping-m8jjn"
Nov 28 07:17:34 crc kubenswrapper[4922]: I1128 07:17:34.360726 4922 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-75df6cf455-8zvjq"
Nov 28 07:17:34 crc kubenswrapper[4922]: I1128 07:17:34.454914 4922 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-c55f6679-c2kdb"]
Nov 28 07:17:34 crc kubenswrapper[4922]: I1128 07:17:34.455124 4922 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-c55f6679-c2kdb" podUID="8c44439c-6a1b-430d-9dd1-8b0c16033ac1" containerName="dnsmasq-dns" containerID="cri-o://e41fe74d51b86bfbb91f2da8a028e499fce1d7462fc60532955d0af580095be8" gracePeriod=10
Nov 28 07:17:34 crc kubenswrapper[4922]: I1128 07:17:34.511029 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/97450035-755a-422c-ade7-b3bd97b917cd-config-data\") pod \"97450035-755a-422c-ade7-b3bd97b917cd\" (UID: \"97450035-755a-422c-ade7-b3bd97b917cd\") "
Nov 28 07:17:34 crc kubenswrapper[4922]: I1128 07:17:34.511788 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zklj7\" (UniqueName: \"kubernetes.io/projected/97450035-755a-422c-ade7-b3bd97b917cd-kube-api-access-zklj7\") pod \"97450035-755a-422c-ade7-b3bd97b917cd\" (UID: \"97450035-755a-422c-ade7-b3bd97b917cd\") "
Nov 28 07:17:34 crc kubenswrapper[4922]: I1128 07:17:34.511835 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/97450035-755a-422c-ade7-b3bd97b917cd-scripts\") pod \"97450035-755a-422c-ade7-b3bd97b917cd\" (UID: \"97450035-755a-422c-ade7-b3bd97b917cd\") "
Nov 28 07:17:34 crc kubenswrapper[4922]: I1128 07:17:34.511886 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/97450035-755a-422c-ade7-b3bd97b917cd-combined-ca-bundle\") pod \"97450035-755a-422c-ade7-b3bd97b917cd\" (UID: \"97450035-755a-422c-ade7-b3bd97b917cd\") "
Nov 28 07:17:34 crc kubenswrapper[4922]: I1128 07:17:34.520856 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/97450035-755a-422c-ade7-b3bd97b917cd-kube-api-access-zklj7" (OuterVolumeSpecName: "kube-api-access-zklj7") pod "97450035-755a-422c-ade7-b3bd97b917cd" (UID: "97450035-755a-422c-ade7-b3bd97b917cd"). InnerVolumeSpecName "kube-api-access-zklj7". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 28 07:17:34 crc kubenswrapper[4922]: I1128 07:17:34.523238 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/97450035-755a-422c-ade7-b3bd97b917cd-scripts" (OuterVolumeSpecName: "scripts") pod "97450035-755a-422c-ade7-b3bd97b917cd" (UID: "97450035-755a-422c-ade7-b3bd97b917cd"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 07:17:34 crc kubenswrapper[4922]: I1128 07:17:34.544375 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/97450035-755a-422c-ade7-b3bd97b917cd-config-data" (OuterVolumeSpecName: "config-data") pod "97450035-755a-422c-ade7-b3bd97b917cd" (UID: "97450035-755a-422c-ade7-b3bd97b917cd"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 07:17:34 crc kubenswrapper[4922]: I1128 07:17:34.555638 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/97450035-755a-422c-ade7-b3bd97b917cd-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "97450035-755a-422c-ade7-b3bd97b917cd" (UID: "97450035-755a-422c-ade7-b3bd97b917cd"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 07:17:34 crc kubenswrapper[4922]: I1128 07:17:34.654034 4922 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zklj7\" (UniqueName: \"kubernetes.io/projected/97450035-755a-422c-ade7-b3bd97b917cd-kube-api-access-zklj7\") on node \"crc\" DevicePath \"\""
Nov 28 07:17:34 crc kubenswrapper[4922]: I1128 07:17:34.654064 4922 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/97450035-755a-422c-ade7-b3bd97b917cd-scripts\") on node \"crc\" DevicePath \"\""
Nov 28 07:17:34 crc kubenswrapper[4922]: I1128 07:17:34.654074 4922 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/97450035-755a-422c-ade7-b3bd97b917cd-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Nov 28 07:17:34 crc kubenswrapper[4922]: I1128 07:17:34.654082 4922 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/97450035-755a-422c-ade7-b3bd97b917cd-config-data\") on node \"crc\" DevicePath \"\""
Nov 28 07:17:34 crc kubenswrapper[4922]: I1128 07:17:34.922401 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-cell-mapping-m8jjn" event={"ID":"97450035-755a-422c-ade7-b3bd97b917cd","Type":"ContainerDied","Data":"772905e0e25ce6c6fc6050f18ff5bd4d99d527ddda0058cc0a0b5e92a75f5195"}
Nov 28 07:17:34 crc kubenswrapper[4922]: I1128 07:17:34.922430 4922 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-cell-mapping-m8jjn"
Nov 28 07:17:34 crc kubenswrapper[4922]: I1128 07:17:34.922447 4922 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="772905e0e25ce6c6fc6050f18ff5bd4d99d527ddda0058cc0a0b5e92a75f5195"
Nov 28 07:17:34 crc kubenswrapper[4922]: I1128 07:17:34.933903 4922 generic.go:334] "Generic (PLEG): container finished" podID="8c44439c-6a1b-430d-9dd1-8b0c16033ac1" containerID="e41fe74d51b86bfbb91f2da8a028e499fce1d7462fc60532955d0af580095be8" exitCode=0
Nov 28 07:17:34 crc kubenswrapper[4922]: I1128 07:17:34.934286 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-c55f6679-c2kdb" event={"ID":"8c44439c-6a1b-430d-9dd1-8b0c16033ac1","Type":"ContainerDied","Data":"e41fe74d51b86bfbb91f2da8a028e499fce1d7462fc60532955d0af580095be8"}
Nov 28 07:17:35 crc kubenswrapper[4922]: I1128 07:17:35.003909 4922 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-scheduler-0"
Nov 28 07:17:35 crc kubenswrapper[4922]: I1128 07:17:35.066816 4922 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"]
Nov 28 07:17:35 crc kubenswrapper[4922]: I1128 07:17:35.100381 4922 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"]
Nov 28 07:17:35 crc kubenswrapper[4922]: I1128 07:17:35.117720 4922 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-c55f6679-c2kdb"
Nov 28 07:17:35 crc kubenswrapper[4922]: I1128 07:17:35.164676 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/8c44439c-6a1b-430d-9dd1-8b0c16033ac1-dns-svc\") pod \"8c44439c-6a1b-430d-9dd1-8b0c16033ac1\" (UID: \"8c44439c-6a1b-430d-9dd1-8b0c16033ac1\") "
Nov 28 07:17:35 crc kubenswrapper[4922]: I1128 07:17:35.164781 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/8c44439c-6a1b-430d-9dd1-8b0c16033ac1-ovsdbserver-sb\") pod \"8c44439c-6a1b-430d-9dd1-8b0c16033ac1\" (UID: \"8c44439c-6a1b-430d-9dd1-8b0c16033ac1\") "
Nov 28 07:17:35 crc kubenswrapper[4922]: I1128 07:17:35.164877 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/8c44439c-6a1b-430d-9dd1-8b0c16033ac1-ovsdbserver-nb\") pod \"8c44439c-6a1b-430d-9dd1-8b0c16033ac1\" (UID: \"8c44439c-6a1b-430d-9dd1-8b0c16033ac1\") "
Nov 28 07:17:35 crc kubenswrapper[4922]: I1128 07:17:35.164941 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/8c44439c-6a1b-430d-9dd1-8b0c16033ac1-dns-swift-storage-0\") pod \"8c44439c-6a1b-430d-9dd1-8b0c16033ac1\" (UID: \"8c44439c-6a1b-430d-9dd1-8b0c16033ac1\") "
Nov 28 07:17:35 crc kubenswrapper[4922]: I1128 07:17:35.164996 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4lthm\" (UniqueName: \"kubernetes.io/projected/8c44439c-6a1b-430d-9dd1-8b0c16033ac1-kube-api-access-4lthm\") pod \"8c44439c-6a1b-430d-9dd1-8b0c16033ac1\" (UID: \"8c44439c-6a1b-430d-9dd1-8b0c16033ac1\") "
Nov 28 07:17:35 crc kubenswrapper[4922]: I1128 07:17:35.165089 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8c44439c-6a1b-430d-9dd1-8b0c16033ac1-config\") pod \"8c44439c-6a1b-430d-9dd1-8b0c16033ac1\" (UID: \"8c44439c-6a1b-430d-9dd1-8b0c16033ac1\") "
Nov 28 07:17:35 crc kubenswrapper[4922]: I1128 07:17:35.181417 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8c44439c-6a1b-430d-9dd1-8b0c16033ac1-kube-api-access-4lthm" (OuterVolumeSpecName: "kube-api-access-4lthm") pod "8c44439c-6a1b-430d-9dd1-8b0c16033ac1" (UID: "8c44439c-6a1b-430d-9dd1-8b0c16033ac1"). InnerVolumeSpecName "kube-api-access-4lthm". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 28 07:17:35 crc kubenswrapper[4922]: I1128 07:17:35.235065 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8c44439c-6a1b-430d-9dd1-8b0c16033ac1-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "8c44439c-6a1b-430d-9dd1-8b0c16033ac1" (UID: "8c44439c-6a1b-430d-9dd1-8b0c16033ac1"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 28 07:17:35 crc kubenswrapper[4922]: I1128 07:17:35.263924 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8c44439c-6a1b-430d-9dd1-8b0c16033ac1-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "8c44439c-6a1b-430d-9dd1-8b0c16033ac1" (UID: "8c44439c-6a1b-430d-9dd1-8b0c16033ac1"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 28 07:17:35 crc kubenswrapper[4922]: I1128 07:17:35.267478 4922 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4lthm\" (UniqueName: \"kubernetes.io/projected/8c44439c-6a1b-430d-9dd1-8b0c16033ac1-kube-api-access-4lthm\") on node \"crc\" DevicePath \"\""
Nov 28 07:17:35 crc kubenswrapper[4922]: I1128 07:17:35.267521 4922 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/8c44439c-6a1b-430d-9dd1-8b0c16033ac1-dns-svc\") on node \"crc\" DevicePath \"\""
Nov 28 07:17:35 crc kubenswrapper[4922]: I1128 07:17:35.267535 4922 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/8c44439c-6a1b-430d-9dd1-8b0c16033ac1-dns-swift-storage-0\") on node \"crc\" DevicePath \"\""
Nov 28 07:17:35 crc kubenswrapper[4922]: I1128 07:17:35.273099 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8c44439c-6a1b-430d-9dd1-8b0c16033ac1-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "8c44439c-6a1b-430d-9dd1-8b0c16033ac1" (UID: "8c44439c-6a1b-430d-9dd1-8b0c16033ac1"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 28 07:17:35 crc kubenswrapper[4922]: I1128 07:17:35.283872 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8c44439c-6a1b-430d-9dd1-8b0c16033ac1-config" (OuterVolumeSpecName: "config") pod "8c44439c-6a1b-430d-9dd1-8b0c16033ac1" (UID: "8c44439c-6a1b-430d-9dd1-8b0c16033ac1"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 28 07:17:35 crc kubenswrapper[4922]: I1128 07:17:35.290916 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8c44439c-6a1b-430d-9dd1-8b0c16033ac1-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "8c44439c-6a1b-430d-9dd1-8b0c16033ac1" (UID: "8c44439c-6a1b-430d-9dd1-8b0c16033ac1"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 28 07:17:35 crc kubenswrapper[4922]: I1128 07:17:35.369470 4922 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/8c44439c-6a1b-430d-9dd1-8b0c16033ac1-ovsdbserver-sb\") on node \"crc\" DevicePath \"\""
Nov 28 07:17:35 crc kubenswrapper[4922]: I1128 07:17:35.369507 4922 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/8c44439c-6a1b-430d-9dd1-8b0c16033ac1-ovsdbserver-nb\") on node \"crc\" DevicePath \"\""
Nov 28 07:17:35 crc kubenswrapper[4922]: I1128 07:17:35.369519 4922 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8c44439c-6a1b-430d-9dd1-8b0c16033ac1-config\") on node \"crc\" DevicePath \"\""
Nov 28 07:17:35 crc kubenswrapper[4922]: I1128 07:17:35.381639 4922 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-db-sync-ncg57"
Nov 28 07:17:35 crc kubenswrapper[4922]: I1128 07:17:35.402961 4922 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="7216d230-a69f-4954-96aa-cc4b95403298" containerName="nova-api-api" probeResult="failure" output="Get \"http://10.217.0.184:8774/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)"
Nov 28 07:17:35 crc kubenswrapper[4922]: I1128 07:17:35.403044 4922 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="7216d230-a69f-4954-96aa-cc4b95403298" containerName="nova-api-log" probeResult="failure" output="Get \"http://10.217.0.184:8774/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)"
Nov 28 07:17:35 crc kubenswrapper[4922]: I1128 07:17:35.470315 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/2844ae42-cefd-4863-b8f4-95d253b1a5b4-scripts\") pod \"2844ae42-cefd-4863-b8f4-95d253b1a5b4\" (UID: \"2844ae42-cefd-4863-b8f4-95d253b1a5b4\") "
Nov 28 07:17:35 crc kubenswrapper[4922]: I1128 07:17:35.470410 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rthl7\" (UniqueName: \"kubernetes.io/projected/2844ae42-cefd-4863-b8f4-95d253b1a5b4-kube-api-access-rthl7\") pod \"2844ae42-cefd-4863-b8f4-95d253b1a5b4\" (UID: \"2844ae42-cefd-4863-b8f4-95d253b1a5b4\") "
Nov 28 07:17:35 crc kubenswrapper[4922]: I1128 07:17:35.470542 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2844ae42-cefd-4863-b8f4-95d253b1a5b4-config-data\") pod \"2844ae42-cefd-4863-b8f4-95d253b1a5b4\" (UID: \"2844ae42-cefd-4863-b8f4-95d253b1a5b4\") "
Nov 28 07:17:35 crc kubenswrapper[4922]: I1128 07:17:35.470583 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2844ae42-cefd-4863-b8f4-95d253b1a5b4-combined-ca-bundle\") pod \"2844ae42-cefd-4863-b8f4-95d253b1a5b4\" (UID: \"2844ae42-cefd-4863-b8f4-95d253b1a5b4\") "
Nov 28 07:17:35 crc kubenswrapper[4922]: I1128 07:17:35.475423 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2844ae42-cefd-4863-b8f4-95d253b1a5b4-scripts" (OuterVolumeSpecName: "scripts") pod "2844ae42-cefd-4863-b8f4-95d253b1a5b4" (UID: "2844ae42-cefd-4863-b8f4-95d253b1a5b4"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 07:17:35 crc kubenswrapper[4922]: I1128 07:17:35.477587 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2844ae42-cefd-4863-b8f4-95d253b1a5b4-kube-api-access-rthl7" (OuterVolumeSpecName: "kube-api-access-rthl7") pod "2844ae42-cefd-4863-b8f4-95d253b1a5b4" (UID: "2844ae42-cefd-4863-b8f4-95d253b1a5b4"). InnerVolumeSpecName "kube-api-access-rthl7". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 28 07:17:35 crc kubenswrapper[4922]: I1128 07:17:35.518655 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2844ae42-cefd-4863-b8f4-95d253b1a5b4-config-data" (OuterVolumeSpecName: "config-data") pod "2844ae42-cefd-4863-b8f4-95d253b1a5b4" (UID: "2844ae42-cefd-4863-b8f4-95d253b1a5b4"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 07:17:35 crc kubenswrapper[4922]: I1128 07:17:35.535952 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2844ae42-cefd-4863-b8f4-95d253b1a5b4-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "2844ae42-cefd-4863-b8f4-95d253b1a5b4" (UID: "2844ae42-cefd-4863-b8f4-95d253b1a5b4"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 07:17:35 crc kubenswrapper[4922]: I1128 07:17:35.573383 4922 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/2844ae42-cefd-4863-b8f4-95d253b1a5b4-scripts\") on node \"crc\" DevicePath \"\""
Nov 28 07:17:35 crc kubenswrapper[4922]: I1128 07:17:35.573413 4922 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rthl7\" (UniqueName: \"kubernetes.io/projected/2844ae42-cefd-4863-b8f4-95d253b1a5b4-kube-api-access-rthl7\") on node \"crc\" DevicePath \"\""
Nov 28 07:17:35 crc kubenswrapper[4922]: I1128 07:17:35.573427 4922 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2844ae42-cefd-4863-b8f4-95d253b1a5b4-config-data\") on node \"crc\" DevicePath \"\""
Nov 28 07:17:35 crc kubenswrapper[4922]: I1128 07:17:35.573437 4922 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2844ae42-cefd-4863-b8f4-95d253b1a5b4-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Nov 28 07:17:35 crc kubenswrapper[4922]: I1128 07:17:35.944924 4922 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-c55f6679-c2kdb"
Nov 28 07:17:35 crc kubenswrapper[4922]: I1128 07:17:35.945147 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-c55f6679-c2kdb" event={"ID":"8c44439c-6a1b-430d-9dd1-8b0c16033ac1","Type":"ContainerDied","Data":"d6701e3e9ced41b757d35abea2f684ee6daea191c18d96765c7bde2f8a477df1"}
Nov 28 07:17:35 crc kubenswrapper[4922]: I1128 07:17:35.945366 4922 scope.go:117] "RemoveContainer" containerID="e41fe74d51b86bfbb91f2da8a028e499fce1d7462fc60532955d0af580095be8"
Nov 28 07:17:35 crc kubenswrapper[4922]: I1128 07:17:35.947359 4922 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="7216d230-a69f-4954-96aa-cc4b95403298" containerName="nova-api-log" containerID="cri-o://0d1296f5ee0a8d9c56566a276bb8294e6675fb669dbbb93010c62a5e2f07fc92" gracePeriod=30
Nov 28 07:17:35 crc kubenswrapper[4922]: I1128 07:17:35.947550 4922 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-db-sync-ncg57"
Nov 28 07:17:35 crc kubenswrapper[4922]: I1128 07:17:35.948847 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-db-sync-ncg57" event={"ID":"2844ae42-cefd-4863-b8f4-95d253b1a5b4","Type":"ContainerDied","Data":"aac55a923e9705e44ddffe83cdd0f791d921cd7b8ab2eeddae2f7d55ed15a910"}
Nov 28 07:17:35 crc kubenswrapper[4922]: I1128 07:17:35.948983 4922 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="aac55a923e9705e44ddffe83cdd0f791d921cd7b8ab2eeddae2f7d55ed15a910"
Nov 28 07:17:35 crc kubenswrapper[4922]: I1128 07:17:35.956608 4922 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="7216d230-a69f-4954-96aa-cc4b95403298" containerName="nova-api-api" containerID="cri-o://7e0fd255ec314f2db264c78401893b89111b326a63b7e3b4f3817ab89a0ea5ce" gracePeriod=30
Nov 28 07:17:35 crc kubenswrapper[4922]: I1128 07:17:35.994779 4922 scope.go:117] "RemoveContainer" containerID="5994f721051c71898df7f01da6650b3257270ddbeac198a8a7cc0dea37ebd0ae"
Nov 28 07:17:36 crc kubenswrapper[4922]: I1128 07:17:36.033278 4922 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-c55f6679-c2kdb"]
Nov 28 07:17:36 crc kubenswrapper[4922]: I1128 07:17:36.044149 4922 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-c55f6679-c2kdb"]
Nov 28 07:17:36 crc kubenswrapper[4922]: I1128 07:17:36.077158 4922 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-conductor-0"]
Nov 28 07:17:36 crc kubenswrapper[4922]: E1128 07:17:36.077520 4922 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8c44439c-6a1b-430d-9dd1-8b0c16033ac1" containerName="init"
Nov 28 07:17:36 crc kubenswrapper[4922]: I1128 07:17:36.077535 4922 state_mem.go:107] "Deleted CPUSet assignment" podUID="8c44439c-6a1b-430d-9dd1-8b0c16033ac1" containerName="init"
Nov 28 07:17:36 crc kubenswrapper[4922]: E1128 07:17:36.077560 4922 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2844ae42-cefd-4863-b8f4-95d253b1a5b4" containerName="nova-cell1-conductor-db-sync"
Nov 28 07:17:36 crc kubenswrapper[4922]: I1128 07:17:36.077567 4922 state_mem.go:107] "Deleted CPUSet assignment" podUID="2844ae42-cefd-4863-b8f4-95d253b1a5b4" containerName="nova-cell1-conductor-db-sync"
Nov 28 07:17:36 crc kubenswrapper[4922]: E1128 07:17:36.077580 4922 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8c44439c-6a1b-430d-9dd1-8b0c16033ac1" containerName="dnsmasq-dns"
Nov 28 07:17:36 crc kubenswrapper[4922]: I1128 07:17:36.077586 4922 state_mem.go:107] "Deleted CPUSet assignment" podUID="8c44439c-6a1b-430d-9dd1-8b0c16033ac1" containerName="dnsmasq-dns"
Nov 28 07:17:36 crc kubenswrapper[4922]: E1128 07:17:36.077601 4922 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="97450035-755a-422c-ade7-b3bd97b917cd" containerName="nova-manage"
Nov 28 07:17:36 crc kubenswrapper[4922]: I1128 07:17:36.077608 4922 state_mem.go:107] "Deleted CPUSet assignment" podUID="97450035-755a-422c-ade7-b3bd97b917cd" containerName="nova-manage"
Nov 28 07:17:36 crc kubenswrapper[4922]: I1128 07:17:36.077768 4922 memory_manager.go:354] "RemoveStaleState removing state" podUID="97450035-755a-422c-ade7-b3bd97b917cd" containerName="nova-manage"
Nov 28 07:17:36 crc kubenswrapper[4922]: I1128 07:17:36.077781 4922 memory_manager.go:354] "RemoveStaleState removing state" podUID="8c44439c-6a1b-430d-9dd1-8b0c16033ac1" containerName="dnsmasq-dns"
Nov 28 07:17:36 crc kubenswrapper[4922]: I1128 07:17:36.077802 4922 memory_manager.go:354] "RemoveStaleState removing state" podUID="2844ae42-cefd-4863-b8f4-95d253b1a5b4" containerName="nova-cell1-conductor-db-sync"
Nov 28 07:17:36 crc kubenswrapper[4922]: I1128 07:17:36.078399 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-0"
Nov 28 07:17:36 crc kubenswrapper[4922]: I1128 07:17:36.084920 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-conductor-config-data"
Nov 28 07:17:36 crc kubenswrapper[4922]: I1128 07:17:36.105182 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-conductor-0"]
Nov 28 07:17:36 crc kubenswrapper[4922]: I1128 07:17:36.186019 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b2566655-e076-471c-af4c-1e218f70ebe1-combined-ca-bundle\") pod \"nova-cell1-conductor-0\" (UID: \"b2566655-e076-471c-af4c-1e218f70ebe1\") " pod="openstack/nova-cell1-conductor-0"
Nov 28 07:17:36 crc kubenswrapper[4922]: I1128 07:17:36.186115 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sm2rq\" (UniqueName: \"kubernetes.io/projected/b2566655-e076-471c-af4c-1e218f70ebe1-kube-api-access-sm2rq\") pod \"nova-cell1-conductor-0\" (UID: \"b2566655-e076-471c-af4c-1e218f70ebe1\") " pod="openstack/nova-cell1-conductor-0"
Nov 28 07:17:36 crc kubenswrapper[4922]: I1128 07:17:36.186180 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b2566655-e076-471c-af4c-1e218f70ebe1-config-data\") pod \"nova-cell1-conductor-0\" (UID: \"b2566655-e076-471c-af4c-1e218f70ebe1\") " pod="openstack/nova-cell1-conductor-0"
Nov 28 07:17:36 crc kubenswrapper[4922]: I1128 07:17:36.287420 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sm2rq\" (UniqueName: \"kubernetes.io/projected/b2566655-e076-471c-af4c-1e218f70ebe1-kube-api-access-sm2rq\") pod \"nova-cell1-conductor-0\" (UID: \"b2566655-e076-471c-af4c-1e218f70ebe1\") " pod="openstack/nova-cell1-conductor-0"
Nov 28 07:17:36 crc kubenswrapper[4922]: I1128 07:17:36.287525 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b2566655-e076-471c-af4c-1e218f70ebe1-config-data\") pod \"nova-cell1-conductor-0\" (UID: \"b2566655-e076-471c-af4c-1e218f70ebe1\") " pod="openstack/nova-cell1-conductor-0"
Nov 28 07:17:36 crc kubenswrapper[4922]: I1128 07:17:36.287601 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b2566655-e076-471c-af4c-1e218f70ebe1-combined-ca-bundle\") pod \"nova-cell1-conductor-0\" (UID: \"b2566655-e076-471c-af4c-1e218f70ebe1\") " pod="openstack/nova-cell1-conductor-0"
Nov 28 07:17:36 crc kubenswrapper[4922]: I1128 07:17:36.291955 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b2566655-e076-471c-af4c-1e218f70ebe1-config-data\") pod \"nova-cell1-conductor-0\" (UID: \"b2566655-e076-471c-af4c-1e218f70ebe1\") " pod="openstack/nova-cell1-conductor-0"
Nov 28 07:17:36 crc kubenswrapper[4922]: I1128 07:17:36.292445 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b2566655-e076-471c-af4c-1e218f70ebe1-combined-ca-bundle\") pod \"nova-cell1-conductor-0\" (UID: \"b2566655-e076-471c-af4c-1e218f70ebe1\") " pod="openstack/nova-cell1-conductor-0"
Nov 28 07:17:36 crc kubenswrapper[4922]: I1128 07:17:36.306776 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sm2rq\" (UniqueName: \"kubernetes.io/projected/b2566655-e076-471c-af4c-1e218f70ebe1-kube-api-access-sm2rq\") pod \"nova-cell1-conductor-0\" (UID: \"b2566655-e076-471c-af4c-1e218f70ebe1\") " pod="openstack/nova-cell1-conductor-0"
Nov 28 07:17:36 crc kubenswrapper[4922]: I1128 07:17:36.403777 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-0"
Nov 28 07:17:36 crc kubenswrapper[4922]: I1128 07:17:36.911385 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-conductor-0"]
Nov 28 07:17:36 crc kubenswrapper[4922]: I1128 07:17:36.956815 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-0" event={"ID":"b2566655-e076-471c-af4c-1e218f70ebe1","Type":"ContainerStarted","Data":"819311f28c457a5a7faf754afca98ecda7331c7ca3e9872a95371658a82f12de"}
Nov 28 07:17:36 crc kubenswrapper[4922]: I1128 07:17:36.960671 4922 generic.go:334] "Generic (PLEG): container finished" podID="7216d230-a69f-4954-96aa-cc4b95403298" containerID="0d1296f5ee0a8d9c56566a276bb8294e6675fb669dbbb93010c62a5e2f07fc92" exitCode=143
Nov 28 07:17:36 crc kubenswrapper[4922]: I1128 07:17:36.961011 4922 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-scheduler-0" podUID="57109065-ce83-4dea-96c1-48c94d854c36" containerName="nova-scheduler-scheduler" containerID="cri-o://4e0c07a0ee25c791acbccef9889e9bd19a58b0f338022e8f11c9040d8c7194b4" gracePeriod=30
Nov 28 07:17:36 crc kubenswrapper[4922]: I1128 07:17:36.961076 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"7216d230-a69f-4954-96aa-cc4b95403298","Type":"ContainerDied","Data":"0d1296f5ee0a8d9c56566a276bb8294e6675fb669dbbb93010c62a5e2f07fc92"}
Nov 28 07:17:37 crc kubenswrapper[4922]: I1128 07:17:37.409888 4922 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8c44439c-6a1b-430d-9dd1-8b0c16033ac1" path="/var/lib/kubelet/pods/8c44439c-6a1b-430d-9dd1-8b0c16033ac1/volumes"
Nov 28 07:17:37 crc kubenswrapper[4922]: I1128 07:17:37.972441 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-0" event={"ID":"b2566655-e076-471c-af4c-1e218f70ebe1","Type":"ContainerStarted","Data":"94b76863ba343d4c01e9d0ccc2f68e6d6a4daef999bf262b9a27c34ee538c8c3"}
Nov 28 07:17:37 crc kubenswrapper[4922]: I1128 07:17:37.972681 4922 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-cell1-conductor-0"
Nov 28 07:17:37 crc kubenswrapper[4922]: I1128 07:17:37.999682 4922 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-conductor-0" podStartSLOduration=1.999662466 podStartE2EDuration="1.999662466s" podCreationTimestamp="2025-11-28 07:17:36 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 07:17:37.998581787 +0000 UTC m=+1502.918977409" watchObservedRunningTime="2025-11-28 07:17:37.999662466 +0000 UTC m=+1502.920058058"
Nov 28 07:17:39 crc kubenswrapper[4922]: E1128 07:17:39.090483 4922 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="4e0c07a0ee25c791acbccef9889e9bd19a58b0f338022e8f11c9040d8c7194b4" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"]
Nov 28 07:17:39 crc kubenswrapper[4922]: E1128 07:17:39.092794 4922 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="4e0c07a0ee25c791acbccef9889e9bd19a58b0f338022e8f11c9040d8c7194b4" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"]
Nov 28 07:17:39 crc kubenswrapper[4922]: E1128 07:17:39.094989 4922 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="4e0c07a0ee25c791acbccef9889e9bd19a58b0f338022e8f11c9040d8c7194b4" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"]
Nov 28 07:17:39 crc kubenswrapper[4922]: E1128 07:17:39.095041 4922 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/nova-scheduler-0" podUID="57109065-ce83-4dea-96c1-48c94d854c36" containerName="nova-scheduler-scheduler"
Nov 28 07:17:39 crc kubenswrapper[4922]: I1128 07:17:39.926250 4922 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ceilometer-0"
Nov 28 07:17:40 crc kubenswrapper[4922]: I1128 07:17:40.000140 4922 generic.go:334] "Generic (PLEG): container finished" podID="57109065-ce83-4dea-96c1-48c94d854c36" containerID="4e0c07a0ee25c791acbccef9889e9bd19a58b0f338022e8f11c9040d8c7194b4" exitCode=0
Nov 28 07:17:40 crc kubenswrapper[4922]: I1128 07:17:40.000200 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"57109065-ce83-4dea-96c1-48c94d854c36","Type":"ContainerDied","Data":"4e0c07a0ee25c791acbccef9889e9bd19a58b0f338022e8f11c9040d8c7194b4"}
Nov 28 07:17:40 crc kubenswrapper[4922]: I1128 07:17:40.535457 4922 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0"
Nov 28 07:17:40 crc kubenswrapper[4922]: I1128 07:17:40.590726 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-75xpd\" (UniqueName: \"kubernetes.io/projected/57109065-ce83-4dea-96c1-48c94d854c36-kube-api-access-75xpd\") pod \"57109065-ce83-4dea-96c1-48c94d854c36\" (UID: \"57109065-ce83-4dea-96c1-48c94d854c36\") "
Nov 28 07:17:40 crc kubenswrapper[4922]: I1128 07:17:40.591359 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/57109065-ce83-4dea-96c1-48c94d854c36-combined-ca-bundle\") pod \"57109065-ce83-4dea-96c1-48c94d854c36\" (UID: \"57109065-ce83-4dea-96c1-48c94d854c36\") "
Nov 28 07:17:40 crc kubenswrapper[4922]: I1128 07:17:40.591653 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/57109065-ce83-4dea-96c1-48c94d854c36-config-data\") pod \"57109065-ce83-4dea-96c1-48c94d854c36\" (UID: \"57109065-ce83-4dea-96c1-48c94d854c36\") "
Nov 28 07:17:40 crc kubenswrapper[4922]: I1128 07:17:40.597005 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/57109065-ce83-4dea-96c1-48c94d854c36-kube-api-access-75xpd" (OuterVolumeSpecName: "kube-api-access-75xpd") pod "57109065-ce83-4dea-96c1-48c94d854c36" (UID: "57109065-ce83-4dea-96c1-48c94d854c36"). InnerVolumeSpecName "kube-api-access-75xpd". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 28 07:17:40 crc kubenswrapper[4922]: I1128 07:17:40.632163 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/57109065-ce83-4dea-96c1-48c94d854c36-config-data" (OuterVolumeSpecName: "config-data") pod "57109065-ce83-4dea-96c1-48c94d854c36" (UID: "57109065-ce83-4dea-96c1-48c94d854c36"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 07:17:40 crc kubenswrapper[4922]: I1128 07:17:40.644874 4922 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0"
Nov 28 07:17:40 crc kubenswrapper[4922]: I1128 07:17:40.647144 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/57109065-ce83-4dea-96c1-48c94d854c36-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "57109065-ce83-4dea-96c1-48c94d854c36" (UID: "57109065-ce83-4dea-96c1-48c94d854c36"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 07:17:40 crc kubenswrapper[4922]: I1128 07:17:40.693692 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-h92b7\" (UniqueName: \"kubernetes.io/projected/7216d230-a69f-4954-96aa-cc4b95403298-kube-api-access-h92b7\") pod \"7216d230-a69f-4954-96aa-cc4b95403298\" (UID: \"7216d230-a69f-4954-96aa-cc4b95403298\") "
Nov 28 07:17:40 crc kubenswrapper[4922]: I1128 07:17:40.693870 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/7216d230-a69f-4954-96aa-cc4b95403298-logs\") pod \"7216d230-a69f-4954-96aa-cc4b95403298\" (UID: \"7216d230-a69f-4954-96aa-cc4b95403298\") "
Nov 28 07:17:40 crc kubenswrapper[4922]: I1128 07:17:40.693927 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7216d230-a69f-4954-96aa-cc4b95403298-config-data\") pod \"7216d230-a69f-4954-96aa-cc4b95403298\" (UID: \"7216d230-a69f-4954-96aa-cc4b95403298\") "
Nov 28 07:17:40 crc kubenswrapper[4922]: I1128 07:17:40.693960 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7216d230-a69f-4954-96aa-cc4b95403298-combined-ca-bundle\") pod \"7216d230-a69f-4954-96aa-cc4b95403298\" (UID: \"7216d230-a69f-4954-96aa-cc4b95403298\") "
Nov 28 07:17:40 crc kubenswrapper[4922]: I1128 07:17:40.694323 4922 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/57109065-ce83-4dea-96c1-48c94d854c36-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Nov 28 07:17:40 crc kubenswrapper[4922]: I1128 07:17:40.694341 4922 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/57109065-ce83-4dea-96c1-48c94d854c36-config-data\") on node \"crc\" DevicePath \"\""
Nov 28 07:17:40 crc kubenswrapper[4922]: I1128 07:17:40.694350 4922 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-75xpd\" (UniqueName: \"kubernetes.io/projected/57109065-ce83-4dea-96c1-48c94d854c36-kube-api-access-75xpd\") on node \"crc\" DevicePath \"\""
Nov 28 07:17:40 crc kubenswrapper[4922]: I1128 07:17:40.696818 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/7216d230-a69f-4954-96aa-cc4b95403298-logs" (OuterVolumeSpecName: "logs") pod "7216d230-a69f-4954-96aa-cc4b95403298" (UID: "7216d230-a69f-4954-96aa-cc4b95403298"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 28 07:17:40 crc kubenswrapper[4922]: I1128 07:17:40.698836 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7216d230-a69f-4954-96aa-cc4b95403298-kube-api-access-h92b7" (OuterVolumeSpecName: "kube-api-access-h92b7") pod "7216d230-a69f-4954-96aa-cc4b95403298" (UID: "7216d230-a69f-4954-96aa-cc4b95403298"). InnerVolumeSpecName "kube-api-access-h92b7". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 28 07:17:40 crc kubenswrapper[4922]: I1128 07:17:40.717748 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7216d230-a69f-4954-96aa-cc4b95403298-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "7216d230-a69f-4954-96aa-cc4b95403298" (UID: "7216d230-a69f-4954-96aa-cc4b95403298"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 07:17:40 crc kubenswrapper[4922]: I1128 07:17:40.734754 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7216d230-a69f-4954-96aa-cc4b95403298-config-data" (OuterVolumeSpecName: "config-data") pod "7216d230-a69f-4954-96aa-cc4b95403298" (UID: "7216d230-a69f-4954-96aa-cc4b95403298"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 07:17:40 crc kubenswrapper[4922]: I1128 07:17:40.796286 4922 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-h92b7\" (UniqueName: \"kubernetes.io/projected/7216d230-a69f-4954-96aa-cc4b95403298-kube-api-access-h92b7\") on node \"crc\" DevicePath \"\""
Nov 28 07:17:40 crc kubenswrapper[4922]: I1128 07:17:40.796319 4922 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/7216d230-a69f-4954-96aa-cc4b95403298-logs\") on node \"crc\" DevicePath \"\""
Nov 28 07:17:40 crc kubenswrapper[4922]: I1128 07:17:40.796330 4922 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7216d230-a69f-4954-96aa-cc4b95403298-config-data\") on node \"crc\" DevicePath \"\""
Nov 28 07:17:40 crc kubenswrapper[4922]: I1128 07:17:40.796338 4922 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7216d230-a69f-4954-96aa-cc4b95403298-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Nov 28 07:17:41 crc kubenswrapper[4922]: I1128 07:17:41.011727 4922 generic.go:334] "Generic (PLEG): container finished" podID="7216d230-a69f-4954-96aa-cc4b95403298" containerID="7e0fd255ec314f2db264c78401893b89111b326a63b7e3b4f3817ab89a0ea5ce" exitCode=0
Nov 28 07:17:41 crc kubenswrapper[4922]: I1128 07:17:41.011806 4922 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0"
Nov 28 07:17:41 crc kubenswrapper[4922]: I1128 07:17:41.011837 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"7216d230-a69f-4954-96aa-cc4b95403298","Type":"ContainerDied","Data":"7e0fd255ec314f2db264c78401893b89111b326a63b7e3b4f3817ab89a0ea5ce"}
Nov 28 07:17:41 crc kubenswrapper[4922]: I1128 07:17:41.011914 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"7216d230-a69f-4954-96aa-cc4b95403298","Type":"ContainerDied","Data":"ad08734605bf274f3a0270035886d0ad3dfec740c0d5babfd0bbdf2a68b37d8f"}
Nov 28 07:17:41 crc kubenswrapper[4922]: I1128 07:17:41.011940 4922 scope.go:117] "RemoveContainer" containerID="7e0fd255ec314f2db264c78401893b89111b326a63b7e3b4f3817ab89a0ea5ce"
Nov 28 07:17:41 crc kubenswrapper[4922]: I1128 07:17:41.014350 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"57109065-ce83-4dea-96c1-48c94d854c36","Type":"ContainerDied","Data":"1dcead371a0ef9c9fcaa760ac003c2f71b770c1de11c466a5d00ef3a7bbbffc8"}
Nov 28 07:17:41 crc kubenswrapper[4922]: I1128 07:17:41.014455 4922 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0"
Nov 28 07:17:41 crc kubenswrapper[4922]: I1128 07:17:41.073611 4922 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"]
Nov 28 07:17:41 crc kubenswrapper[4922]: I1128 07:17:41.074088 4922 scope.go:117] "RemoveContainer" containerID="0d1296f5ee0a8d9c56566a276bb8294e6675fb669dbbb93010c62a5e2f07fc92"
Nov 28 07:17:41 crc kubenswrapper[4922]: I1128 07:17:41.090245 4922 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-scheduler-0"]
Nov 28 07:17:41 crc kubenswrapper[4922]: I1128 07:17:41.110836 4922 scope.go:117] "RemoveContainer" containerID="7e0fd255ec314f2db264c78401893b89111b326a63b7e3b4f3817ab89a0ea5ce"
Nov 28 07:17:41 crc kubenswrapper[4922]: E1128 07:17:41.111543 4922 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"7e0fd255ec314f2db264c78401893b89111b326a63b7e3b4f3817ab89a0ea5ce\": container with ID starting with 7e0fd255ec314f2db264c78401893b89111b326a63b7e3b4f3817ab89a0ea5ce not found: ID does not exist" containerID="7e0fd255ec314f2db264c78401893b89111b326a63b7e3b4f3817ab89a0ea5ce"
Nov 28 07:17:41 crc kubenswrapper[4922]: I1128 07:17:41.111578 4922 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7e0fd255ec314f2db264c78401893b89111b326a63b7e3b4f3817ab89a0ea5ce"} err="failed to get container status \"7e0fd255ec314f2db264c78401893b89111b326a63b7e3b4f3817ab89a0ea5ce\": rpc error: code = NotFound desc = could not find container \"7e0fd255ec314f2db264c78401893b89111b326a63b7e3b4f3817ab89a0ea5ce\": container with ID starting with 7e0fd255ec314f2db264c78401893b89111b326a63b7e3b4f3817ab89a0ea5ce not found: ID does not exist"
Nov 28 07:17:41 crc kubenswrapper[4922]: I1128 07:17:41.111603 4922 scope.go:117] "RemoveContainer" containerID="0d1296f5ee0a8d9c56566a276bb8294e6675fb669dbbb93010c62a5e2f07fc92"
Nov 28 07:17:41 crc kubenswrapper[4922]: E1128 07:17:41.112719 4922 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0d1296f5ee0a8d9c56566a276bb8294e6675fb669dbbb93010c62a5e2f07fc92\": container with ID starting with 0d1296f5ee0a8d9c56566a276bb8294e6675fb669dbbb93010c62a5e2f07fc92 not found: ID does not exist" containerID="0d1296f5ee0a8d9c56566a276bb8294e6675fb669dbbb93010c62a5e2f07fc92"
Nov 28 07:17:41 crc kubenswrapper[4922]: I1128 07:17:41.112753 4922 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0d1296f5ee0a8d9c56566a276bb8294e6675fb669dbbb93010c62a5e2f07fc92"} err="failed to get container status \"0d1296f5ee0a8d9c56566a276bb8294e6675fb669dbbb93010c62a5e2f07fc92\": rpc error: code = NotFound desc = could not find container \"0d1296f5ee0a8d9c56566a276bb8294e6675fb669dbbb93010c62a5e2f07fc92\": container with ID starting with 0d1296f5ee0a8d9c56566a276bb8294e6675fb669dbbb93010c62a5e2f07fc92 not found: ID does not exist"
Nov 28 07:17:41 crc kubenswrapper[4922]: I1128 07:17:41.112772 4922 scope.go:117] "RemoveContainer" containerID="4e0c07a0ee25c791acbccef9889e9bd19a58b0f338022e8f11c9040d8c7194b4"
Nov 28 07:17:41 crc kubenswrapper[4922]: I1128 07:17:41.113467 4922 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"]
Nov 28 07:17:41 crc kubenswrapper[4922]: I1128 07:17:41.123560 4922 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-0"]
Nov 28 07:17:41 crc kubenswrapper[4922]: I1128 07:17:41.135298 4922 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-scheduler-0"]
Nov 28 07:17:41 crc kubenswrapper[4922]: E1128 07:17:41.135842 4922 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="57109065-ce83-4dea-96c1-48c94d854c36" containerName="nova-scheduler-scheduler"
Nov 28 07:17:41 crc kubenswrapper[4922]: I1128 07:17:41.135865 4922 state_mem.go:107] "Deleted CPUSet assignment" podUID="57109065-ce83-4dea-96c1-48c94d854c36" containerName="nova-scheduler-scheduler"
Nov 28 07:17:41 crc kubenswrapper[4922]: E1128 07:17:41.135881 4922 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7216d230-a69f-4954-96aa-cc4b95403298" containerName="nova-api-api"
Nov 28 07:17:41 crc kubenswrapper[4922]: I1128 07:17:41.135890 4922 state_mem.go:107] "Deleted CPUSet assignment" podUID="7216d230-a69f-4954-96aa-cc4b95403298" containerName="nova-api-api"
Nov 28 07:17:41 crc kubenswrapper[4922]: E1128 07:17:41.135905 4922 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7216d230-a69f-4954-96aa-cc4b95403298" containerName="nova-api-log"
Nov 28 07:17:41 crc kubenswrapper[4922]: I1128 07:17:41.135913 4922 state_mem.go:107] "Deleted CPUSet assignment" podUID="7216d230-a69f-4954-96aa-cc4b95403298" containerName="nova-api-log"
Nov 28 07:17:41 crc kubenswrapper[4922]: I1128 07:17:41.136165 4922 memory_manager.go:354] "RemoveStaleState removing state" podUID="7216d230-a69f-4954-96aa-cc4b95403298" containerName="nova-api-log"
Nov 28 07:17:41 crc kubenswrapper[4922]: I1128 07:17:41.136190 4922 memory_manager.go:354] "RemoveStaleState removing state" podUID="57109065-ce83-4dea-96c1-48c94d854c36" containerName="nova-scheduler-scheduler"
Nov 28 07:17:41 crc kubenswrapper[4922]: I1128 07:17:41.136203 4922 memory_manager.go:354] "RemoveStaleState removing state" podUID="7216d230-a69f-4954-96aa-cc4b95403298" containerName="nova-api-api"
Nov 28 07:17:41 crc kubenswrapper[4922]: I1128 07:17:41.136985 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0"
Nov 28 07:17:41 crc kubenswrapper[4922]: I1128 07:17:41.139170 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-scheduler-config-data"
Nov 28 07:17:41 crc kubenswrapper[4922]: I1128 07:17:41.149310 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"]
Nov 28 07:17:41 crc kubenswrapper[4922]: I1128 07:17:41.155467 4922 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-0"]
Nov 28 07:17:41 crc kubenswrapper[4922]: I1128 07:17:41.157035 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0"
Nov 28 07:17:41 crc kubenswrapper[4922]: I1128 07:17:41.159048 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-config-data"
Nov 28 07:17:41 crc kubenswrapper[4922]: I1128 07:17:41.163374 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"]
Nov 28 07:17:41 crc kubenswrapper[4922]: I1128 07:17:41.203469 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sg6qc\" (UniqueName: \"kubernetes.io/projected/541557ff-fa14-4f6c-b957-716ba8fdb38e-kube-api-access-sg6qc\") pod \"nova-api-0\" (UID: \"541557ff-fa14-4f6c-b957-716ba8fdb38e\") " pod="openstack/nova-api-0"
Nov 28 07:17:41 crc kubenswrapper[4922]: I1128 07:17:41.203660 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0422898c-d82b-4976-add8-6d15f54781e1-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"0422898c-d82b-4976-add8-6d15f54781e1\") " pod="openstack/nova-scheduler-0"
Nov 28 07:17:41 crc kubenswrapper[4922]: I1128 07:17:41.203766 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/541557ff-fa14-4f6c-b957-716ba8fdb38e-logs\") pod \"nova-api-0\" (UID: \"541557ff-fa14-4f6c-b957-716ba8fdb38e\") " pod="openstack/nova-api-0"
Nov 28 07:17:41 crc kubenswrapper[4922]: I1128 07:17:41.204021 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wfxtq\" (UniqueName: \"kubernetes.io/projected/0422898c-d82b-4976-add8-6d15f54781e1-kube-api-access-wfxtq\") pod \"nova-scheduler-0\" (UID: \"0422898c-d82b-4976-add8-6d15f54781e1\") " pod="openstack/nova-scheduler-0"
Nov 28 07:17:41 crc kubenswrapper[4922]: I1128 07:17:41.204108 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/541557ff-fa14-4f6c-b957-716ba8fdb38e-config-data\") pod \"nova-api-0\" (UID: \"541557ff-fa14-4f6c-b957-716ba8fdb38e\") " pod="openstack/nova-api-0"
Nov 28 07:17:41 crc kubenswrapper[4922]: I1128 07:17:41.204131 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/541557ff-fa14-4f6c-b957-716ba8fdb38e-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"541557ff-fa14-4f6c-b957-716ba8fdb38e\") " pod="openstack/nova-api-0"
Nov 28 07:17:41 crc kubenswrapper[4922]: I1128 07:17:41.204183 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0422898c-d82b-4976-add8-6d15f54781e1-config-data\") pod \"nova-scheduler-0\" (UID: \"0422898c-d82b-4976-add8-6d15f54781e1\") " pod="openstack/nova-scheduler-0"
Nov 28 07:17:41 crc kubenswrapper[4922]: I1128 07:17:41.305580 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sg6qc\" (UniqueName: \"kubernetes.io/projected/541557ff-fa14-4f6c-b957-716ba8fdb38e-kube-api-access-sg6qc\") pod \"nova-api-0\" (UID: \"541557ff-fa14-4f6c-b957-716ba8fdb38e\") " pod="openstack/nova-api-0"
Nov 28 07:17:41 crc kubenswrapper[4922]: I1128 07:17:41.305667 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0422898c-d82b-4976-add8-6d15f54781e1-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"0422898c-d82b-4976-add8-6d15f54781e1\") " pod="openstack/nova-scheduler-0"
Nov 28 07:17:41 crc kubenswrapper[4922]: I1128 07:17:41.305696 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/541557ff-fa14-4f6c-b957-716ba8fdb38e-logs\") pod \"nova-api-0\" (UID: \"541557ff-fa14-4f6c-b957-716ba8fdb38e\") " pod="openstack/nova-api-0"
Nov 28 07:17:41 crc kubenswrapper[4922]: I1128 07:17:41.305738 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wfxtq\" (UniqueName: \"kubernetes.io/projected/0422898c-d82b-4976-add8-6d15f54781e1-kube-api-access-wfxtq\") pod \"nova-scheduler-0\" (UID: \"0422898c-d82b-4976-add8-6d15f54781e1\") " pod="openstack/nova-scheduler-0"
Nov 28 07:17:41 crc kubenswrapper[4922]: I1128 07:17:41.305759 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/541557ff-fa14-4f6c-b957-716ba8fdb38e-config-data\") pod \"nova-api-0\" (UID: \"541557ff-fa14-4f6c-b957-716ba8fdb38e\") " pod="openstack/nova-api-0"
Nov 28 07:17:41 crc kubenswrapper[4922]: I1128 07:17:41.305774 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/541557ff-fa14-4f6c-b957-716ba8fdb38e-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"541557ff-fa14-4f6c-b957-716ba8fdb38e\") " pod="openstack/nova-api-0"
Nov 28 07:17:41 crc kubenswrapper[4922]: I1128 07:17:41.305795 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0422898c-d82b-4976-add8-6d15f54781e1-config-data\") pod \"nova-scheduler-0\" (UID: \"0422898c-d82b-4976-add8-6d15f54781e1\") " pod="openstack/nova-scheduler-0"
Nov 28 07:17:41 crc kubenswrapper[4922]: I1128 07:17:41.306283 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/541557ff-fa14-4f6c-b957-716ba8fdb38e-logs\") pod \"nova-api-0\" (UID: \"541557ff-fa14-4f6c-b957-716ba8fdb38e\") " pod="openstack/nova-api-0"
Nov 28 07:17:41 crc kubenswrapper[4922]: I1128 07:17:41.309479 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0422898c-d82b-4976-add8-6d15f54781e1-config-data\") pod \"nova-scheduler-0\" (UID: \"0422898c-d82b-4976-add8-6d15f54781e1\") " pod="openstack/nova-scheduler-0"
Nov 28 07:17:41 crc kubenswrapper[4922]: I1128 07:17:41.310944 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/541557ff-fa14-4f6c-b957-716ba8fdb38e-combined-ca-bundle\") pod 
\"nova-api-0\" (UID: \"541557ff-fa14-4f6c-b957-716ba8fdb38e\") " pod="openstack/nova-api-0" Nov 28 07:17:41 crc kubenswrapper[4922]: I1128 07:17:41.316709 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0422898c-d82b-4976-add8-6d15f54781e1-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"0422898c-d82b-4976-add8-6d15f54781e1\") " pod="openstack/nova-scheduler-0" Nov 28 07:17:41 crc kubenswrapper[4922]: I1128 07:17:41.319491 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/541557ff-fa14-4f6c-b957-716ba8fdb38e-config-data\") pod \"nova-api-0\" (UID: \"541557ff-fa14-4f6c-b957-716ba8fdb38e\") " pod="openstack/nova-api-0" Nov 28 07:17:41 crc kubenswrapper[4922]: I1128 07:17:41.323265 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sg6qc\" (UniqueName: \"kubernetes.io/projected/541557ff-fa14-4f6c-b957-716ba8fdb38e-kube-api-access-sg6qc\") pod \"nova-api-0\" (UID: \"541557ff-fa14-4f6c-b957-716ba8fdb38e\") " pod="openstack/nova-api-0" Nov 28 07:17:41 crc kubenswrapper[4922]: I1128 07:17:41.323422 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wfxtq\" (UniqueName: \"kubernetes.io/projected/0422898c-d82b-4976-add8-6d15f54781e1-kube-api-access-wfxtq\") pod \"nova-scheduler-0\" (UID: \"0422898c-d82b-4976-add8-6d15f54781e1\") " pod="openstack/nova-scheduler-0" Nov 28 07:17:41 crc kubenswrapper[4922]: I1128 07:17:41.410511 4922 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="57109065-ce83-4dea-96c1-48c94d854c36" path="/var/lib/kubelet/pods/57109065-ce83-4dea-96c1-48c94d854c36/volumes" Nov 28 07:17:41 crc kubenswrapper[4922]: I1128 07:17:41.411127 4922 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7216d230-a69f-4954-96aa-cc4b95403298" path="/var/lib/kubelet/pods/7216d230-a69f-4954-96aa-cc4b95403298/volumes" Nov 28 07:17:41 crc kubenswrapper[4922]: I1128 07:17:41.452406 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Nov 28 07:17:41 crc kubenswrapper[4922]: I1128 07:17:41.471493 4922 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Nov 28 07:17:41 crc kubenswrapper[4922]: I1128 07:17:41.935527 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Nov 28 07:17:41 crc kubenswrapper[4922]: I1128 07:17:41.987953 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Nov 28 07:17:42 crc kubenswrapper[4922]: I1128 07:17:42.035338 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"541557ff-fa14-4f6c-b957-716ba8fdb38e","Type":"ContainerStarted","Data":"111ac3a70f185bf85575d6320d90c3fb43a0236bf9cc734aca3b441aaa7b310e"} Nov 28 07:17:42 crc kubenswrapper[4922]: I1128 07:17:42.039395 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"0422898c-d82b-4976-add8-6d15f54781e1","Type":"ContainerStarted","Data":"816b230dfadee572c4e2cfb2f41818265fe2573d5d5aaee24302ef0d0e0ccadc"} Nov 28 07:17:43 crc kubenswrapper[4922]: I1128 07:17:43.051586 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"541557ff-fa14-4f6c-b957-716ba8fdb38e","Type":"ContainerStarted","Data":"db0c7b510a2f2c391d9395ac5a825a0b3f234fedbbdec3ce3eba8ee1d119e43b"} Nov 28 07:17:43 crc kubenswrapper[4922]: I1128 07:17:43.051902 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"541557ff-fa14-4f6c-b957-716ba8fdb38e","Type":"ContainerStarted","Data":"c97e47104c2d6277d0f91592a43e2278122aadcb3c00b2e446aa63c87643d4f1"} Nov 28 07:17:43 crc kubenswrapper[4922]: I1128 07:17:43.053492 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"0422898c-d82b-4976-add8-6d15f54781e1","Type":"ContainerStarted","Data":"ac60a9b1c6addd63b2ba146072d744df055b956e6e9bfa45aa75d108e531614f"} Nov 28 07:17:43 crc kubenswrapper[4922]: I1128 07:17:43.075494 4922 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-0" podStartSLOduration=2.075477593 podStartE2EDuration="2.075477593s" podCreationTimestamp="2025-11-28 07:17:41 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 07:17:43.069877024 +0000 UTC m=+1507.990272616" watchObservedRunningTime="2025-11-28 07:17:43.075477593 +0000 UTC m=+1507.995873185" Nov 28 07:17:43 crc kubenswrapper[4922]: I1128 07:17:43.098821 4922 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-scheduler-0" podStartSLOduration=2.098801024 podStartE2EDuration="2.098801024s" podCreationTimestamp="2025-11-28 07:17:41 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 07:17:43.095577079 +0000 UTC m=+1508.015972671" watchObservedRunningTime="2025-11-28 07:17:43.098801024 +0000 UTC m=+1508.019196626" Nov 28 07:17:43 crc kubenswrapper[4922]: I1128 07:17:43.723576 4922 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/kube-state-metrics-0"] Nov 28 07:17:43 crc kubenswrapper[4922]: I1128 07:17:43.724135 4922 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/kube-state-metrics-0" podUID="43f49b9c-475e-4b28-ada6-e73db47e4bd7" containerName="kube-state-metrics" containerID="cri-o://2e7cddf6b0653261ca2f1e88ef62868195f7ba744ecd3dbed01efef8d6511634" gracePeriod=30 Nov 28 07:17:44 crc kubenswrapper[4922]: I1128 07:17:44.065188 4922 generic.go:334] 
"Generic (PLEG): container finished" podID="43f49b9c-475e-4b28-ada6-e73db47e4bd7" containerID="2e7cddf6b0653261ca2f1e88ef62868195f7ba744ecd3dbed01efef8d6511634" exitCode=2 Nov 28 07:17:44 crc kubenswrapper[4922]: I1128 07:17:44.065471 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"43f49b9c-475e-4b28-ada6-e73db47e4bd7","Type":"ContainerDied","Data":"2e7cddf6b0653261ca2f1e88ef62868195f7ba744ecd3dbed01efef8d6511634"} Nov 28 07:17:44 crc kubenswrapper[4922]: I1128 07:17:44.240803 4922 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/kube-state-metrics-0" Nov 28 07:17:44 crc kubenswrapper[4922]: I1128 07:17:44.273817 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jnbq2\" (UniqueName: \"kubernetes.io/projected/43f49b9c-475e-4b28-ada6-e73db47e4bd7-kube-api-access-jnbq2\") pod \"43f49b9c-475e-4b28-ada6-e73db47e4bd7\" (UID: \"43f49b9c-475e-4b28-ada6-e73db47e4bd7\") " Nov 28 07:17:44 crc kubenswrapper[4922]: I1128 07:17:44.279490 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/43f49b9c-475e-4b28-ada6-e73db47e4bd7-kube-api-access-jnbq2" (OuterVolumeSpecName: "kube-api-access-jnbq2") pod "43f49b9c-475e-4b28-ada6-e73db47e4bd7" (UID: "43f49b9c-475e-4b28-ada6-e73db47e4bd7"). InnerVolumeSpecName "kube-api-access-jnbq2". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 07:17:44 crc kubenswrapper[4922]: I1128 07:17:44.375662 4922 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jnbq2\" (UniqueName: \"kubernetes.io/projected/43f49b9c-475e-4b28-ada6-e73db47e4bd7-kube-api-access-jnbq2\") on node \"crc\" DevicePath \"\"" Nov 28 07:17:45 crc kubenswrapper[4922]: I1128 07:17:45.078149 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"43f49b9c-475e-4b28-ada6-e73db47e4bd7","Type":"ContainerDied","Data":"efdc7798db4213709ae0ccc482f0aefa0e60cade5343f3783c160883f6206306"} Nov 28 07:17:45 crc kubenswrapper[4922]: I1128 07:17:45.078201 4922 scope.go:117] "RemoveContainer" containerID="2e7cddf6b0653261ca2f1e88ef62868195f7ba744ecd3dbed01efef8d6511634" Nov 28 07:17:45 crc kubenswrapper[4922]: I1128 07:17:45.078279 4922 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/kube-state-metrics-0" Nov 28 07:17:45 crc kubenswrapper[4922]: I1128 07:17:45.129159 4922 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/kube-state-metrics-0"] Nov 28 07:17:45 crc kubenswrapper[4922]: I1128 07:17:45.145757 4922 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/kube-state-metrics-0"] Nov 28 07:17:45 crc kubenswrapper[4922]: I1128 07:17:45.158826 4922 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/kube-state-metrics-0"] Nov 28 07:17:45 crc kubenswrapper[4922]: E1128 07:17:45.159502 4922 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="43f49b9c-475e-4b28-ada6-e73db47e4bd7" containerName="kube-state-metrics" Nov 28 07:17:45 crc kubenswrapper[4922]: I1128 07:17:45.159523 4922 state_mem.go:107] "Deleted CPUSet assignment" podUID="43f49b9c-475e-4b28-ada6-e73db47e4bd7" containerName="kube-state-metrics" Nov 28 07:17:45 crc kubenswrapper[4922]: I1128 07:17:45.159844 4922 memory_manager.go:354] "RemoveStaleState removing state" podUID="43f49b9c-475e-4b28-ada6-e73db47e4bd7" containerName="kube-state-metrics" Nov 28 07:17:45 crc kubenswrapper[4922]: I1128 07:17:45.160799 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/kube-state-metrics-0" Nov 28 07:17:45 crc kubenswrapper[4922]: I1128 07:17:45.164462 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"kube-state-metrics-tls-config" Nov 28 07:17:45 crc kubenswrapper[4922]: I1128 07:17:45.164720 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-kube-state-metrics-svc" Nov 28 07:17:45 crc kubenswrapper[4922]: I1128 07:17:45.169691 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/kube-state-metrics-0"] Nov 28 07:17:45 crc kubenswrapper[4922]: I1128 07:17:45.292030 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-state-metrics-tls-certs\" (UniqueName: \"kubernetes.io/secret/f672d6bb-97fc-4547-a14b-af27d631fe2a-kube-state-metrics-tls-certs\") pod \"kube-state-metrics-0\" (UID: \"f672d6bb-97fc-4547-a14b-af27d631fe2a\") " pod="openstack/kube-state-metrics-0" Nov 28 07:17:45 crc kubenswrapper[4922]: I1128 07:17:45.292278 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sqp4r\" (UniqueName: \"kubernetes.io/projected/f672d6bb-97fc-4547-a14b-af27d631fe2a-kube-api-access-sqp4r\") pod \"kube-state-metrics-0\" (UID: \"f672d6bb-97fc-4547-a14b-af27d631fe2a\") " pod="openstack/kube-state-metrics-0" Nov 28 07:17:45 crc kubenswrapper[4922]: I1128 07:17:45.292692 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-state-metrics-tls-config\" (UniqueName: \"kubernetes.io/secret/f672d6bb-97fc-4547-a14b-af27d631fe2a-kube-state-metrics-tls-config\") pod \"kube-state-metrics-0\" (UID: \"f672d6bb-97fc-4547-a14b-af27d631fe2a\") " pod="openstack/kube-state-metrics-0" Nov 28 07:17:45 crc kubenswrapper[4922]: I1128 07:17:45.292797 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f672d6bb-97fc-4547-a14b-af27d631fe2a-combined-ca-bundle\") pod \"kube-state-metrics-0\" (UID: \"f672d6bb-97fc-4547-a14b-af27d631fe2a\") " pod="openstack/kube-state-metrics-0" Nov 28 07:17:45 crc kubenswrapper[4922]: I1128 07:17:45.394734 4922 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"kube-state-metrics-tls-certs\" (UniqueName: \"kubernetes.io/secret/f672d6bb-97fc-4547-a14b-af27d631fe2a-kube-state-metrics-tls-certs\") pod \"kube-state-metrics-0\" (UID: \"f672d6bb-97fc-4547-a14b-af27d631fe2a\") " pod="openstack/kube-state-metrics-0" Nov 28 07:17:45 crc kubenswrapper[4922]: I1128 07:17:45.394811 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sqp4r\" (UniqueName: \"kubernetes.io/projected/f672d6bb-97fc-4547-a14b-af27d631fe2a-kube-api-access-sqp4r\") pod \"kube-state-metrics-0\" (UID: \"f672d6bb-97fc-4547-a14b-af27d631fe2a\") " pod="openstack/kube-state-metrics-0" Nov 28 07:17:45 crc kubenswrapper[4922]: I1128 07:17:45.394884 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-state-metrics-tls-config\" (UniqueName: \"kubernetes.io/secret/f672d6bb-97fc-4547-a14b-af27d631fe2a-kube-state-metrics-tls-config\") pod \"kube-state-metrics-0\" (UID: \"f672d6bb-97fc-4547-a14b-af27d631fe2a\") " pod="openstack/kube-state-metrics-0" Nov 28 07:17:45 crc kubenswrapper[4922]: I1128 07:17:45.394925 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f672d6bb-97fc-4547-a14b-af27d631fe2a-combined-ca-bundle\") pod \"kube-state-metrics-0\" (UID: \"f672d6bb-97fc-4547-a14b-af27d631fe2a\") " pod="openstack/kube-state-metrics-0" Nov 28 07:17:45 crc kubenswrapper[4922]: I1128 07:17:45.403768 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f672d6bb-97fc-4547-a14b-af27d631fe2a-combined-ca-bundle\") pod \"kube-state-metrics-0\" (UID: \"f672d6bb-97fc-4547-a14b-af27d631fe2a\") " pod="openstack/kube-state-metrics-0" Nov 28 07:17:45 crc kubenswrapper[4922]: I1128 07:17:45.406076 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-state-metrics-tls-config\" (UniqueName: \"kubernetes.io/secret/f672d6bb-97fc-4547-a14b-af27d631fe2a-kube-state-metrics-tls-config\") pod \"kube-state-metrics-0\" (UID: \"f672d6bb-97fc-4547-a14b-af27d631fe2a\") " pod="openstack/kube-state-metrics-0" Nov 28 07:17:45 crc kubenswrapper[4922]: I1128 07:17:45.412937 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-state-metrics-tls-certs\" (UniqueName: \"kubernetes.io/secret/f672d6bb-97fc-4547-a14b-af27d631fe2a-kube-state-metrics-tls-certs\") pod \"kube-state-metrics-0\" (UID: \"f672d6bb-97fc-4547-a14b-af27d631fe2a\") " pod="openstack/kube-state-metrics-0" Nov 28 07:17:45 crc kubenswrapper[4922]: I1128 07:17:45.414095 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sqp4r\" (UniqueName: \"kubernetes.io/projected/f672d6bb-97fc-4547-a14b-af27d631fe2a-kube-api-access-sqp4r\") pod \"kube-state-metrics-0\" (UID: \"f672d6bb-97fc-4547-a14b-af27d631fe2a\") " pod="openstack/kube-state-metrics-0" Nov 28 07:17:45 crc kubenswrapper[4922]: I1128 07:17:45.419928 4922 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="43f49b9c-475e-4b28-ada6-e73db47e4bd7" path="/var/lib/kubelet/pods/43f49b9c-475e-4b28-ada6-e73db47e4bd7/volumes" Nov 28 07:17:45 crc kubenswrapper[4922]: I1128 07:17:45.510639 4922 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/kube-state-metrics-0" Nov 28 07:17:45 crc kubenswrapper[4922]: I1128 07:17:45.686271 4922 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 28 07:17:45 crc kubenswrapper[4922]: I1128 07:17:45.686600 4922 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="f7109c3e-fe30-4320-bc5b-77e8d8d9fcd5" containerName="ceilometer-central-agent" containerID="cri-o://24a754b3e28e220f702cbe423f7a8548e10bca4d828b90b098c741f027d4ae2e" gracePeriod=30 Nov 28 07:17:45 crc kubenswrapper[4922]: I1128 07:17:45.687135 4922 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="f7109c3e-fe30-4320-bc5b-77e8d8d9fcd5" containerName="proxy-httpd" containerID="cri-o://10ff0b42fda9ed0c0ed9344fff7081aed39e685a880052b47458eacb9c0a4858" gracePeriod=30 Nov 28 07:17:45 crc kubenswrapper[4922]: I1128 07:17:45.687192 4922 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="f7109c3e-fe30-4320-bc5b-77e8d8d9fcd5" containerName="sg-core" containerID="cri-o://3eeece16ff3dc32b9e585c6aac40d9542164997b870f99a0f6eb03fdfeb4385d" gracePeriod=30 Nov 28 07:17:45 crc kubenswrapper[4922]: I1128 07:17:45.687252 4922 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="f7109c3e-fe30-4320-bc5b-77e8d8d9fcd5" containerName="ceilometer-notification-agent" containerID="cri-o://df96a23508edaf2f3aa425171422774275166b45b04e08150bfd011989c9be93" gracePeriod=30 Nov 28 07:17:45 crc kubenswrapper[4922]: I1128 07:17:45.986254 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/kube-state-metrics-0"] Nov 28 07:17:46 crc kubenswrapper[4922]: I1128 07:17:46.001561 4922 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 28 07:17:46 crc kubenswrapper[4922]: I1128 07:17:46.093411 4922 generic.go:334] "Generic (PLEG): container finished" podID="f7109c3e-fe30-4320-bc5b-77e8d8d9fcd5" containerID="10ff0b42fda9ed0c0ed9344fff7081aed39e685a880052b47458eacb9c0a4858" exitCode=0 Nov 28 07:17:46 crc kubenswrapper[4922]: I1128 07:17:46.093450 4922 generic.go:334] "Generic (PLEG): container finished" podID="f7109c3e-fe30-4320-bc5b-77e8d8d9fcd5" containerID="3eeece16ff3dc32b9e585c6aac40d9542164997b870f99a0f6eb03fdfeb4385d" exitCode=2 Nov 28 07:17:46 crc kubenswrapper[4922]: I1128 07:17:46.093499 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"f7109c3e-fe30-4320-bc5b-77e8d8d9fcd5","Type":"ContainerDied","Data":"10ff0b42fda9ed0c0ed9344fff7081aed39e685a880052b47458eacb9c0a4858"} Nov 28 07:17:46 crc kubenswrapper[4922]: I1128 07:17:46.093529 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"f7109c3e-fe30-4320-bc5b-77e8d8d9fcd5","Type":"ContainerDied","Data":"3eeece16ff3dc32b9e585c6aac40d9542164997b870f99a0f6eb03fdfeb4385d"} Nov 28 07:17:46 crc kubenswrapper[4922]: I1128 07:17:46.094547 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"f672d6bb-97fc-4547-a14b-af27d631fe2a","Type":"ContainerStarted","Data":"00331edf10326990451fd6b99f8df5e28c199a8ac8a9c97d3c2a18b04fdc3517"} Nov 28 07:17:46 crc kubenswrapper[4922]: I1128 07:17:46.431052 4922 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-cell1-conductor-0" Nov 28 07:17:46 crc 
kubenswrapper[4922]: I1128 07:17:46.453473 4922 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-scheduler-0" Nov 28 07:17:47 crc kubenswrapper[4922]: I1128 07:17:47.110156 4922 generic.go:334] "Generic (PLEG): container finished" podID="f7109c3e-fe30-4320-bc5b-77e8d8d9fcd5" containerID="24a754b3e28e220f702cbe423f7a8548e10bca4d828b90b098c741f027d4ae2e" exitCode=0 Nov 28 07:17:47 crc kubenswrapper[4922]: I1128 07:17:47.110249 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"f7109c3e-fe30-4320-bc5b-77e8d8d9fcd5","Type":"ContainerDied","Data":"24a754b3e28e220f702cbe423f7a8548e10bca4d828b90b098c741f027d4ae2e"} Nov 28 07:17:47 crc kubenswrapper[4922]: I1128 07:17:47.113027 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"f672d6bb-97fc-4547-a14b-af27d631fe2a","Type":"ContainerStarted","Data":"97f477a9860f7b7b30cd717b142d2d8ffd74a63e7e7efeaa441ecae8870aecb0"} Nov 28 07:17:47 crc kubenswrapper[4922]: I1128 07:17:47.113382 4922 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/kube-state-metrics-0" Nov 28 07:17:47 crc kubenswrapper[4922]: I1128 07:17:47.173850 4922 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/kube-state-metrics-0" podStartSLOduration=1.73431278 podStartE2EDuration="2.173821934s" podCreationTimestamp="2025-11-28 07:17:45 +0000 UTC" firstStartedPulling="2025-11-28 07:17:46.00136665 +0000 UTC m=+1510.921762222" lastFinishedPulling="2025-11-28 07:17:46.440875794 +0000 UTC m=+1511.361271376" observedRunningTime="2025-11-28 07:17:47.135283959 +0000 UTC m=+1512.055679551" watchObservedRunningTime="2025-11-28 07:17:47.173821934 +0000 UTC m=+1512.094217556" Nov 28 07:17:48 crc kubenswrapper[4922]: I1128 07:17:48.042557 4922 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 28 07:17:48 crc kubenswrapper[4922]: I1128 07:17:48.124331 4922 generic.go:334] "Generic (PLEG): container finished" podID="f7109c3e-fe30-4320-bc5b-77e8d8d9fcd5" containerID="df96a23508edaf2f3aa425171422774275166b45b04e08150bfd011989c9be93" exitCode=0 Nov 28 07:17:48 crc kubenswrapper[4922]: I1128 07:17:48.124428 4922 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 28 07:17:48 crc kubenswrapper[4922]: I1128 07:17:48.124417 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"f7109c3e-fe30-4320-bc5b-77e8d8d9fcd5","Type":"ContainerDied","Data":"df96a23508edaf2f3aa425171422774275166b45b04e08150bfd011989c9be93"} Nov 28 07:17:48 crc kubenswrapper[4922]: I1128 07:17:48.124476 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"f7109c3e-fe30-4320-bc5b-77e8d8d9fcd5","Type":"ContainerDied","Data":"9cd35fa71c7f03b885e0f3947217209e0484a820b584921951bfeed938b4d971"} Nov 28 07:17:48 crc kubenswrapper[4922]: I1128 07:17:48.124503 4922 scope.go:117] "RemoveContainer" containerID="10ff0b42fda9ed0c0ed9344fff7081aed39e685a880052b47458eacb9c0a4858" Nov 28 07:17:48 crc kubenswrapper[4922]: I1128 07:17:48.150958 4922 scope.go:117] "RemoveContainer" containerID="3eeece16ff3dc32b9e585c6aac40d9542164997b870f99a0f6eb03fdfeb4385d" Nov 28 07:17:48 crc kubenswrapper[4922]: I1128 07:17:48.167764 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f7109c3e-fe30-4320-bc5b-77e8d8d9fcd5-combined-ca-bundle\") pod \"f7109c3e-fe30-4320-bc5b-77e8d8d9fcd5\" (UID: \"f7109c3e-fe30-4320-bc5b-77e8d8d9fcd5\") " Nov 28 07:17:48 crc kubenswrapper[4922]: I1128 07:17:48.167882 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f7109c3e-fe30-4320-bc5b-77e8d8d9fcd5-scripts\") pod \"f7109c3e-fe30-4320-bc5b-77e8d8d9fcd5\" (UID: \"f7109c3e-fe30-4320-bc5b-77e8d8d9fcd5\") " Nov 28 07:17:48 crc kubenswrapper[4922]: I1128 07:17:48.167914 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/f7109c3e-fe30-4320-bc5b-77e8d8d9fcd5-log-httpd\") pod \"f7109c3e-fe30-4320-bc5b-77e8d8d9fcd5\" (UID: \"f7109c3e-fe30-4320-bc5b-77e8d8d9fcd5\") " Nov 28 07:17:48 crc kubenswrapper[4922]: I1128 07:17:48.167931 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/f7109c3e-fe30-4320-bc5b-77e8d8d9fcd5-run-httpd\") pod \"f7109c3e-fe30-4320-bc5b-77e8d8d9fcd5\" (UID: \"f7109c3e-fe30-4320-bc5b-77e8d8d9fcd5\") " Nov 28 07:17:48 crc kubenswrapper[4922]: I1128 07:17:48.167993 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f7109c3e-fe30-4320-bc5b-77e8d8d9fcd5-config-data\") pod \"f7109c3e-fe30-4320-bc5b-77e8d8d9fcd5\" (UID: \"f7109c3e-fe30-4320-bc5b-77e8d8d9fcd5\") " Nov 28 07:17:48 crc kubenswrapper[4922]: I1128 07:17:48.168041 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/f7109c3e-fe30-4320-bc5b-77e8d8d9fcd5-sg-core-conf-yaml\") pod \"f7109c3e-fe30-4320-bc5b-77e8d8d9fcd5\" (UID: \"f7109c3e-fe30-4320-bc5b-77e8d8d9fcd5\") " Nov 28 07:17:48 crc kubenswrapper[4922]: I1128 07:17:48.168074 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7f9pt\" (UniqueName: \"kubernetes.io/projected/f7109c3e-fe30-4320-bc5b-77e8d8d9fcd5-kube-api-access-7f9pt\") pod \"f7109c3e-fe30-4320-bc5b-77e8d8d9fcd5\" (UID: \"f7109c3e-fe30-4320-bc5b-77e8d8d9fcd5\") " Nov 28 07:17:48 crc kubenswrapper[4922]: I1128 07:17:48.168927 4922 operation_generator.go:803] 
UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f7109c3e-fe30-4320-bc5b-77e8d8d9fcd5-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "f7109c3e-fe30-4320-bc5b-77e8d8d9fcd5" (UID: "f7109c3e-fe30-4320-bc5b-77e8d8d9fcd5"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 07:17:48 crc kubenswrapper[4922]: I1128 07:17:48.169205 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f7109c3e-fe30-4320-bc5b-77e8d8d9fcd5-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "f7109c3e-fe30-4320-bc5b-77e8d8d9fcd5" (UID: "f7109c3e-fe30-4320-bc5b-77e8d8d9fcd5"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 07:17:48 crc kubenswrapper[4922]: I1128 07:17:48.170226 4922 scope.go:117] "RemoveContainer" containerID="df96a23508edaf2f3aa425171422774275166b45b04e08150bfd011989c9be93" Nov 28 07:17:48 crc kubenswrapper[4922]: I1128 07:17:48.172873 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f7109c3e-fe30-4320-bc5b-77e8d8d9fcd5-scripts" (OuterVolumeSpecName: "scripts") pod "f7109c3e-fe30-4320-bc5b-77e8d8d9fcd5" (UID: "f7109c3e-fe30-4320-bc5b-77e8d8d9fcd5"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 07:17:48 crc kubenswrapper[4922]: I1128 07:17:48.173507 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f7109c3e-fe30-4320-bc5b-77e8d8d9fcd5-kube-api-access-7f9pt" (OuterVolumeSpecName: "kube-api-access-7f9pt") pod "f7109c3e-fe30-4320-bc5b-77e8d8d9fcd5" (UID: "f7109c3e-fe30-4320-bc5b-77e8d8d9fcd5"). InnerVolumeSpecName "kube-api-access-7f9pt". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 07:17:48 crc kubenswrapper[4922]: I1128 07:17:48.198592 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f7109c3e-fe30-4320-bc5b-77e8d8d9fcd5-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "f7109c3e-fe30-4320-bc5b-77e8d8d9fcd5" (UID: "f7109c3e-fe30-4320-bc5b-77e8d8d9fcd5"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 07:17:48 crc kubenswrapper[4922]: I1128 07:17:48.241284 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f7109c3e-fe30-4320-bc5b-77e8d8d9fcd5-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "f7109c3e-fe30-4320-bc5b-77e8d8d9fcd5" (UID: "f7109c3e-fe30-4320-bc5b-77e8d8d9fcd5"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 07:17:48 crc kubenswrapper[4922]: I1128 07:17:48.259312 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f7109c3e-fe30-4320-bc5b-77e8d8d9fcd5-config-data" (OuterVolumeSpecName: "config-data") pod "f7109c3e-fe30-4320-bc5b-77e8d8d9fcd5" (UID: "f7109c3e-fe30-4320-bc5b-77e8d8d9fcd5"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 07:17:48 crc kubenswrapper[4922]: I1128 07:17:48.270638 4922 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f7109c3e-fe30-4320-bc5b-77e8d8d9fcd5-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 07:17:48 crc kubenswrapper[4922]: I1128 07:17:48.270672 4922 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f7109c3e-fe30-4320-bc5b-77e8d8d9fcd5-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 07:17:48 crc kubenswrapper[4922]: I1128 07:17:48.270685 4922 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/f7109c3e-fe30-4320-bc5b-77e8d8d9fcd5-log-httpd\") on node \"crc\" DevicePath \"\"" Nov 28 07:17:48 crc kubenswrapper[4922]: I1128 07:17:48.270697 4922 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/f7109c3e-fe30-4320-bc5b-77e8d8d9fcd5-run-httpd\") on node \"crc\" DevicePath \"\"" Nov 28 07:17:48 crc kubenswrapper[4922]: I1128 07:17:48.270708 4922 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f7109c3e-fe30-4320-bc5b-77e8d8d9fcd5-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 07:17:48 crc kubenswrapper[4922]: I1128 07:17:48.270721 4922 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/f7109c3e-fe30-4320-bc5b-77e8d8d9fcd5-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Nov 28 07:17:48 crc kubenswrapper[4922]: I1128 07:17:48.270733 4922 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7f9pt\" (UniqueName: \"kubernetes.io/projected/f7109c3e-fe30-4320-bc5b-77e8d8d9fcd5-kube-api-access-7f9pt\") on node \"crc\" DevicePath \"\"" Nov 28 07:17:48 crc kubenswrapper[4922]: I1128 07:17:48.293378 4922 scope.go:117] "RemoveContainer" containerID="24a754b3e28e220f702cbe423f7a8548e10bca4d828b90b098c741f027d4ae2e" Nov 28 07:17:48 crc kubenswrapper[4922]: I1128 07:17:48.313783 4922 scope.go:117] "RemoveContainer" containerID="10ff0b42fda9ed0c0ed9344fff7081aed39e685a880052b47458eacb9c0a4858" Nov 28 07:17:48 crc kubenswrapper[4922]: E1128 07:17:48.314210 4922 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"10ff0b42fda9ed0c0ed9344fff7081aed39e685a880052b47458eacb9c0a4858\": container with ID starting with 10ff0b42fda9ed0c0ed9344fff7081aed39e685a880052b47458eacb9c0a4858 not found: ID does not exist" containerID="10ff0b42fda9ed0c0ed9344fff7081aed39e685a880052b47458eacb9c0a4858" Nov 28 07:17:48 crc kubenswrapper[4922]: I1128 07:17:48.314274 4922 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"10ff0b42fda9ed0c0ed9344fff7081aed39e685a880052b47458eacb9c0a4858"} err="failed to get container status \"10ff0b42fda9ed0c0ed9344fff7081aed39e685a880052b47458eacb9c0a4858\": rpc error: code = NotFound desc = could not find container \"10ff0b42fda9ed0c0ed9344fff7081aed39e685a880052b47458eacb9c0a4858\": container with ID starting with 10ff0b42fda9ed0c0ed9344fff7081aed39e685a880052b47458eacb9c0a4858 not found: ID does not exist" Nov 28 07:17:48 crc kubenswrapper[4922]: I1128 07:17:48.314299 4922 scope.go:117] "RemoveContainer" containerID="3eeece16ff3dc32b9e585c6aac40d9542164997b870f99a0f6eb03fdfeb4385d" Nov 28 07:17:48 crc kubenswrapper[4922]: E1128 
07:17:48.314599 4922 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3eeece16ff3dc32b9e585c6aac40d9542164997b870f99a0f6eb03fdfeb4385d\": container with ID starting with 3eeece16ff3dc32b9e585c6aac40d9542164997b870f99a0f6eb03fdfeb4385d not found: ID does not exist" containerID="3eeece16ff3dc32b9e585c6aac40d9542164997b870f99a0f6eb03fdfeb4385d" Nov 28 07:17:48 crc kubenswrapper[4922]: I1128 07:17:48.314629 4922 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3eeece16ff3dc32b9e585c6aac40d9542164997b870f99a0f6eb03fdfeb4385d"} err="failed to get container status \"3eeece16ff3dc32b9e585c6aac40d9542164997b870f99a0f6eb03fdfeb4385d\": rpc error: code = NotFound desc = could not find container \"3eeece16ff3dc32b9e585c6aac40d9542164997b870f99a0f6eb03fdfeb4385d\": container with ID starting with 3eeece16ff3dc32b9e585c6aac40d9542164997b870f99a0f6eb03fdfeb4385d not found: ID does not exist" Nov 28 07:17:48 crc kubenswrapper[4922]: I1128 07:17:48.314648 4922 scope.go:117] "RemoveContainer" containerID="df96a23508edaf2f3aa425171422774275166b45b04e08150bfd011989c9be93" Nov 28 07:17:48 crc kubenswrapper[4922]: E1128 07:17:48.314951 4922 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"df96a23508edaf2f3aa425171422774275166b45b04e08150bfd011989c9be93\": container with ID starting with df96a23508edaf2f3aa425171422774275166b45b04e08150bfd011989c9be93 not found: ID does not exist" containerID="df96a23508edaf2f3aa425171422774275166b45b04e08150bfd011989c9be93" Nov 28 07:17:48 crc kubenswrapper[4922]: I1128 07:17:48.314996 4922 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"df96a23508edaf2f3aa425171422774275166b45b04e08150bfd011989c9be93"} err="failed to get container status \"df96a23508edaf2f3aa425171422774275166b45b04e08150bfd011989c9be93\": rpc error: code = NotFound desc = could not find container \"df96a23508edaf2f3aa425171422774275166b45b04e08150bfd011989c9be93\": container with ID starting with df96a23508edaf2f3aa425171422774275166b45b04e08150bfd011989c9be93 not found: ID does not exist" Nov 28 07:17:48 crc kubenswrapper[4922]: I1128 07:17:48.315018 4922 scope.go:117] "RemoveContainer" containerID="24a754b3e28e220f702cbe423f7a8548e10bca4d828b90b098c741f027d4ae2e" Nov 28 07:17:48 crc kubenswrapper[4922]: E1128 07:17:48.315383 4922 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"24a754b3e28e220f702cbe423f7a8548e10bca4d828b90b098c741f027d4ae2e\": container with ID starting with 24a754b3e28e220f702cbe423f7a8548e10bca4d828b90b098c741f027d4ae2e not found: ID does not exist" containerID="24a754b3e28e220f702cbe423f7a8548e10bca4d828b90b098c741f027d4ae2e" Nov 28 07:17:48 crc kubenswrapper[4922]: I1128 07:17:48.315414 4922 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"24a754b3e28e220f702cbe423f7a8548e10bca4d828b90b098c741f027d4ae2e"} err="failed to get container status \"24a754b3e28e220f702cbe423f7a8548e10bca4d828b90b098c741f027d4ae2e\": rpc error: code = NotFound desc = could not find container \"24a754b3e28e220f702cbe423f7a8548e10bca4d828b90b098c741f027d4ae2e\": container with ID starting with 24a754b3e28e220f702cbe423f7a8548e10bca4d828b90b098c741f027d4ae2e not found: ID does not exist" Nov 28 07:17:48 crc kubenswrapper[4922]: I1128 07:17:48.462371 4922 
kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 28 07:17:48 crc kubenswrapper[4922]: I1128 07:17:48.476241 4922 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Nov 28 07:17:48 crc kubenswrapper[4922]: I1128 07:17:48.486691 4922 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Nov 28 07:17:48 crc kubenswrapper[4922]: E1128 07:17:48.487093 4922 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f7109c3e-fe30-4320-bc5b-77e8d8d9fcd5" containerName="ceilometer-central-agent" Nov 28 07:17:48 crc kubenswrapper[4922]: I1128 07:17:48.487110 4922 state_mem.go:107] "Deleted CPUSet assignment" podUID="f7109c3e-fe30-4320-bc5b-77e8d8d9fcd5" containerName="ceilometer-central-agent" Nov 28 07:17:48 crc kubenswrapper[4922]: E1128 07:17:48.487123 4922 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f7109c3e-fe30-4320-bc5b-77e8d8d9fcd5" containerName="ceilometer-notification-agent" Nov 28 07:17:48 crc kubenswrapper[4922]: I1128 07:17:48.487132 4922 state_mem.go:107] "Deleted CPUSet assignment" podUID="f7109c3e-fe30-4320-bc5b-77e8d8d9fcd5" containerName="ceilometer-notification-agent" Nov 28 07:17:48 crc kubenswrapper[4922]: E1128 07:17:48.487161 4922 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f7109c3e-fe30-4320-bc5b-77e8d8d9fcd5" containerName="proxy-httpd" Nov 28 07:17:48 crc kubenswrapper[4922]: I1128 07:17:48.487168 4922 state_mem.go:107] "Deleted CPUSet assignment" podUID="f7109c3e-fe30-4320-bc5b-77e8d8d9fcd5" containerName="proxy-httpd" Nov 28 07:17:48 crc kubenswrapper[4922]: E1128 07:17:48.487183 4922 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f7109c3e-fe30-4320-bc5b-77e8d8d9fcd5" containerName="sg-core" Nov 28 07:17:48 crc kubenswrapper[4922]: I1128 07:17:48.487188 4922 state_mem.go:107] "Deleted CPUSet assignment" podUID="f7109c3e-fe30-4320-bc5b-77e8d8d9fcd5" containerName="sg-core" Nov 28 07:17:48 crc kubenswrapper[4922]: I1128 07:17:48.487388 4922 memory_manager.go:354] "RemoveStaleState removing state" podUID="f7109c3e-fe30-4320-bc5b-77e8d8d9fcd5" containerName="ceilometer-central-agent" Nov 28 07:17:48 crc kubenswrapper[4922]: I1128 07:17:48.487408 4922 memory_manager.go:354] "RemoveStaleState removing state" podUID="f7109c3e-fe30-4320-bc5b-77e8d8d9fcd5" containerName="proxy-httpd" Nov 28 07:17:48 crc kubenswrapper[4922]: I1128 07:17:48.487419 4922 memory_manager.go:354] "RemoveStaleState removing state" podUID="f7109c3e-fe30-4320-bc5b-77e8d8d9fcd5" containerName="ceilometer-notification-agent" Nov 28 07:17:48 crc kubenswrapper[4922]: I1128 07:17:48.487434 4922 memory_manager.go:354] "RemoveStaleState removing state" podUID="f7109c3e-fe30-4320-bc5b-77e8d8d9fcd5" containerName="sg-core" Nov 28 07:17:48 crc kubenswrapper[4922]: I1128 07:17:48.489122 4922 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 28 07:17:48 crc kubenswrapper[4922]: I1128 07:17:48.491878 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Nov 28 07:17:48 crc kubenswrapper[4922]: I1128 07:17:48.491951 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Nov 28 07:17:48 crc kubenswrapper[4922]: I1128 07:17:48.492064 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ceilometer-internal-svc" Nov 28 07:17:48 crc kubenswrapper[4922]: I1128 07:17:48.494870 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 28 07:17:48 crc kubenswrapper[4922]: I1128 07:17:48.677185 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/ddc9a938-678c-447d-9d92-5aae0ba2a3f2-log-httpd\") pod \"ceilometer-0\" (UID: \"ddc9a938-678c-447d-9d92-5aae0ba2a3f2\") " pod="openstack/ceilometer-0" Nov 28 07:17:48 crc kubenswrapper[4922]: I1128 07:17:48.677247 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/ddc9a938-678c-447d-9d92-5aae0ba2a3f2-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"ddc9a938-678c-447d-9d92-5aae0ba2a3f2\") " pod="openstack/ceilometer-0" Nov 28 07:17:48 crc kubenswrapper[4922]: I1128 07:17:48.677277 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-k2x27\" (UniqueName: \"kubernetes.io/projected/ddc9a938-678c-447d-9d92-5aae0ba2a3f2-kube-api-access-k2x27\") pod \"ceilometer-0\" (UID: \"ddc9a938-678c-447d-9d92-5aae0ba2a3f2\") " pod="openstack/ceilometer-0" Nov 28 07:17:48 crc kubenswrapper[4922]: I1128 07:17:48.677381 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/ddc9a938-678c-447d-9d92-5aae0ba2a3f2-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"ddc9a938-678c-447d-9d92-5aae0ba2a3f2\") " pod="openstack/ceilometer-0" Nov 28 07:17:48 crc kubenswrapper[4922]: I1128 07:17:48.677401 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ddc9a938-678c-447d-9d92-5aae0ba2a3f2-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"ddc9a938-678c-447d-9d92-5aae0ba2a3f2\") " pod="openstack/ceilometer-0" Nov 28 07:17:48 crc kubenswrapper[4922]: I1128 07:17:48.677425 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ddc9a938-678c-447d-9d92-5aae0ba2a3f2-config-data\") pod \"ceilometer-0\" (UID: \"ddc9a938-678c-447d-9d92-5aae0ba2a3f2\") " pod="openstack/ceilometer-0" Nov 28 07:17:48 crc kubenswrapper[4922]: I1128 07:17:48.677444 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ddc9a938-678c-447d-9d92-5aae0ba2a3f2-scripts\") pod \"ceilometer-0\" (UID: \"ddc9a938-678c-447d-9d92-5aae0ba2a3f2\") " pod="openstack/ceilometer-0" Nov 28 07:17:48 crc kubenswrapper[4922]: I1128 07:17:48.677461 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: 
\"kubernetes.io/empty-dir/ddc9a938-678c-447d-9d92-5aae0ba2a3f2-run-httpd\") pod \"ceilometer-0\" (UID: \"ddc9a938-678c-447d-9d92-5aae0ba2a3f2\") " pod="openstack/ceilometer-0" Nov 28 07:17:48 crc kubenswrapper[4922]: I1128 07:17:48.779038 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/ddc9a938-678c-447d-9d92-5aae0ba2a3f2-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"ddc9a938-678c-447d-9d92-5aae0ba2a3f2\") " pod="openstack/ceilometer-0" Nov 28 07:17:48 crc kubenswrapper[4922]: I1128 07:17:48.779096 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ddc9a938-678c-447d-9d92-5aae0ba2a3f2-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"ddc9a938-678c-447d-9d92-5aae0ba2a3f2\") " pod="openstack/ceilometer-0" Nov 28 07:17:48 crc kubenswrapper[4922]: I1128 07:17:48.779139 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ddc9a938-678c-447d-9d92-5aae0ba2a3f2-config-data\") pod \"ceilometer-0\" (UID: \"ddc9a938-678c-447d-9d92-5aae0ba2a3f2\") " pod="openstack/ceilometer-0" Nov 28 07:17:48 crc kubenswrapper[4922]: I1128 07:17:48.779165 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ddc9a938-678c-447d-9d92-5aae0ba2a3f2-scripts\") pod \"ceilometer-0\" (UID: \"ddc9a938-678c-447d-9d92-5aae0ba2a3f2\") " pod="openstack/ceilometer-0" Nov 28 07:17:48 crc kubenswrapper[4922]: I1128 07:17:48.779193 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/ddc9a938-678c-447d-9d92-5aae0ba2a3f2-run-httpd\") pod \"ceilometer-0\" (UID: \"ddc9a938-678c-447d-9d92-5aae0ba2a3f2\") " pod="openstack/ceilometer-0" Nov 28 07:17:48 crc kubenswrapper[4922]: I1128 07:17:48.779310 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/ddc9a938-678c-447d-9d92-5aae0ba2a3f2-log-httpd\") pod \"ceilometer-0\" (UID: \"ddc9a938-678c-447d-9d92-5aae0ba2a3f2\") " pod="openstack/ceilometer-0" Nov 28 07:17:48 crc kubenswrapper[4922]: I1128 07:17:48.779355 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/ddc9a938-678c-447d-9d92-5aae0ba2a3f2-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"ddc9a938-678c-447d-9d92-5aae0ba2a3f2\") " pod="openstack/ceilometer-0" Nov 28 07:17:48 crc kubenswrapper[4922]: I1128 07:17:48.779376 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-k2x27\" (UniqueName: \"kubernetes.io/projected/ddc9a938-678c-447d-9d92-5aae0ba2a3f2-kube-api-access-k2x27\") pod \"ceilometer-0\" (UID: \"ddc9a938-678c-447d-9d92-5aae0ba2a3f2\") " pod="openstack/ceilometer-0" Nov 28 07:17:48 crc kubenswrapper[4922]: I1128 07:17:48.780903 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/ddc9a938-678c-447d-9d92-5aae0ba2a3f2-run-httpd\") pod \"ceilometer-0\" (UID: \"ddc9a938-678c-447d-9d92-5aae0ba2a3f2\") " pod="openstack/ceilometer-0" Nov 28 07:17:48 crc kubenswrapper[4922]: I1128 07:17:48.781570 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: 
\"kubernetes.io/empty-dir/ddc9a938-678c-447d-9d92-5aae0ba2a3f2-log-httpd\") pod \"ceilometer-0\" (UID: \"ddc9a938-678c-447d-9d92-5aae0ba2a3f2\") " pod="openstack/ceilometer-0" Nov 28 07:17:48 crc kubenswrapper[4922]: I1128 07:17:48.783848 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/ddc9a938-678c-447d-9d92-5aae0ba2a3f2-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"ddc9a938-678c-447d-9d92-5aae0ba2a3f2\") " pod="openstack/ceilometer-0" Nov 28 07:17:48 crc kubenswrapper[4922]: I1128 07:17:48.789471 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ddc9a938-678c-447d-9d92-5aae0ba2a3f2-config-data\") pod \"ceilometer-0\" (UID: \"ddc9a938-678c-447d-9d92-5aae0ba2a3f2\") " pod="openstack/ceilometer-0" Nov 28 07:17:48 crc kubenswrapper[4922]: I1128 07:17:48.791350 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ddc9a938-678c-447d-9d92-5aae0ba2a3f2-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"ddc9a938-678c-447d-9d92-5aae0ba2a3f2\") " pod="openstack/ceilometer-0" Nov 28 07:17:48 crc kubenswrapper[4922]: I1128 07:17:48.793474 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ddc9a938-678c-447d-9d92-5aae0ba2a3f2-scripts\") pod \"ceilometer-0\" (UID: \"ddc9a938-678c-447d-9d92-5aae0ba2a3f2\") " pod="openstack/ceilometer-0" Nov 28 07:17:48 crc kubenswrapper[4922]: I1128 07:17:48.793647 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/ddc9a938-678c-447d-9d92-5aae0ba2a3f2-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"ddc9a938-678c-447d-9d92-5aae0ba2a3f2\") " pod="openstack/ceilometer-0" Nov 28 07:17:48 crc kubenswrapper[4922]: I1128 07:17:48.816194 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-k2x27\" (UniqueName: \"kubernetes.io/projected/ddc9a938-678c-447d-9d92-5aae0ba2a3f2-kube-api-access-k2x27\") pod \"ceilometer-0\" (UID: \"ddc9a938-678c-447d-9d92-5aae0ba2a3f2\") " pod="openstack/ceilometer-0" Nov 28 07:17:49 crc kubenswrapper[4922]: I1128 07:17:49.107899 4922 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 28 07:17:49 crc kubenswrapper[4922]: I1128 07:17:49.415678 4922 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f7109c3e-fe30-4320-bc5b-77e8d8d9fcd5" path="/var/lib/kubelet/pods/f7109c3e-fe30-4320-bc5b-77e8d8d9fcd5/volumes" Nov 28 07:17:49 crc kubenswrapper[4922]: I1128 07:17:49.605611 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 28 07:17:50 crc kubenswrapper[4922]: I1128 07:17:50.149387 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"ddc9a938-678c-447d-9d92-5aae0ba2a3f2","Type":"ContainerStarted","Data":"45edf549ff46b90859a47fcda8568d9a596b1d48e1c19581d54428f4b013274b"} Nov 28 07:17:51 crc kubenswrapper[4922]: I1128 07:17:51.164443 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"ddc9a938-678c-447d-9d92-5aae0ba2a3f2","Type":"ContainerStarted","Data":"20f82159e489b6cb472be918aead4c432c9f5f948e88df6c5be425834b6fedbc"} Nov 28 07:17:51 crc kubenswrapper[4922]: I1128 07:17:51.164826 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"ddc9a938-678c-447d-9d92-5aae0ba2a3f2","Type":"ContainerStarted","Data":"8e7719ea7956a92a12da3a26a3db3542e7fae883e1ad4c52618dad605078d602"} Nov 28 07:17:51 crc kubenswrapper[4922]: I1128 07:17:51.452602 4922 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-scheduler-0" Nov 28 07:17:51 crc kubenswrapper[4922]: I1128 07:17:51.471969 4922 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Nov 28 07:17:51 crc kubenswrapper[4922]: I1128 07:17:51.472041 4922 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Nov 28 07:17:51 crc kubenswrapper[4922]: I1128 07:17:51.484557 4922 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-scheduler-0" Nov 28 07:17:52 crc kubenswrapper[4922]: I1128 07:17:52.177499 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"ddc9a938-678c-447d-9d92-5aae0ba2a3f2","Type":"ContainerStarted","Data":"65f2b52fa8944addce7361c5c245f10f09089a39e74515f16bf0634508878078"} Nov 28 07:17:52 crc kubenswrapper[4922]: I1128 07:17:52.213329 4922 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-scheduler-0" Nov 28 07:17:52 crc kubenswrapper[4922]: I1128 07:17:52.513422 4922 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="541557ff-fa14-4f6c-b957-716ba8fdb38e" containerName="nova-api-log" probeResult="failure" output="Get \"http://10.217.0.190:8774/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 28 07:17:52 crc kubenswrapper[4922]: I1128 07:17:52.555505 4922 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="541557ff-fa14-4f6c-b957-716ba8fdb38e" containerName="nova-api-api" probeResult="failure" output="Get \"http://10.217.0.190:8774/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 28 07:17:53 crc kubenswrapper[4922]: E1128 07:17:53.254635 4922 fsHandler.go:119] failed to collect filesystem stats - rootDiskErr: could not stat "/var/lib/containers/storage/overlay/2a205b98a3ce0f5cdcfd043f13e4ab022899bb4edcc7d77fe9a4bfbbdb623469/diff" to get inode usage: stat 
/var/lib/containers/storage/overlay/2a205b98a3ce0f5cdcfd043f13e4ab022899bb4edcc7d77fe9a4bfbbdb623469/diff: no such file or directory, extraDiskErr: could not stat "/var/log/pods/openstack_kube-state-metrics-0_43f49b9c-475e-4b28-ada6-e73db47e4bd7/kube-state-metrics/0.log" to get inode usage: stat /var/log/pods/openstack_kube-state-metrics-0_43f49b9c-475e-4b28-ada6-e73db47e4bd7/kube-state-metrics/0.log: no such file or directory Nov 28 07:17:54 crc kubenswrapper[4922]: I1128 07:17:54.195321 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"ddc9a938-678c-447d-9d92-5aae0ba2a3f2","Type":"ContainerStarted","Data":"4515b03feb8c8fb76d0ad9e84a0bf8c6c6acf8f1a769733153def4eacb299d6b"} Nov 28 07:17:54 crc kubenswrapper[4922]: I1128 07:17:54.195650 4922 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Nov 28 07:17:54 crc kubenswrapper[4922]: I1128 07:17:54.218474 4922 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=2.828226372 podStartE2EDuration="6.218453474s" podCreationTimestamp="2025-11-28 07:17:48 +0000 UTC" firstStartedPulling="2025-11-28 07:17:49.611348966 +0000 UTC m=+1514.531744548" lastFinishedPulling="2025-11-28 07:17:53.001576078 +0000 UTC m=+1517.921971650" observedRunningTime="2025-11-28 07:17:54.21305371 +0000 UTC m=+1519.133449302" watchObservedRunningTime="2025-11-28 07:17:54.218453474 +0000 UTC m=+1519.138849056" Nov 28 07:17:55 crc kubenswrapper[4922]: I1128 07:17:55.523343 4922 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/kube-state-metrics-0" Nov 28 07:17:59 crc kubenswrapper[4922]: E1128 07:17:59.197290 4922 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod56dcaf4a_8a1b_471b_9f04_c372f15b8fa5.slice/crio-a009cbf707531870a158c461b6eb3d1c1b0328701de2624ac27d511eb95f3f7f.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod56dcaf4a_8a1b_471b_9f04_c372f15b8fa5.slice/crio-conmon-a009cbf707531870a158c461b6eb3d1c1b0328701de2624ac27d511eb95f3f7f.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podb9239175_af51_46ed_a36b_a5b98a1bb790.slice/crio-conmon-a7ce397f63e2c6437b00b9fae1a00e0f14b52fe35434a7c615e78de81dc1cdf2.scope\": RecentStats: unable to find data in memory cache]" Nov 28 07:17:59 crc kubenswrapper[4922]: I1128 07:17:59.271019 4922 generic.go:334] "Generic (PLEG): container finished" podID="b9239175-af51-46ed-a36b-a5b98a1bb790" containerID="a7ce397f63e2c6437b00b9fae1a00e0f14b52fe35434a7c615e78de81dc1cdf2" exitCode=137 Nov 28 07:17:59 crc kubenswrapper[4922]: I1128 07:17:59.271466 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"b9239175-af51-46ed-a36b-a5b98a1bb790","Type":"ContainerDied","Data":"a7ce397f63e2c6437b00b9fae1a00e0f14b52fe35434a7c615e78de81dc1cdf2"} Nov 28 07:17:59 crc kubenswrapper[4922]: I1128 07:17:59.275675 4922 generic.go:334] "Generic (PLEG): container finished" podID="56dcaf4a-8a1b-471b-9f04-c372f15b8fa5" containerID="a009cbf707531870a158c461b6eb3d1c1b0328701de2624ac27d511eb95f3f7f" exitCode=137 Nov 28 07:17:59 crc kubenswrapper[4922]: I1128 07:17:59.275720 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack/nova-cell1-novncproxy-0" event={"ID":"56dcaf4a-8a1b-471b-9f04-c372f15b8fa5","Type":"ContainerDied","Data":"a009cbf707531870a158c461b6eb3d1c1b0328701de2624ac27d511eb95f3f7f"} Nov 28 07:17:59 crc kubenswrapper[4922]: I1128 07:17:59.403539 4922 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Nov 28 07:17:59 crc kubenswrapper[4922]: I1128 07:17:59.412381 4922 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Nov 28 07:17:59 crc kubenswrapper[4922]: I1128 07:17:59.433289 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/56dcaf4a-8a1b-471b-9f04-c372f15b8fa5-config-data\") pod \"56dcaf4a-8a1b-471b-9f04-c372f15b8fa5\" (UID: \"56dcaf4a-8a1b-471b-9f04-c372f15b8fa5\") " Nov 28 07:17:59 crc kubenswrapper[4922]: I1128 07:17:59.433376 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b9239175-af51-46ed-a36b-a5b98a1bb790-combined-ca-bundle\") pod \"b9239175-af51-46ed-a36b-a5b98a1bb790\" (UID: \"b9239175-af51-46ed-a36b-a5b98a1bb790\") " Nov 28 07:17:59 crc kubenswrapper[4922]: I1128 07:17:59.433424 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b9239175-af51-46ed-a36b-a5b98a1bb790-logs\") pod \"b9239175-af51-46ed-a36b-a5b98a1bb790\" (UID: \"b9239175-af51-46ed-a36b-a5b98a1bb790\") " Nov 28 07:17:59 crc kubenswrapper[4922]: I1128 07:17:59.433447 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-t9hc2\" (UniqueName: \"kubernetes.io/projected/56dcaf4a-8a1b-471b-9f04-c372f15b8fa5-kube-api-access-t9hc2\") pod \"56dcaf4a-8a1b-471b-9f04-c372f15b8fa5\" (UID: \"56dcaf4a-8a1b-471b-9f04-c372f15b8fa5\") " Nov 28 07:17:59 crc kubenswrapper[4922]: I1128 07:17:59.433484 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b9239175-af51-46ed-a36b-a5b98a1bb790-config-data\") pod \"b9239175-af51-46ed-a36b-a5b98a1bb790\" (UID: \"b9239175-af51-46ed-a36b-a5b98a1bb790\") " Nov 28 07:17:59 crc kubenswrapper[4922]: I1128 07:17:59.433543 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ws7mm\" (UniqueName: \"kubernetes.io/projected/b9239175-af51-46ed-a36b-a5b98a1bb790-kube-api-access-ws7mm\") pod \"b9239175-af51-46ed-a36b-a5b98a1bb790\" (UID: \"b9239175-af51-46ed-a36b-a5b98a1bb790\") " Nov 28 07:17:59 crc kubenswrapper[4922]: I1128 07:17:59.433572 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/56dcaf4a-8a1b-471b-9f04-c372f15b8fa5-combined-ca-bundle\") pod \"56dcaf4a-8a1b-471b-9f04-c372f15b8fa5\" (UID: \"56dcaf4a-8a1b-471b-9f04-c372f15b8fa5\") " Nov 28 07:17:59 crc kubenswrapper[4922]: I1128 07:17:59.437385 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b9239175-af51-46ed-a36b-a5b98a1bb790-logs" (OuterVolumeSpecName: "logs") pod "b9239175-af51-46ed-a36b-a5b98a1bb790" (UID: "b9239175-af51-46ed-a36b-a5b98a1bb790"). InnerVolumeSpecName "logs". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 07:17:59 crc kubenswrapper[4922]: I1128 07:17:59.441300 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b9239175-af51-46ed-a36b-a5b98a1bb790-kube-api-access-ws7mm" (OuterVolumeSpecName: "kube-api-access-ws7mm") pod "b9239175-af51-46ed-a36b-a5b98a1bb790" (UID: "b9239175-af51-46ed-a36b-a5b98a1bb790"). InnerVolumeSpecName "kube-api-access-ws7mm". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 07:17:59 crc kubenswrapper[4922]: I1128 07:17:59.446620 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/56dcaf4a-8a1b-471b-9f04-c372f15b8fa5-kube-api-access-t9hc2" (OuterVolumeSpecName: "kube-api-access-t9hc2") pod "56dcaf4a-8a1b-471b-9f04-c372f15b8fa5" (UID: "56dcaf4a-8a1b-471b-9f04-c372f15b8fa5"). InnerVolumeSpecName "kube-api-access-t9hc2". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 07:17:59 crc kubenswrapper[4922]: I1128 07:17:59.465696 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b9239175-af51-46ed-a36b-a5b98a1bb790-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "b9239175-af51-46ed-a36b-a5b98a1bb790" (UID: "b9239175-af51-46ed-a36b-a5b98a1bb790"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 07:17:59 crc kubenswrapper[4922]: I1128 07:17:59.468367 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/56dcaf4a-8a1b-471b-9f04-c372f15b8fa5-config-data" (OuterVolumeSpecName: "config-data") pod "56dcaf4a-8a1b-471b-9f04-c372f15b8fa5" (UID: "56dcaf4a-8a1b-471b-9f04-c372f15b8fa5"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 07:17:59 crc kubenswrapper[4922]: I1128 07:17:59.473933 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b9239175-af51-46ed-a36b-a5b98a1bb790-config-data" (OuterVolumeSpecName: "config-data") pod "b9239175-af51-46ed-a36b-a5b98a1bb790" (UID: "b9239175-af51-46ed-a36b-a5b98a1bb790"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 07:17:59 crc kubenswrapper[4922]: I1128 07:17:59.475829 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/56dcaf4a-8a1b-471b-9f04-c372f15b8fa5-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "56dcaf4a-8a1b-471b-9f04-c372f15b8fa5" (UID: "56dcaf4a-8a1b-471b-9f04-c372f15b8fa5"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 07:17:59 crc kubenswrapper[4922]: I1128 07:17:59.536089 4922 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b9239175-af51-46ed-a36b-a5b98a1bb790-logs\") on node \"crc\" DevicePath \"\"" Nov 28 07:17:59 crc kubenswrapper[4922]: I1128 07:17:59.536132 4922 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-t9hc2\" (UniqueName: \"kubernetes.io/projected/56dcaf4a-8a1b-471b-9f04-c372f15b8fa5-kube-api-access-t9hc2\") on node \"crc\" DevicePath \"\"" Nov 28 07:17:59 crc kubenswrapper[4922]: I1128 07:17:59.536147 4922 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b9239175-af51-46ed-a36b-a5b98a1bb790-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 07:17:59 crc kubenswrapper[4922]: I1128 07:17:59.536162 4922 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ws7mm\" (UniqueName: \"kubernetes.io/projected/b9239175-af51-46ed-a36b-a5b98a1bb790-kube-api-access-ws7mm\") on node \"crc\" DevicePath \"\"" Nov 28 07:17:59 crc kubenswrapper[4922]: I1128 07:17:59.536174 4922 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/56dcaf4a-8a1b-471b-9f04-c372f15b8fa5-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 07:17:59 crc kubenswrapper[4922]: I1128 07:17:59.536186 4922 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/56dcaf4a-8a1b-471b-9f04-c372f15b8fa5-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 07:17:59 crc kubenswrapper[4922]: I1128 07:17:59.536199 4922 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b9239175-af51-46ed-a36b-a5b98a1bb790-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 07:18:00 crc kubenswrapper[4922]: I1128 07:18:00.288785 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"56dcaf4a-8a1b-471b-9f04-c372f15b8fa5","Type":"ContainerDied","Data":"db559aaf5565335b2b892cbd01715b061dc29e55a215040eb5b0cdf095ce107a"} Nov 28 07:18:00 crc kubenswrapper[4922]: I1128 07:18:00.288827 4922 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Nov 28 07:18:00 crc kubenswrapper[4922]: I1128 07:18:00.289130 4922 scope.go:117] "RemoveContainer" containerID="a009cbf707531870a158c461b6eb3d1c1b0328701de2624ac27d511eb95f3f7f" Nov 28 07:18:00 crc kubenswrapper[4922]: I1128 07:18:00.291861 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"b9239175-af51-46ed-a36b-a5b98a1bb790","Type":"ContainerDied","Data":"79cd883bce84774776051ed457e1ffa2c8c667293281c4a9b7a6818a9fd6c448"} Nov 28 07:18:00 crc kubenswrapper[4922]: I1128 07:18:00.291931 4922 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Nov 28 07:18:00 crc kubenswrapper[4922]: I1128 07:18:00.323479 4922 scope.go:117] "RemoveContainer" containerID="a7ce397f63e2c6437b00b9fae1a00e0f14b52fe35434a7c615e78de81dc1cdf2" Nov 28 07:18:00 crc kubenswrapper[4922]: I1128 07:18:00.339782 4922 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Nov 28 07:18:00 crc kubenswrapper[4922]: I1128 07:18:00.348528 4922 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-metadata-0"] Nov 28 07:18:00 crc kubenswrapper[4922]: I1128 07:18:00.360592 4922 scope.go:117] "RemoveContainer" containerID="6f611dbe3a4e388935f6d106d42d124fc15b557a46cdb2ff260bd9f2f857e234" Nov 28 07:18:00 crc kubenswrapper[4922]: I1128 07:18:00.361499 4922 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-metadata-0"] Nov 28 07:18:00 crc kubenswrapper[4922]: E1128 07:18:00.361975 4922 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b9239175-af51-46ed-a36b-a5b98a1bb790" containerName="nova-metadata-log" Nov 28 07:18:00 crc kubenswrapper[4922]: I1128 07:18:00.361996 4922 state_mem.go:107] "Deleted CPUSet assignment" podUID="b9239175-af51-46ed-a36b-a5b98a1bb790" containerName="nova-metadata-log" Nov 28 07:18:00 crc kubenswrapper[4922]: E1128 07:18:00.362017 4922 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="56dcaf4a-8a1b-471b-9f04-c372f15b8fa5" containerName="nova-cell1-novncproxy-novncproxy" Nov 28 07:18:00 crc kubenswrapper[4922]: I1128 07:18:00.362026 4922 state_mem.go:107] "Deleted CPUSet assignment" podUID="56dcaf4a-8a1b-471b-9f04-c372f15b8fa5" containerName="nova-cell1-novncproxy-novncproxy" Nov 28 07:18:00 crc kubenswrapper[4922]: E1128 07:18:00.362057 4922 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b9239175-af51-46ed-a36b-a5b98a1bb790" containerName="nova-metadata-metadata" Nov 28 07:18:00 crc kubenswrapper[4922]: I1128 07:18:00.362065 4922 state_mem.go:107] "Deleted CPUSet assignment" podUID="b9239175-af51-46ed-a36b-a5b98a1bb790" containerName="nova-metadata-metadata" Nov 28 07:18:00 crc kubenswrapper[4922]: I1128 07:18:00.362345 4922 memory_manager.go:354] "RemoveStaleState removing state" podUID="b9239175-af51-46ed-a36b-a5b98a1bb790" containerName="nova-metadata-log" Nov 28 07:18:00 crc kubenswrapper[4922]: I1128 07:18:00.362393 4922 memory_manager.go:354] "RemoveStaleState removing state" podUID="b9239175-af51-46ed-a36b-a5b98a1bb790" containerName="nova-metadata-metadata" Nov 28 07:18:00 crc kubenswrapper[4922]: I1128 07:18:00.362424 4922 memory_manager.go:354] "RemoveStaleState removing state" podUID="56dcaf4a-8a1b-471b-9f04-c372f15b8fa5" containerName="nova-cell1-novncproxy-novncproxy" Nov 28 07:18:00 crc kubenswrapper[4922]: I1128 07:18:00.365059 4922 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Nov 28 07:18:00 crc kubenswrapper[4922]: I1128 07:18:00.374063 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-metadata-internal-svc" Nov 28 07:18:00 crc kubenswrapper[4922]: I1128 07:18:00.374735 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-config-data" Nov 28 07:18:00 crc kubenswrapper[4922]: I1128 07:18:00.389892 4922 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Nov 28 07:18:00 crc kubenswrapper[4922]: I1128 07:18:00.410118 4922 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Nov 28 07:18:00 crc kubenswrapper[4922]: I1128 07:18:00.418048 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Nov 28 07:18:00 crc kubenswrapper[4922]: I1128 07:18:00.427890 4922 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Nov 28 07:18:00 crc kubenswrapper[4922]: I1128 07:18:00.429249 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Nov 28 07:18:00 crc kubenswrapper[4922]: I1128 07:18:00.432055 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-novncproxy-cell1-public-svc" Nov 28 07:18:00 crc kubenswrapper[4922]: I1128 07:18:00.432281 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-novncproxy-config-data" Nov 28 07:18:00 crc kubenswrapper[4922]: I1128 07:18:00.432459 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-novncproxy-cell1-vencrypt" Nov 28 07:18:00 crc kubenswrapper[4922]: I1128 07:18:00.451261 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Nov 28 07:18:00 crc kubenswrapper[4922]: I1128 07:18:00.454717 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"vencrypt-tls-certs\" (UniqueName: \"kubernetes.io/secret/98e654e6-cf7b-469f-aa60-118fee0e3764-vencrypt-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"98e654e6-cf7b-469f-aa60-118fee0e3764\") " pod="openstack/nova-cell1-novncproxy-0" Nov 28 07:18:00 crc kubenswrapper[4922]: I1128 07:18:00.454797 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0607507d-9b30-4943-91d4-b1de3122188c-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"0607507d-9b30-4943-91d4-b1de3122188c\") " pod="openstack/nova-metadata-0" Nov 28 07:18:00 crc kubenswrapper[4922]: I1128 07:18:00.454857 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8t6l9\" (UniqueName: \"kubernetes.io/projected/0607507d-9b30-4943-91d4-b1de3122188c-kube-api-access-8t6l9\") pod \"nova-metadata-0\" (UID: \"0607507d-9b30-4943-91d4-b1de3122188c\") " pod="openstack/nova-metadata-0" Nov 28 07:18:00 crc kubenswrapper[4922]: I1128 07:18:00.454898 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0607507d-9b30-4943-91d4-b1de3122188c-config-data\") pod \"nova-metadata-0\" (UID: \"0607507d-9b30-4943-91d4-b1de3122188c\") " pod="openstack/nova-metadata-0" Nov 28 07:18:00 crc kubenswrapper[4922]: I1128 07:18:00.454925 4922 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/0607507d-9b30-4943-91d4-b1de3122188c-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"0607507d-9b30-4943-91d4-b1de3122188c\") " pod="openstack/nova-metadata-0" Nov 28 07:18:00 crc kubenswrapper[4922]: I1128 07:18:00.454961 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-novncproxy-tls-certs\" (UniqueName: \"kubernetes.io/secret/98e654e6-cf7b-469f-aa60-118fee0e3764-nova-novncproxy-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"98e654e6-cf7b-469f-aa60-118fee0e3764\") " pod="openstack/nova-cell1-novncproxy-0" Nov 28 07:18:00 crc kubenswrapper[4922]: I1128 07:18:00.454982 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/0607507d-9b30-4943-91d4-b1de3122188c-logs\") pod \"nova-metadata-0\" (UID: \"0607507d-9b30-4943-91d4-b1de3122188c\") " pod="openstack/nova-metadata-0" Nov 28 07:18:00 crc kubenswrapper[4922]: I1128 07:18:00.455042 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9zspr\" (UniqueName: \"kubernetes.io/projected/98e654e6-cf7b-469f-aa60-118fee0e3764-kube-api-access-9zspr\") pod \"nova-cell1-novncproxy-0\" (UID: \"98e654e6-cf7b-469f-aa60-118fee0e3764\") " pod="openstack/nova-cell1-novncproxy-0" Nov 28 07:18:00 crc kubenswrapper[4922]: I1128 07:18:00.455065 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/98e654e6-cf7b-469f-aa60-118fee0e3764-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"98e654e6-cf7b-469f-aa60-118fee0e3764\") " pod="openstack/nova-cell1-novncproxy-0" Nov 28 07:18:00 crc kubenswrapper[4922]: I1128 07:18:00.455113 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/98e654e6-cf7b-469f-aa60-118fee0e3764-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"98e654e6-cf7b-469f-aa60-118fee0e3764\") " pod="openstack/nova-cell1-novncproxy-0" Nov 28 07:18:00 crc kubenswrapper[4922]: I1128 07:18:00.556403 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0607507d-9b30-4943-91d4-b1de3122188c-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"0607507d-9b30-4943-91d4-b1de3122188c\") " pod="openstack/nova-metadata-0" Nov 28 07:18:00 crc kubenswrapper[4922]: I1128 07:18:00.556499 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8t6l9\" (UniqueName: \"kubernetes.io/projected/0607507d-9b30-4943-91d4-b1de3122188c-kube-api-access-8t6l9\") pod \"nova-metadata-0\" (UID: \"0607507d-9b30-4943-91d4-b1de3122188c\") " pod="openstack/nova-metadata-0" Nov 28 07:18:00 crc kubenswrapper[4922]: I1128 07:18:00.556543 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0607507d-9b30-4943-91d4-b1de3122188c-config-data\") pod \"nova-metadata-0\" (UID: \"0607507d-9b30-4943-91d4-b1de3122188c\") " pod="openstack/nova-metadata-0" Nov 28 07:18:00 crc kubenswrapper[4922]: I1128 07:18:00.556580 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/0607507d-9b30-4943-91d4-b1de3122188c-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"0607507d-9b30-4943-91d4-b1de3122188c\") " pod="openstack/nova-metadata-0" Nov 28 07:18:00 crc kubenswrapper[4922]: I1128 07:18:00.556624 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-novncproxy-tls-certs\" (UniqueName: \"kubernetes.io/secret/98e654e6-cf7b-469f-aa60-118fee0e3764-nova-novncproxy-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"98e654e6-cf7b-469f-aa60-118fee0e3764\") " pod="openstack/nova-cell1-novncproxy-0" Nov 28 07:18:00 crc kubenswrapper[4922]: I1128 07:18:00.556650 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/0607507d-9b30-4943-91d4-b1de3122188c-logs\") pod \"nova-metadata-0\" (UID: \"0607507d-9b30-4943-91d4-b1de3122188c\") " pod="openstack/nova-metadata-0" Nov 28 07:18:00 crc kubenswrapper[4922]: I1128 07:18:00.556692 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9zspr\" (UniqueName: \"kubernetes.io/projected/98e654e6-cf7b-469f-aa60-118fee0e3764-kube-api-access-9zspr\") pod \"nova-cell1-novncproxy-0\" (UID: \"98e654e6-cf7b-469f-aa60-118fee0e3764\") " pod="openstack/nova-cell1-novncproxy-0" Nov 28 07:18:00 crc kubenswrapper[4922]: I1128 07:18:00.556714 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/98e654e6-cf7b-469f-aa60-118fee0e3764-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"98e654e6-cf7b-469f-aa60-118fee0e3764\") " pod="openstack/nova-cell1-novncproxy-0" Nov 28 07:18:00 crc kubenswrapper[4922]: I1128 07:18:00.556759 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/98e654e6-cf7b-469f-aa60-118fee0e3764-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"98e654e6-cf7b-469f-aa60-118fee0e3764\") " pod="openstack/nova-cell1-novncproxy-0" Nov 28 07:18:00 crc kubenswrapper[4922]: I1128 07:18:00.556789 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"vencrypt-tls-certs\" (UniqueName: \"kubernetes.io/secret/98e654e6-cf7b-469f-aa60-118fee0e3764-vencrypt-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"98e654e6-cf7b-469f-aa60-118fee0e3764\") " pod="openstack/nova-cell1-novncproxy-0" Nov 28 07:18:00 crc kubenswrapper[4922]: I1128 07:18:00.557930 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/0607507d-9b30-4943-91d4-b1de3122188c-logs\") pod \"nova-metadata-0\" (UID: \"0607507d-9b30-4943-91d4-b1de3122188c\") " pod="openstack/nova-metadata-0" Nov 28 07:18:00 crc kubenswrapper[4922]: I1128 07:18:00.562531 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-novncproxy-tls-certs\" (UniqueName: \"kubernetes.io/secret/98e654e6-cf7b-469f-aa60-118fee0e3764-nova-novncproxy-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"98e654e6-cf7b-469f-aa60-118fee0e3764\") " pod="openstack/nova-cell1-novncproxy-0" Nov 28 07:18:00 crc kubenswrapper[4922]: I1128 07:18:00.562672 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/98e654e6-cf7b-469f-aa60-118fee0e3764-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: 
\"98e654e6-cf7b-469f-aa60-118fee0e3764\") " pod="openstack/nova-cell1-novncproxy-0" Nov 28 07:18:00 crc kubenswrapper[4922]: I1128 07:18:00.563063 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/0607507d-9b30-4943-91d4-b1de3122188c-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"0607507d-9b30-4943-91d4-b1de3122188c\") " pod="openstack/nova-metadata-0" Nov 28 07:18:00 crc kubenswrapper[4922]: I1128 07:18:00.564828 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0607507d-9b30-4943-91d4-b1de3122188c-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"0607507d-9b30-4943-91d4-b1de3122188c\") " pod="openstack/nova-metadata-0" Nov 28 07:18:00 crc kubenswrapper[4922]: I1128 07:18:00.565299 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0607507d-9b30-4943-91d4-b1de3122188c-config-data\") pod \"nova-metadata-0\" (UID: \"0607507d-9b30-4943-91d4-b1de3122188c\") " pod="openstack/nova-metadata-0" Nov 28 07:18:00 crc kubenswrapper[4922]: I1128 07:18:00.568691 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"vencrypt-tls-certs\" (UniqueName: \"kubernetes.io/secret/98e654e6-cf7b-469f-aa60-118fee0e3764-vencrypt-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"98e654e6-cf7b-469f-aa60-118fee0e3764\") " pod="openstack/nova-cell1-novncproxy-0" Nov 28 07:18:00 crc kubenswrapper[4922]: I1128 07:18:00.569133 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/98e654e6-cf7b-469f-aa60-118fee0e3764-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"98e654e6-cf7b-469f-aa60-118fee0e3764\") " pod="openstack/nova-cell1-novncproxy-0" Nov 28 07:18:00 crc kubenswrapper[4922]: I1128 07:18:00.590092 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8t6l9\" (UniqueName: \"kubernetes.io/projected/0607507d-9b30-4943-91d4-b1de3122188c-kube-api-access-8t6l9\") pod \"nova-metadata-0\" (UID: \"0607507d-9b30-4943-91d4-b1de3122188c\") " pod="openstack/nova-metadata-0" Nov 28 07:18:00 crc kubenswrapper[4922]: I1128 07:18:00.597846 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9zspr\" (UniqueName: \"kubernetes.io/projected/98e654e6-cf7b-469f-aa60-118fee0e3764-kube-api-access-9zspr\") pod \"nova-cell1-novncproxy-0\" (UID: \"98e654e6-cf7b-469f-aa60-118fee0e3764\") " pod="openstack/nova-cell1-novncproxy-0" Nov 28 07:18:00 crc kubenswrapper[4922]: I1128 07:18:00.686968 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Nov 28 07:18:00 crc kubenswrapper[4922]: I1128 07:18:00.748282 4922 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Nov 28 07:18:01 crc kubenswrapper[4922]: W1128 07:18:01.183207 4922 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod0607507d_9b30_4943_91d4_b1de3122188c.slice/crio-d6bece7c30f8653aacab6c9cf86b3b7a5ef607a080e561a53807da3c3bac8861 WatchSource:0}: Error finding container d6bece7c30f8653aacab6c9cf86b3b7a5ef607a080e561a53807da3c3bac8861: Status 404 returned error can't find the container with id d6bece7c30f8653aacab6c9cf86b3b7a5ef607a080e561a53807da3c3bac8861 Nov 28 07:18:01 crc kubenswrapper[4922]: I1128 07:18:01.194464 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Nov 28 07:18:01 crc kubenswrapper[4922]: I1128 07:18:01.284302 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Nov 28 07:18:01 crc kubenswrapper[4922]: W1128 07:18:01.288970 4922 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod98e654e6_cf7b_469f_aa60_118fee0e3764.slice/crio-1a3a5d7523eb65abd59b6761275aa5706b3953f2d4e19f25dbfa1c0ddbd7328a WatchSource:0}: Error finding container 1a3a5d7523eb65abd59b6761275aa5706b3953f2d4e19f25dbfa1c0ddbd7328a: Status 404 returned error can't find the container with id 1a3a5d7523eb65abd59b6761275aa5706b3953f2d4e19f25dbfa1c0ddbd7328a Nov 28 07:18:01 crc kubenswrapper[4922]: I1128 07:18:01.308010 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"98e654e6-cf7b-469f-aa60-118fee0e3764","Type":"ContainerStarted","Data":"1a3a5d7523eb65abd59b6761275aa5706b3953f2d4e19f25dbfa1c0ddbd7328a"} Nov 28 07:18:01 crc kubenswrapper[4922]: I1128 07:18:01.309908 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"0607507d-9b30-4943-91d4-b1de3122188c","Type":"ContainerStarted","Data":"d6bece7c30f8653aacab6c9cf86b3b7a5ef607a080e561a53807da3c3bac8861"} Nov 28 07:18:01 crc kubenswrapper[4922]: I1128 07:18:01.410768 4922 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="56dcaf4a-8a1b-471b-9f04-c372f15b8fa5" path="/var/lib/kubelet/pods/56dcaf4a-8a1b-471b-9f04-c372f15b8fa5/volumes" Nov 28 07:18:01 crc kubenswrapper[4922]: I1128 07:18:01.411938 4922 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b9239175-af51-46ed-a36b-a5b98a1bb790" path="/var/lib/kubelet/pods/b9239175-af51-46ed-a36b-a5b98a1bb790/volumes" Nov 28 07:18:01 crc kubenswrapper[4922]: I1128 07:18:01.476613 4922 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-api-0" Nov 28 07:18:01 crc kubenswrapper[4922]: I1128 07:18:01.477063 4922 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-api-0" Nov 28 07:18:01 crc kubenswrapper[4922]: I1128 07:18:01.478470 4922 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-api-0" Nov 28 07:18:01 crc kubenswrapper[4922]: I1128 07:18:01.483694 4922 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-api-0" Nov 28 07:18:02 crc kubenswrapper[4922]: I1128 07:18:02.329135 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"98e654e6-cf7b-469f-aa60-118fee0e3764","Type":"ContainerStarted","Data":"4e586197e5573dfc5e1c9ed3428653b242cd1c4575f148e1bde1a32ab14f8f71"} Nov 28 07:18:02 crc 
Nov 28 07:18:02 crc kubenswrapper[4922]: I1128 07:18:02.333449 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"0607507d-9b30-4943-91d4-b1de3122188c","Type":"ContainerStarted","Data":"a7cd549dbd5f930c4e37c52941bb6843d5700691d4aedf95168f04ee13f67d40"}
Nov 28 07:18:02 crc kubenswrapper[4922]: I1128 07:18:02.333481 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"0607507d-9b30-4943-91d4-b1de3122188c","Type":"ContainerStarted","Data":"9feb35ce2c7719553854c5f271fad97fb06d43980358752d4b7e7759514b203f"}
Nov 28 07:18:02 crc kubenswrapper[4922]: I1128 07:18:02.333495 4922 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-api-0"
Nov 28 07:18:02 crc kubenswrapper[4922]: I1128 07:18:02.338744 4922 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-api-0"
Nov 28 07:18:02 crc kubenswrapper[4922]: I1128 07:18:02.359587 4922 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-novncproxy-0" podStartSLOduration=2.359566856 podStartE2EDuration="2.359566856s" podCreationTimestamp="2025-11-28 07:18:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 07:18:02.355872128 +0000 UTC m=+1527.276267730" watchObservedRunningTime="2025-11-28 07:18:02.359566856 +0000 UTC m=+1527.279962438"
Nov 28 07:18:02 crc kubenswrapper[4922]: I1128 07:18:02.423570 4922 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-metadata-0" podStartSLOduration=2.423548789 podStartE2EDuration="2.423548789s" podCreationTimestamp="2025-11-28 07:18:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 07:18:02.41270578 +0000 UTC m=+1527.333101362" watchObservedRunningTime="2025-11-28 07:18:02.423548789 +0000 UTC m=+1527.343944371"
Nov 28 07:18:02 crc kubenswrapper[4922]: I1128 07:18:02.568655 4922 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-6bc646c8f9-nfgpb"]
Nov 28 07:18:02 crc kubenswrapper[4922]: I1128 07:18:02.570488 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6bc646c8f9-nfgpb"
Nov 28 07:18:02 crc kubenswrapper[4922]: I1128 07:18:02.580660 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-6bc646c8f9-nfgpb"]
Nov 28 07:18:02 crc kubenswrapper[4922]: I1128 07:18:02.710355 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/2a50cebf-c40b-425a-86a1-7813277f1b5a-ovsdbserver-sb\") pod \"dnsmasq-dns-6bc646c8f9-nfgpb\" (UID: \"2a50cebf-c40b-425a-86a1-7813277f1b5a\") " pod="openstack/dnsmasq-dns-6bc646c8f9-nfgpb"
Nov 28 07:18:02 crc kubenswrapper[4922]: I1128 07:18:02.710420 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/2a50cebf-c40b-425a-86a1-7813277f1b5a-dns-swift-storage-0\") pod \"dnsmasq-dns-6bc646c8f9-nfgpb\" (UID: \"2a50cebf-c40b-425a-86a1-7813277f1b5a\") " pod="openstack/dnsmasq-dns-6bc646c8f9-nfgpb"
Nov 28 07:18:02 crc kubenswrapper[4922]: I1128 07:18:02.710462 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/2a50cebf-c40b-425a-86a1-7813277f1b5a-ovsdbserver-nb\") pod \"dnsmasq-dns-6bc646c8f9-nfgpb\" (UID: \"2a50cebf-c40b-425a-86a1-7813277f1b5a\") " pod="openstack/dnsmasq-dns-6bc646c8f9-nfgpb"
Nov 28 07:18:02 crc kubenswrapper[4922]: I1128 07:18:02.711553 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/2a50cebf-c40b-425a-86a1-7813277f1b5a-dns-svc\") pod \"dnsmasq-dns-6bc646c8f9-nfgpb\" (UID: \"2a50cebf-c40b-425a-86a1-7813277f1b5a\") " pod="openstack/dnsmasq-dns-6bc646c8f9-nfgpb"
Nov 28 07:18:02 crc kubenswrapper[4922]: I1128 07:18:02.711597 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2a50cebf-c40b-425a-86a1-7813277f1b5a-config\") pod \"dnsmasq-dns-6bc646c8f9-nfgpb\" (UID: \"2a50cebf-c40b-425a-86a1-7813277f1b5a\") " pod="openstack/dnsmasq-dns-6bc646c8f9-nfgpb"
Nov 28 07:18:02 crc kubenswrapper[4922]: I1128 07:18:02.711626 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bw9zw\" (UniqueName: \"kubernetes.io/projected/2a50cebf-c40b-425a-86a1-7813277f1b5a-kube-api-access-bw9zw\") pod \"dnsmasq-dns-6bc646c8f9-nfgpb\" (UID: \"2a50cebf-c40b-425a-86a1-7813277f1b5a\") " pod="openstack/dnsmasq-dns-6bc646c8f9-nfgpb"
Nov 28 07:18:02 crc kubenswrapper[4922]: I1128 07:18:02.813365 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/2a50cebf-c40b-425a-86a1-7813277f1b5a-dns-svc\") pod \"dnsmasq-dns-6bc646c8f9-nfgpb\" (UID: \"2a50cebf-c40b-425a-86a1-7813277f1b5a\") " pod="openstack/dnsmasq-dns-6bc646c8f9-nfgpb"
Nov 28 07:18:02 crc kubenswrapper[4922]: I1128 07:18:02.813721 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2a50cebf-c40b-425a-86a1-7813277f1b5a-config\") pod \"dnsmasq-dns-6bc646c8f9-nfgpb\" (UID: \"2a50cebf-c40b-425a-86a1-7813277f1b5a\") " pod="openstack/dnsmasq-dns-6bc646c8f9-nfgpb"
Nov 28 07:18:02 crc kubenswrapper[4922]: I1128 07:18:02.813746 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bw9zw\" (UniqueName: \"kubernetes.io/projected/2a50cebf-c40b-425a-86a1-7813277f1b5a-kube-api-access-bw9zw\") pod \"dnsmasq-dns-6bc646c8f9-nfgpb\" (UID: \"2a50cebf-c40b-425a-86a1-7813277f1b5a\") " pod="openstack/dnsmasq-dns-6bc646c8f9-nfgpb"
Nov 28 07:18:02 crc kubenswrapper[4922]: I1128 07:18:02.813838 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/2a50cebf-c40b-425a-86a1-7813277f1b5a-ovsdbserver-sb\") pod \"dnsmasq-dns-6bc646c8f9-nfgpb\" (UID: \"2a50cebf-c40b-425a-86a1-7813277f1b5a\") " pod="openstack/dnsmasq-dns-6bc646c8f9-nfgpb"
Nov 28 07:18:02 crc kubenswrapper[4922]: I1128 07:18:02.813886 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/2a50cebf-c40b-425a-86a1-7813277f1b5a-dns-swift-storage-0\") pod \"dnsmasq-dns-6bc646c8f9-nfgpb\" (UID: \"2a50cebf-c40b-425a-86a1-7813277f1b5a\") " pod="openstack/dnsmasq-dns-6bc646c8f9-nfgpb"
Nov 28 07:18:02 crc kubenswrapper[4922]: I1128 07:18:02.813919 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/2a50cebf-c40b-425a-86a1-7813277f1b5a-ovsdbserver-nb\") pod \"dnsmasq-dns-6bc646c8f9-nfgpb\" (UID: \"2a50cebf-c40b-425a-86a1-7813277f1b5a\") " pod="openstack/dnsmasq-dns-6bc646c8f9-nfgpb"
Nov 28 07:18:02 crc kubenswrapper[4922]: I1128 07:18:02.814551 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/2a50cebf-c40b-425a-86a1-7813277f1b5a-dns-svc\") pod \"dnsmasq-dns-6bc646c8f9-nfgpb\" (UID: \"2a50cebf-c40b-425a-86a1-7813277f1b5a\") " pod="openstack/dnsmasq-dns-6bc646c8f9-nfgpb"
Nov 28 07:18:02 crc kubenswrapper[4922]: I1128 07:18:02.814647 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/2a50cebf-c40b-425a-86a1-7813277f1b5a-ovsdbserver-nb\") pod \"dnsmasq-dns-6bc646c8f9-nfgpb\" (UID: \"2a50cebf-c40b-425a-86a1-7813277f1b5a\") " pod="openstack/dnsmasq-dns-6bc646c8f9-nfgpb"
Nov 28 07:18:02 crc kubenswrapper[4922]: I1128 07:18:02.815164 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/2a50cebf-c40b-425a-86a1-7813277f1b5a-ovsdbserver-sb\") pod \"dnsmasq-dns-6bc646c8f9-nfgpb\" (UID: \"2a50cebf-c40b-425a-86a1-7813277f1b5a\") " pod="openstack/dnsmasq-dns-6bc646c8f9-nfgpb"
Nov 28 07:18:02 crc kubenswrapper[4922]: I1128 07:18:02.815578 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2a50cebf-c40b-425a-86a1-7813277f1b5a-config\") pod \"dnsmasq-dns-6bc646c8f9-nfgpb\" (UID: \"2a50cebf-c40b-425a-86a1-7813277f1b5a\") " pod="openstack/dnsmasq-dns-6bc646c8f9-nfgpb"
Nov 28 07:18:02 crc kubenswrapper[4922]: I1128 07:18:02.815693 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/2a50cebf-c40b-425a-86a1-7813277f1b5a-dns-swift-storage-0\") pod \"dnsmasq-dns-6bc646c8f9-nfgpb\" (UID: \"2a50cebf-c40b-425a-86a1-7813277f1b5a\") " pod="openstack/dnsmasq-dns-6bc646c8f9-nfgpb"
Nov 28 07:18:02 crc kubenswrapper[4922]: I1128 07:18:02.833962 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bw9zw\" (UniqueName: \"kubernetes.io/projected/2a50cebf-c40b-425a-86a1-7813277f1b5a-kube-api-access-bw9zw\") pod \"dnsmasq-dns-6bc646c8f9-nfgpb\" (UID: \"2a50cebf-c40b-425a-86a1-7813277f1b5a\") " pod="openstack/dnsmasq-dns-6bc646c8f9-nfgpb"
Nov 28 07:18:02 crc kubenswrapper[4922]: I1128 07:18:02.930837 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6bc646c8f9-nfgpb"
Nov 28 07:18:03 crc kubenswrapper[4922]: I1128 07:18:03.427956 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-6bc646c8f9-nfgpb"]
Nov 28 07:18:03 crc kubenswrapper[4922]: W1128 07:18:03.430491 4922 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod2a50cebf_c40b_425a_86a1_7813277f1b5a.slice/crio-dc1012a60da575ba3700b918a6462fac59c76357be0de437ceb9253da366d18e WatchSource:0}: Error finding container dc1012a60da575ba3700b918a6462fac59c76357be0de437ceb9253da366d18e: Status 404 returned error can't find the container with id dc1012a60da575ba3700b918a6462fac59c76357be0de437ceb9253da366d18e
Nov 28 07:18:04 crc kubenswrapper[4922]: I1128 07:18:04.353781 4922 generic.go:334] "Generic (PLEG): container finished" podID="2a50cebf-c40b-425a-86a1-7813277f1b5a" containerID="fbdb24f0e3359c3008db7b03afd1ec4313fbb097c406925f05c1bc8c06c01499" exitCode=0
Nov 28 07:18:04 crc kubenswrapper[4922]: I1128 07:18:04.355141 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6bc646c8f9-nfgpb" event={"ID":"2a50cebf-c40b-425a-86a1-7813277f1b5a","Type":"ContainerDied","Data":"fbdb24f0e3359c3008db7b03afd1ec4313fbb097c406925f05c1bc8c06c01499"}
Nov 28 07:18:04 crc kubenswrapper[4922]: I1128 07:18:04.355178 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6bc646c8f9-nfgpb" event={"ID":"2a50cebf-c40b-425a-86a1-7813277f1b5a","Type":"ContainerStarted","Data":"dc1012a60da575ba3700b918a6462fac59c76357be0de437ceb9253da366d18e"}
Nov 28 07:18:04 crc kubenswrapper[4922]: I1128 07:18:04.525493 4922 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"]
Nov 28 07:18:04 crc kubenswrapper[4922]: I1128 07:18:04.526995 4922 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="ddc9a938-678c-447d-9d92-5aae0ba2a3f2" containerName="ceilometer-central-agent" containerID="cri-o://8e7719ea7956a92a12da3a26a3db3542e7fae883e1ad4c52618dad605078d602" gracePeriod=30
Nov 28 07:18:04 crc kubenswrapper[4922]: I1128 07:18:04.527495 4922 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="ddc9a938-678c-447d-9d92-5aae0ba2a3f2" containerName="proxy-httpd" containerID="cri-o://4515b03feb8c8fb76d0ad9e84a0bf8c6c6acf8f1a769733153def4eacb299d6b" gracePeriod=30
Nov 28 07:18:04 crc kubenswrapper[4922]: I1128 07:18:04.527549 4922 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="ddc9a938-678c-447d-9d92-5aae0ba2a3f2" containerName="ceilometer-notification-agent" containerID="cri-o://20f82159e489b6cb472be918aead4c432c9f5f948e88df6c5be425834b6fedbc" gracePeriod=30
Nov 28 07:18:04 crc kubenswrapper[4922]: I1128 07:18:04.527693 4922 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="ddc9a938-678c-447d-9d92-5aae0ba2a3f2" containerName="sg-core" containerID="cri-o://65f2b52fa8944addce7361c5c245f10f09089a39e74515f16bf0634508878078" gracePeriod=30
Nov 28 07:18:04 crc kubenswrapper[4922]: I1128 07:18:04.538196 4922 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/ceilometer-0" podUID="ddc9a938-678c-447d-9d92-5aae0ba2a3f2" containerName="proxy-httpd" probeResult="failure" output="Get \"https://10.217.0.192:3000/\": read tcp 10.217.0.2:51514->10.217.0.192:3000: read: connection reset by peer"
Nov 28 07:18:04 crc kubenswrapper[4922]: I1128 07:18:04.749060 4922 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"]
Nov 28 07:18:05 crc kubenswrapper[4922]: I1128 07:18:05.364732 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6bc646c8f9-nfgpb" event={"ID":"2a50cebf-c40b-425a-86a1-7813277f1b5a","Type":"ContainerStarted","Data":"4f746f52686852078d4054015bbe108873cac8ffaf0e003c3bb348ac22540b3b"}
Nov 28 07:18:05 crc kubenswrapper[4922]: I1128 07:18:05.365524 4922 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-6bc646c8f9-nfgpb"
Nov 28 07:18:05 crc kubenswrapper[4922]: I1128 07:18:05.368043 4922 generic.go:334] "Generic (PLEG): container finished" podID="ddc9a938-678c-447d-9d92-5aae0ba2a3f2" containerID="4515b03feb8c8fb76d0ad9e84a0bf8c6c6acf8f1a769733153def4eacb299d6b" exitCode=0
Nov 28 07:18:05 crc kubenswrapper[4922]: I1128 07:18:05.368086 4922 generic.go:334] "Generic (PLEG): container finished" podID="ddc9a938-678c-447d-9d92-5aae0ba2a3f2" containerID="65f2b52fa8944addce7361c5c245f10f09089a39e74515f16bf0634508878078" exitCode=2
Nov 28 07:18:05 crc kubenswrapper[4922]: I1128 07:18:05.368103 4922 generic.go:334] "Generic (PLEG): container finished" podID="ddc9a938-678c-447d-9d92-5aae0ba2a3f2" containerID="8e7719ea7956a92a12da3a26a3db3542e7fae883e1ad4c52618dad605078d602" exitCode=0
Nov 28 07:18:05 crc kubenswrapper[4922]: I1128 07:18:05.368454 4922 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="541557ff-fa14-4f6c-b957-716ba8fdb38e" containerName="nova-api-log" containerID="cri-o://c97e47104c2d6277d0f91592a43e2278122aadcb3c00b2e446aa63c87643d4f1" gracePeriod=30
Nov 28 07:18:05 crc kubenswrapper[4922]: I1128 07:18:05.368857 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"ddc9a938-678c-447d-9d92-5aae0ba2a3f2","Type":"ContainerDied","Data":"4515b03feb8c8fb76d0ad9e84a0bf8c6c6acf8f1a769733153def4eacb299d6b"}
Nov 28 07:18:05 crc kubenswrapper[4922]: I1128 07:18:05.368908 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"ddc9a938-678c-447d-9d92-5aae0ba2a3f2","Type":"ContainerDied","Data":"65f2b52fa8944addce7361c5c245f10f09089a39e74515f16bf0634508878078"}
Nov 28 07:18:05 crc kubenswrapper[4922]: I1128 07:18:05.368934 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"ddc9a938-678c-447d-9d92-5aae0ba2a3f2","Type":"ContainerDied","Data":"8e7719ea7956a92a12da3a26a3db3542e7fae883e1ad4c52618dad605078d602"}
Nov 28 07:18:05 crc kubenswrapper[4922]: I1128 07:18:05.370903 4922 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="541557ff-fa14-4f6c-b957-716ba8fdb38e" containerName="nova-api-api" containerID="cri-o://db0c7b510a2f2c391d9395ac5a825a0b3f234fedbbdec3ce3eba8ee1d119e43b" gracePeriod=30
Nov 28 07:18:05 crc kubenswrapper[4922]: I1128 07:18:05.403583 4922 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-6bc646c8f9-nfgpb" podStartSLOduration=3.403559875 podStartE2EDuration="3.403559875s" podCreationTimestamp="2025-11-28 07:18:02 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 07:18:05.391419182 +0000 UTC m=+1530.311814764" watchObservedRunningTime="2025-11-28 07:18:05.403559875 +0000 UTC m=+1530.323955457"
Nov 28 07:18:05 crc kubenswrapper[4922]: I1128 07:18:05.688106 4922 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0"
Nov 28 07:18:05 crc kubenswrapper[4922]: I1128 07:18:05.688160 4922 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0"
Nov 28 07:18:05 crc kubenswrapper[4922]: I1128 07:18:05.749133 4922 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-cell1-novncproxy-0"
Nov 28 07:18:06 crc kubenswrapper[4922]: I1128 07:18:06.377463 4922 generic.go:334] "Generic (PLEG): container finished" podID="541557ff-fa14-4f6c-b957-716ba8fdb38e" containerID="c97e47104c2d6277d0f91592a43e2278122aadcb3c00b2e446aa63c87643d4f1" exitCode=143
Nov 28 07:18:06 crc kubenswrapper[4922]: I1128 07:18:06.377533 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"541557ff-fa14-4f6c-b957-716ba8fdb38e","Type":"ContainerDied","Data":"c97e47104c2d6277d0f91592a43e2278122aadcb3c00b2e446aa63c87643d4f1"}
Nov 28 07:18:07 crc kubenswrapper[4922]: I1128 07:18:07.039954 4922 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0"
Nov 28 07:18:07 crc kubenswrapper[4922]: I1128 07:18:07.116095 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ddc9a938-678c-447d-9d92-5aae0ba2a3f2-scripts\") pod \"ddc9a938-678c-447d-9d92-5aae0ba2a3f2\" (UID: \"ddc9a938-678c-447d-9d92-5aae0ba2a3f2\") "
Nov 28 07:18:07 crc kubenswrapper[4922]: I1128 07:18:07.116161 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/ddc9a938-678c-447d-9d92-5aae0ba2a3f2-run-httpd\") pod \"ddc9a938-678c-447d-9d92-5aae0ba2a3f2\" (UID: \"ddc9a938-678c-447d-9d92-5aae0ba2a3f2\") "
Nov 28 07:18:07 crc kubenswrapper[4922]: I1128 07:18:07.116239 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ddc9a938-678c-447d-9d92-5aae0ba2a3f2-combined-ca-bundle\") pod \"ddc9a938-678c-447d-9d92-5aae0ba2a3f2\" (UID: \"ddc9a938-678c-447d-9d92-5aae0ba2a3f2\") "
Nov 28 07:18:07 crc kubenswrapper[4922]: I1128 07:18:07.116270 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/ddc9a938-678c-447d-9d92-5aae0ba2a3f2-sg-core-conf-yaml\") pod \"ddc9a938-678c-447d-9d92-5aae0ba2a3f2\" (UID: \"ddc9a938-678c-447d-9d92-5aae0ba2a3f2\") "
Nov 28 07:18:07 crc kubenswrapper[4922]: I1128 07:18:07.116328 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/ddc9a938-678c-447d-9d92-5aae0ba2a3f2-ceilometer-tls-certs\") pod \"ddc9a938-678c-447d-9d92-5aae0ba2a3f2\" (UID: \"ddc9a938-678c-447d-9d92-5aae0ba2a3f2\") "
Nov 28 07:18:07 crc kubenswrapper[4922]: I1128 07:18:07.116580 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ddc9a938-678c-447d-9d92-5aae0ba2a3f2-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "ddc9a938-678c-447d-9d92-5aae0ba2a3f2" (UID: "ddc9a938-678c-447d-9d92-5aae0ba2a3f2"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 28 07:18:07 crc kubenswrapper[4922]: I1128 07:18:07.116350 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-k2x27\" (UniqueName: \"kubernetes.io/projected/ddc9a938-678c-447d-9d92-5aae0ba2a3f2-kube-api-access-k2x27\") pod \"ddc9a938-678c-447d-9d92-5aae0ba2a3f2\" (UID: \"ddc9a938-678c-447d-9d92-5aae0ba2a3f2\") "
Nov 28 07:18:07 crc kubenswrapper[4922]: I1128 07:18:07.117183 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ddc9a938-678c-447d-9d92-5aae0ba2a3f2-config-data\") pod \"ddc9a938-678c-447d-9d92-5aae0ba2a3f2\" (UID: \"ddc9a938-678c-447d-9d92-5aae0ba2a3f2\") "
Nov 28 07:18:07 crc kubenswrapper[4922]: I1128 07:18:07.117212 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/ddc9a938-678c-447d-9d92-5aae0ba2a3f2-log-httpd\") pod \"ddc9a938-678c-447d-9d92-5aae0ba2a3f2\" (UID: \"ddc9a938-678c-447d-9d92-5aae0ba2a3f2\") "
Nov 28 07:18:07 crc kubenswrapper[4922]: I1128 07:18:07.117662 4922 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/ddc9a938-678c-447d-9d92-5aae0ba2a3f2-run-httpd\") on node \"crc\" DevicePath \"\""
Nov 28 07:18:07 crc kubenswrapper[4922]: I1128 07:18:07.117963 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ddc9a938-678c-447d-9d92-5aae0ba2a3f2-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "ddc9a938-678c-447d-9d92-5aae0ba2a3f2" (UID: "ddc9a938-678c-447d-9d92-5aae0ba2a3f2"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 28 07:18:07 crc kubenswrapper[4922]: I1128 07:18:07.122234 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ddc9a938-678c-447d-9d92-5aae0ba2a3f2-scripts" (OuterVolumeSpecName: "scripts") pod "ddc9a938-678c-447d-9d92-5aae0ba2a3f2" (UID: "ddc9a938-678c-447d-9d92-5aae0ba2a3f2"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 07:18:07 crc kubenswrapper[4922]: I1128 07:18:07.122916 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ddc9a938-678c-447d-9d92-5aae0ba2a3f2-kube-api-access-k2x27" (OuterVolumeSpecName: "kube-api-access-k2x27") pod "ddc9a938-678c-447d-9d92-5aae0ba2a3f2" (UID: "ddc9a938-678c-447d-9d92-5aae0ba2a3f2"). InnerVolumeSpecName "kube-api-access-k2x27". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 28 07:18:07 crc kubenswrapper[4922]: I1128 07:18:07.153414 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ddc9a938-678c-447d-9d92-5aae0ba2a3f2-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "ddc9a938-678c-447d-9d92-5aae0ba2a3f2" (UID: "ddc9a938-678c-447d-9d92-5aae0ba2a3f2"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 07:18:07 crc kubenswrapper[4922]: I1128 07:18:07.177151 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ddc9a938-678c-447d-9d92-5aae0ba2a3f2-ceilometer-tls-certs" (OuterVolumeSpecName: "ceilometer-tls-certs") pod "ddc9a938-678c-447d-9d92-5aae0ba2a3f2" (UID: "ddc9a938-678c-447d-9d92-5aae0ba2a3f2"). InnerVolumeSpecName "ceilometer-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 07:18:07 crc kubenswrapper[4922]: I1128 07:18:07.199352 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ddc9a938-678c-447d-9d92-5aae0ba2a3f2-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "ddc9a938-678c-447d-9d92-5aae0ba2a3f2" (UID: "ddc9a938-678c-447d-9d92-5aae0ba2a3f2"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 07:18:07 crc kubenswrapper[4922]: I1128 07:18:07.219203 4922 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/ddc9a938-678c-447d-9d92-5aae0ba2a3f2-log-httpd\") on node \"crc\" DevicePath \"\""
Nov 28 07:18:07 crc kubenswrapper[4922]: I1128 07:18:07.219244 4922 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ddc9a938-678c-447d-9d92-5aae0ba2a3f2-scripts\") on node \"crc\" DevicePath \"\""
Nov 28 07:18:07 crc kubenswrapper[4922]: I1128 07:18:07.219275 4922 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ddc9a938-678c-447d-9d92-5aae0ba2a3f2-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Nov 28 07:18:07 crc kubenswrapper[4922]: I1128 07:18:07.219287 4922 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/ddc9a938-678c-447d-9d92-5aae0ba2a3f2-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\""
Nov 28 07:18:07 crc kubenswrapper[4922]: I1128 07:18:07.219295 4922 reconciler_common.go:293] "Volume detached for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/ddc9a938-678c-447d-9d92-5aae0ba2a3f2-ceilometer-tls-certs\") on node \"crc\" DevicePath \"\""
Nov 28 07:18:07 crc kubenswrapper[4922]: I1128 07:18:07.219305 4922 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-k2x27\" (UniqueName: \"kubernetes.io/projected/ddc9a938-678c-447d-9d92-5aae0ba2a3f2-kube-api-access-k2x27\") on node \"crc\" DevicePath \"\""
Nov 28 07:18:07 crc kubenswrapper[4922]: I1128 07:18:07.231199 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ddc9a938-678c-447d-9d92-5aae0ba2a3f2-config-data" (OuterVolumeSpecName: "config-data") pod "ddc9a938-678c-447d-9d92-5aae0ba2a3f2" (UID: "ddc9a938-678c-447d-9d92-5aae0ba2a3f2"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 07:18:07 crc kubenswrapper[4922]: I1128 07:18:07.321441 4922 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ddc9a938-678c-447d-9d92-5aae0ba2a3f2-config-data\") on node \"crc\" DevicePath \"\""
Nov 28 07:18:07 crc kubenswrapper[4922]: I1128 07:18:07.388174 4922 generic.go:334] "Generic (PLEG): container finished" podID="ddc9a938-678c-447d-9d92-5aae0ba2a3f2" containerID="20f82159e489b6cb472be918aead4c432c9f5f948e88df6c5be425834b6fedbc" exitCode=0
Nov 28 07:18:07 crc kubenswrapper[4922]: I1128 07:18:07.388243 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"ddc9a938-678c-447d-9d92-5aae0ba2a3f2","Type":"ContainerDied","Data":"20f82159e489b6cb472be918aead4c432c9f5f948e88df6c5be425834b6fedbc"}
Nov 28 07:18:07 crc kubenswrapper[4922]: I1128 07:18:07.388270 4922 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0"
Nov 28 07:18:07 crc kubenswrapper[4922]: I1128 07:18:07.388298 4922 scope.go:117] "RemoveContainer" containerID="4515b03feb8c8fb76d0ad9e84a0bf8c6c6acf8f1a769733153def4eacb299d6b"
Nov 28 07:18:07 crc kubenswrapper[4922]: I1128 07:18:07.388285 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"ddc9a938-678c-447d-9d92-5aae0ba2a3f2","Type":"ContainerDied","Data":"45edf549ff46b90859a47fcda8568d9a596b1d48e1c19581d54428f4b013274b"}
Nov 28 07:18:07 crc kubenswrapper[4922]: I1128 07:18:07.430585 4922 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"]
Nov 28 07:18:07 crc kubenswrapper[4922]: I1128 07:18:07.430705 4922 scope.go:117] "RemoveContainer" containerID="65f2b52fa8944addce7361c5c245f10f09089a39e74515f16bf0634508878078"
Nov 28 07:18:07 crc kubenswrapper[4922]: I1128 07:18:07.450874 4922 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"]
Nov 28 07:18:07 crc kubenswrapper[4922]: I1128 07:18:07.464263 4922 scope.go:117] "RemoveContainer" containerID="20f82159e489b6cb472be918aead4c432c9f5f948e88df6c5be425834b6fedbc"
Nov 28 07:18:07 crc kubenswrapper[4922]: I1128 07:18:07.466303 4922 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"]
Nov 28 07:18:07 crc kubenswrapper[4922]: E1128 07:18:07.466829 4922 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ddc9a938-678c-447d-9d92-5aae0ba2a3f2" containerName="sg-core"
Nov 28 07:18:07 crc kubenswrapper[4922]: I1128 07:18:07.466928 4922 state_mem.go:107] "Deleted CPUSet assignment" podUID="ddc9a938-678c-447d-9d92-5aae0ba2a3f2" containerName="sg-core"
Nov 28 07:18:07 crc kubenswrapper[4922]: E1128 07:18:07.467022 4922 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ddc9a938-678c-447d-9d92-5aae0ba2a3f2" containerName="ceilometer-notification-agent"
Nov 28 07:18:07 crc kubenswrapper[4922]: I1128 07:18:07.467098 4922 state_mem.go:107] "Deleted CPUSet assignment" podUID="ddc9a938-678c-447d-9d92-5aae0ba2a3f2" containerName="ceilometer-notification-agent"
Nov 28 07:18:07 crc kubenswrapper[4922]: E1128 07:18:07.467178 4922 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ddc9a938-678c-447d-9d92-5aae0ba2a3f2" containerName="proxy-httpd"
Nov 28 07:18:07 crc kubenswrapper[4922]: I1128 07:18:07.467310 4922 state_mem.go:107] "Deleted CPUSet assignment" podUID="ddc9a938-678c-447d-9d92-5aae0ba2a3f2" containerName="proxy-httpd"
Nov 28 07:18:07 crc kubenswrapper[4922]: E1128
07:18:07.467390 4922 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ddc9a938-678c-447d-9d92-5aae0ba2a3f2" containerName="ceilometer-central-agent" Nov 28 07:18:07 crc kubenswrapper[4922]: I1128 07:18:07.467461 4922 state_mem.go:107] "Deleted CPUSet assignment" podUID="ddc9a938-678c-447d-9d92-5aae0ba2a3f2" containerName="ceilometer-central-agent" Nov 28 07:18:07 crc kubenswrapper[4922]: I1128 07:18:07.467778 4922 memory_manager.go:354] "RemoveStaleState removing state" podUID="ddc9a938-678c-447d-9d92-5aae0ba2a3f2" containerName="ceilometer-central-agent" Nov 28 07:18:07 crc kubenswrapper[4922]: I1128 07:18:07.467875 4922 memory_manager.go:354] "RemoveStaleState removing state" podUID="ddc9a938-678c-447d-9d92-5aae0ba2a3f2" containerName="proxy-httpd" Nov 28 07:18:07 crc kubenswrapper[4922]: I1128 07:18:07.467969 4922 memory_manager.go:354] "RemoveStaleState removing state" podUID="ddc9a938-678c-447d-9d92-5aae0ba2a3f2" containerName="sg-core" Nov 28 07:18:07 crc kubenswrapper[4922]: I1128 07:18:07.468059 4922 memory_manager.go:354] "RemoveStaleState removing state" podUID="ddc9a938-678c-447d-9d92-5aae0ba2a3f2" containerName="ceilometer-notification-agent" Nov 28 07:18:07 crc kubenswrapper[4922]: I1128 07:18:07.471411 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 28 07:18:07 crc kubenswrapper[4922]: I1128 07:18:07.474089 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Nov 28 07:18:07 crc kubenswrapper[4922]: I1128 07:18:07.475539 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Nov 28 07:18:07 crc kubenswrapper[4922]: I1128 07:18:07.479926 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ceilometer-internal-svc" Nov 28 07:18:07 crc kubenswrapper[4922]: I1128 07:18:07.493706 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 28 07:18:07 crc kubenswrapper[4922]: I1128 07:18:07.504640 4922 scope.go:117] "RemoveContainer" containerID="8e7719ea7956a92a12da3a26a3db3542e7fae883e1ad4c52618dad605078d602" Nov 28 07:18:07 crc kubenswrapper[4922]: I1128 07:18:07.529637 4922 scope.go:117] "RemoveContainer" containerID="4515b03feb8c8fb76d0ad9e84a0bf8c6c6acf8f1a769733153def4eacb299d6b" Nov 28 07:18:07 crc kubenswrapper[4922]: E1128 07:18:07.530335 4922 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4515b03feb8c8fb76d0ad9e84a0bf8c6c6acf8f1a769733153def4eacb299d6b\": container with ID starting with 4515b03feb8c8fb76d0ad9e84a0bf8c6c6acf8f1a769733153def4eacb299d6b not found: ID does not exist" containerID="4515b03feb8c8fb76d0ad9e84a0bf8c6c6acf8f1a769733153def4eacb299d6b" Nov 28 07:18:07 crc kubenswrapper[4922]: I1128 07:18:07.530379 4922 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4515b03feb8c8fb76d0ad9e84a0bf8c6c6acf8f1a769733153def4eacb299d6b"} err="failed to get container status \"4515b03feb8c8fb76d0ad9e84a0bf8c6c6acf8f1a769733153def4eacb299d6b\": rpc error: code = NotFound desc = could not find container \"4515b03feb8c8fb76d0ad9e84a0bf8c6c6acf8f1a769733153def4eacb299d6b\": container with ID starting with 4515b03feb8c8fb76d0ad9e84a0bf8c6c6acf8f1a769733153def4eacb299d6b not found: ID does not exist" Nov 28 07:18:07 crc kubenswrapper[4922]: I1128 07:18:07.530404 4922 scope.go:117] "RemoveContainer" 
containerID="65f2b52fa8944addce7361c5c245f10f09089a39e74515f16bf0634508878078" Nov 28 07:18:07 crc kubenswrapper[4922]: E1128 07:18:07.530673 4922 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"65f2b52fa8944addce7361c5c245f10f09089a39e74515f16bf0634508878078\": container with ID starting with 65f2b52fa8944addce7361c5c245f10f09089a39e74515f16bf0634508878078 not found: ID does not exist" containerID="65f2b52fa8944addce7361c5c245f10f09089a39e74515f16bf0634508878078" Nov 28 07:18:07 crc kubenswrapper[4922]: I1128 07:18:07.530702 4922 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"65f2b52fa8944addce7361c5c245f10f09089a39e74515f16bf0634508878078"} err="failed to get container status \"65f2b52fa8944addce7361c5c245f10f09089a39e74515f16bf0634508878078\": rpc error: code = NotFound desc = could not find container \"65f2b52fa8944addce7361c5c245f10f09089a39e74515f16bf0634508878078\": container with ID starting with 65f2b52fa8944addce7361c5c245f10f09089a39e74515f16bf0634508878078 not found: ID does not exist" Nov 28 07:18:07 crc kubenswrapper[4922]: I1128 07:18:07.530725 4922 scope.go:117] "RemoveContainer" containerID="20f82159e489b6cb472be918aead4c432c9f5f948e88df6c5be425834b6fedbc" Nov 28 07:18:07 crc kubenswrapper[4922]: E1128 07:18:07.531181 4922 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"20f82159e489b6cb472be918aead4c432c9f5f948e88df6c5be425834b6fedbc\": container with ID starting with 20f82159e489b6cb472be918aead4c432c9f5f948e88df6c5be425834b6fedbc not found: ID does not exist" containerID="20f82159e489b6cb472be918aead4c432c9f5f948e88df6c5be425834b6fedbc" Nov 28 07:18:07 crc kubenswrapper[4922]: I1128 07:18:07.531204 4922 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"20f82159e489b6cb472be918aead4c432c9f5f948e88df6c5be425834b6fedbc"} err="failed to get container status \"20f82159e489b6cb472be918aead4c432c9f5f948e88df6c5be425834b6fedbc\": rpc error: code = NotFound desc = could not find container \"20f82159e489b6cb472be918aead4c432c9f5f948e88df6c5be425834b6fedbc\": container with ID starting with 20f82159e489b6cb472be918aead4c432c9f5f948e88df6c5be425834b6fedbc not found: ID does not exist" Nov 28 07:18:07 crc kubenswrapper[4922]: I1128 07:18:07.531229 4922 scope.go:117] "RemoveContainer" containerID="8e7719ea7956a92a12da3a26a3db3542e7fae883e1ad4c52618dad605078d602" Nov 28 07:18:07 crc kubenswrapper[4922]: E1128 07:18:07.531833 4922 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8e7719ea7956a92a12da3a26a3db3542e7fae883e1ad4c52618dad605078d602\": container with ID starting with 8e7719ea7956a92a12da3a26a3db3542e7fae883e1ad4c52618dad605078d602 not found: ID does not exist" containerID="8e7719ea7956a92a12da3a26a3db3542e7fae883e1ad4c52618dad605078d602" Nov 28 07:18:07 crc kubenswrapper[4922]: I1128 07:18:07.531864 4922 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8e7719ea7956a92a12da3a26a3db3542e7fae883e1ad4c52618dad605078d602"} err="failed to get container status \"8e7719ea7956a92a12da3a26a3db3542e7fae883e1ad4c52618dad605078d602\": rpc error: code = NotFound desc = could not find container \"8e7719ea7956a92a12da3a26a3db3542e7fae883e1ad4c52618dad605078d602\": container with ID starting with 
8e7719ea7956a92a12da3a26a3db3542e7fae883e1ad4c52618dad605078d602 not found: ID does not exist" Nov 28 07:18:07 crc kubenswrapper[4922]: I1128 07:18:07.628068 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c5b2a607-b6c1-4e95-b722-8b150c25f371-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"c5b2a607-b6c1-4e95-b722-8b150c25f371\") " pod="openstack/ceilometer-0" Nov 28 07:18:07 crc kubenswrapper[4922]: I1128 07:18:07.628366 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c5b2a607-b6c1-4e95-b722-8b150c25f371-scripts\") pod \"ceilometer-0\" (UID: \"c5b2a607-b6c1-4e95-b722-8b150c25f371\") " pod="openstack/ceilometer-0" Nov 28 07:18:07 crc kubenswrapper[4922]: I1128 07:18:07.628491 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/c5b2a607-b6c1-4e95-b722-8b150c25f371-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"c5b2a607-b6c1-4e95-b722-8b150c25f371\") " pod="openstack/ceilometer-0" Nov 28 07:18:07 crc kubenswrapper[4922]: I1128 07:18:07.628667 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/c5b2a607-b6c1-4e95-b722-8b150c25f371-log-httpd\") pod \"ceilometer-0\" (UID: \"c5b2a607-b6c1-4e95-b722-8b150c25f371\") " pod="openstack/ceilometer-0" Nov 28 07:18:07 crc kubenswrapper[4922]: I1128 07:18:07.629144 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/c5b2a607-b6c1-4e95-b722-8b150c25f371-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"c5b2a607-b6c1-4e95-b722-8b150c25f371\") " pod="openstack/ceilometer-0" Nov 28 07:18:07 crc kubenswrapper[4922]: I1128 07:18:07.629402 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/c5b2a607-b6c1-4e95-b722-8b150c25f371-run-httpd\") pod \"ceilometer-0\" (UID: \"c5b2a607-b6c1-4e95-b722-8b150c25f371\") " pod="openstack/ceilometer-0" Nov 28 07:18:07 crc kubenswrapper[4922]: I1128 07:18:07.629544 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c5b2a607-b6c1-4e95-b722-8b150c25f371-config-data\") pod \"ceilometer-0\" (UID: \"c5b2a607-b6c1-4e95-b722-8b150c25f371\") " pod="openstack/ceilometer-0" Nov 28 07:18:07 crc kubenswrapper[4922]: I1128 07:18:07.629700 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9mhwk\" (UniqueName: \"kubernetes.io/projected/c5b2a607-b6c1-4e95-b722-8b150c25f371-kube-api-access-9mhwk\") pod \"ceilometer-0\" (UID: \"c5b2a607-b6c1-4e95-b722-8b150c25f371\") " pod="openstack/ceilometer-0" Nov 28 07:18:07 crc kubenswrapper[4922]: I1128 07:18:07.730865 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/c5b2a607-b6c1-4e95-b722-8b150c25f371-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"c5b2a607-b6c1-4e95-b722-8b150c25f371\") " pod="openstack/ceilometer-0" Nov 28 07:18:07 crc kubenswrapper[4922]: I1128 07:18:07.730919 4922 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/c5b2a607-b6c1-4e95-b722-8b150c25f371-run-httpd\") pod \"ceilometer-0\" (UID: \"c5b2a607-b6c1-4e95-b722-8b150c25f371\") " pod="openstack/ceilometer-0" Nov 28 07:18:07 crc kubenswrapper[4922]: I1128 07:18:07.730957 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c5b2a607-b6c1-4e95-b722-8b150c25f371-config-data\") pod \"ceilometer-0\" (UID: \"c5b2a607-b6c1-4e95-b722-8b150c25f371\") " pod="openstack/ceilometer-0" Nov 28 07:18:07 crc kubenswrapper[4922]: I1128 07:18:07.730978 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9mhwk\" (UniqueName: \"kubernetes.io/projected/c5b2a607-b6c1-4e95-b722-8b150c25f371-kube-api-access-9mhwk\") pod \"ceilometer-0\" (UID: \"c5b2a607-b6c1-4e95-b722-8b150c25f371\") " pod="openstack/ceilometer-0" Nov 28 07:18:07 crc kubenswrapper[4922]: I1128 07:18:07.731017 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c5b2a607-b6c1-4e95-b722-8b150c25f371-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"c5b2a607-b6c1-4e95-b722-8b150c25f371\") " pod="openstack/ceilometer-0" Nov 28 07:18:07 crc kubenswrapper[4922]: I1128 07:18:07.731053 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c5b2a607-b6c1-4e95-b722-8b150c25f371-scripts\") pod \"ceilometer-0\" (UID: \"c5b2a607-b6c1-4e95-b722-8b150c25f371\") " pod="openstack/ceilometer-0" Nov 28 07:18:07 crc kubenswrapper[4922]: I1128 07:18:07.731076 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/c5b2a607-b6c1-4e95-b722-8b150c25f371-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"c5b2a607-b6c1-4e95-b722-8b150c25f371\") " pod="openstack/ceilometer-0" Nov 28 07:18:07 crc kubenswrapper[4922]: I1128 07:18:07.731105 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/c5b2a607-b6c1-4e95-b722-8b150c25f371-log-httpd\") pod \"ceilometer-0\" (UID: \"c5b2a607-b6c1-4e95-b722-8b150c25f371\") " pod="openstack/ceilometer-0" Nov 28 07:18:07 crc kubenswrapper[4922]: I1128 07:18:07.731595 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/c5b2a607-b6c1-4e95-b722-8b150c25f371-log-httpd\") pod \"ceilometer-0\" (UID: \"c5b2a607-b6c1-4e95-b722-8b150c25f371\") " pod="openstack/ceilometer-0" Nov 28 07:18:07 crc kubenswrapper[4922]: I1128 07:18:07.731821 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/c5b2a607-b6c1-4e95-b722-8b150c25f371-run-httpd\") pod \"ceilometer-0\" (UID: \"c5b2a607-b6c1-4e95-b722-8b150c25f371\") " pod="openstack/ceilometer-0" Nov 28 07:18:07 crc kubenswrapper[4922]: I1128 07:18:07.735674 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c5b2a607-b6c1-4e95-b722-8b150c25f371-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"c5b2a607-b6c1-4e95-b722-8b150c25f371\") " pod="openstack/ceilometer-0" Nov 28 07:18:07 crc kubenswrapper[4922]: I1128 07:18:07.736350 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"scripts\" (UniqueName: \"kubernetes.io/secret/c5b2a607-b6c1-4e95-b722-8b150c25f371-scripts\") pod \"ceilometer-0\" (UID: \"c5b2a607-b6c1-4e95-b722-8b150c25f371\") " pod="openstack/ceilometer-0" Nov 28 07:18:07 crc kubenswrapper[4922]: I1128 07:18:07.737164 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/c5b2a607-b6c1-4e95-b722-8b150c25f371-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"c5b2a607-b6c1-4e95-b722-8b150c25f371\") " pod="openstack/ceilometer-0" Nov 28 07:18:07 crc kubenswrapper[4922]: I1128 07:18:07.737661 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/c5b2a607-b6c1-4e95-b722-8b150c25f371-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"c5b2a607-b6c1-4e95-b722-8b150c25f371\") " pod="openstack/ceilometer-0" Nov 28 07:18:07 crc kubenswrapper[4922]: I1128 07:18:07.740025 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c5b2a607-b6c1-4e95-b722-8b150c25f371-config-data\") pod \"ceilometer-0\" (UID: \"c5b2a607-b6c1-4e95-b722-8b150c25f371\") " pod="openstack/ceilometer-0" Nov 28 07:18:07 crc kubenswrapper[4922]: I1128 07:18:07.761334 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9mhwk\" (UniqueName: \"kubernetes.io/projected/c5b2a607-b6c1-4e95-b722-8b150c25f371-kube-api-access-9mhwk\") pod \"ceilometer-0\" (UID: \"c5b2a607-b6c1-4e95-b722-8b150c25f371\") " pod="openstack/ceilometer-0" Nov 28 07:18:07 crc kubenswrapper[4922]: I1128 07:18:07.790899 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 28 07:18:08 crc kubenswrapper[4922]: I1128 07:18:08.309756 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 28 07:18:08 crc kubenswrapper[4922]: W1128 07:18:08.320851 4922 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podc5b2a607_b6c1_4e95_b722_8b150c25f371.slice/crio-4720d32ff570d517cde351d4da303593a2676aa6f64374e9373752f76e7c7438 WatchSource:0}: Error finding container 4720d32ff570d517cde351d4da303593a2676aa6f64374e9373752f76e7c7438: Status 404 returned error can't find the container with id 4720d32ff570d517cde351d4da303593a2676aa6f64374e9373752f76e7c7438 Nov 28 07:18:08 crc kubenswrapper[4922]: I1128 07:18:08.401975 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"c5b2a607-b6c1-4e95-b722-8b150c25f371","Type":"ContainerStarted","Data":"4720d32ff570d517cde351d4da303593a2676aa6f64374e9373752f76e7c7438"} Nov 28 07:18:08 crc kubenswrapper[4922]: I1128 07:18:08.987367 4922 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Nov 28 07:18:09 crc kubenswrapper[4922]: I1128 07:18:09.187072 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-sg6qc\" (UniqueName: \"kubernetes.io/projected/541557ff-fa14-4f6c-b957-716ba8fdb38e-kube-api-access-sg6qc\") pod \"541557ff-fa14-4f6c-b957-716ba8fdb38e\" (UID: \"541557ff-fa14-4f6c-b957-716ba8fdb38e\") " Nov 28 07:18:09 crc kubenswrapper[4922]: I1128 07:18:09.187416 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/541557ff-fa14-4f6c-b957-716ba8fdb38e-combined-ca-bundle\") pod \"541557ff-fa14-4f6c-b957-716ba8fdb38e\" (UID: \"541557ff-fa14-4f6c-b957-716ba8fdb38e\") " Nov 28 07:18:09 crc kubenswrapper[4922]: I1128 07:18:09.187576 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/541557ff-fa14-4f6c-b957-716ba8fdb38e-config-data\") pod \"541557ff-fa14-4f6c-b957-716ba8fdb38e\" (UID: \"541557ff-fa14-4f6c-b957-716ba8fdb38e\") " Nov 28 07:18:09 crc kubenswrapper[4922]: I1128 07:18:09.187853 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/541557ff-fa14-4f6c-b957-716ba8fdb38e-logs\") pod \"541557ff-fa14-4f6c-b957-716ba8fdb38e\" (UID: \"541557ff-fa14-4f6c-b957-716ba8fdb38e\") " Nov 28 07:18:09 crc kubenswrapper[4922]: I1128 07:18:09.188400 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/541557ff-fa14-4f6c-b957-716ba8fdb38e-logs" (OuterVolumeSpecName: "logs") pod "541557ff-fa14-4f6c-b957-716ba8fdb38e" (UID: "541557ff-fa14-4f6c-b957-716ba8fdb38e"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 07:18:09 crc kubenswrapper[4922]: I1128 07:18:09.194568 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/541557ff-fa14-4f6c-b957-716ba8fdb38e-kube-api-access-sg6qc" (OuterVolumeSpecName: "kube-api-access-sg6qc") pod "541557ff-fa14-4f6c-b957-716ba8fdb38e" (UID: "541557ff-fa14-4f6c-b957-716ba8fdb38e"). InnerVolumeSpecName "kube-api-access-sg6qc". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 07:18:09 crc kubenswrapper[4922]: I1128 07:18:09.226729 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/541557ff-fa14-4f6c-b957-716ba8fdb38e-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "541557ff-fa14-4f6c-b957-716ba8fdb38e" (UID: "541557ff-fa14-4f6c-b957-716ba8fdb38e"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 07:18:09 crc kubenswrapper[4922]: I1128 07:18:09.234323 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/541557ff-fa14-4f6c-b957-716ba8fdb38e-config-data" (OuterVolumeSpecName: "config-data") pod "541557ff-fa14-4f6c-b957-716ba8fdb38e" (UID: "541557ff-fa14-4f6c-b957-716ba8fdb38e"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 07:18:09 crc kubenswrapper[4922]: I1128 07:18:09.290162 4922 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-sg6qc\" (UniqueName: \"kubernetes.io/projected/541557ff-fa14-4f6c-b957-716ba8fdb38e-kube-api-access-sg6qc\") on node \"crc\" DevicePath \"\"" Nov 28 07:18:09 crc kubenswrapper[4922]: I1128 07:18:09.290192 4922 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/541557ff-fa14-4f6c-b957-716ba8fdb38e-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 07:18:09 crc kubenswrapper[4922]: I1128 07:18:09.290201 4922 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/541557ff-fa14-4f6c-b957-716ba8fdb38e-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 07:18:09 crc kubenswrapper[4922]: I1128 07:18:09.290210 4922 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/541557ff-fa14-4f6c-b957-716ba8fdb38e-logs\") on node \"crc\" DevicePath \"\"" Nov 28 07:18:09 crc kubenswrapper[4922]: I1128 07:18:09.415695 4922 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ddc9a938-678c-447d-9d92-5aae0ba2a3f2" path="/var/lib/kubelet/pods/ddc9a938-678c-447d-9d92-5aae0ba2a3f2/volumes" Nov 28 07:18:09 crc kubenswrapper[4922]: I1128 07:18:09.420696 4922 generic.go:334] "Generic (PLEG): container finished" podID="541557ff-fa14-4f6c-b957-716ba8fdb38e" containerID="db0c7b510a2f2c391d9395ac5a825a0b3f234fedbbdec3ce3eba8ee1d119e43b" exitCode=0 Nov 28 07:18:09 crc kubenswrapper[4922]: I1128 07:18:09.420800 4922 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Nov 28 07:18:09 crc kubenswrapper[4922]: I1128 07:18:09.422781 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"541557ff-fa14-4f6c-b957-716ba8fdb38e","Type":"ContainerDied","Data":"db0c7b510a2f2c391d9395ac5a825a0b3f234fedbbdec3ce3eba8ee1d119e43b"} Nov 28 07:18:09 crc kubenswrapper[4922]: I1128 07:18:09.422817 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"541557ff-fa14-4f6c-b957-716ba8fdb38e","Type":"ContainerDied","Data":"111ac3a70f185bf85575d6320d90c3fb43a0236bf9cc734aca3b441aaa7b310e"} Nov 28 07:18:09 crc kubenswrapper[4922]: I1128 07:18:09.422839 4922 scope.go:117] "RemoveContainer" containerID="db0c7b510a2f2c391d9395ac5a825a0b3f234fedbbdec3ce3eba8ee1d119e43b" Nov 28 07:18:09 crc kubenswrapper[4922]: I1128 07:18:09.461960 4922 scope.go:117] "RemoveContainer" containerID="c97e47104c2d6277d0f91592a43e2278122aadcb3c00b2e446aa63c87643d4f1" Nov 28 07:18:09 crc kubenswrapper[4922]: I1128 07:18:09.499417 4922 scope.go:117] "RemoveContainer" containerID="db0c7b510a2f2c391d9395ac5a825a0b3f234fedbbdec3ce3eba8ee1d119e43b" Nov 28 07:18:09 crc kubenswrapper[4922]: E1128 07:18:09.502410 4922 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"db0c7b510a2f2c391d9395ac5a825a0b3f234fedbbdec3ce3eba8ee1d119e43b\": container with ID starting with db0c7b510a2f2c391d9395ac5a825a0b3f234fedbbdec3ce3eba8ee1d119e43b not found: ID does not exist" containerID="db0c7b510a2f2c391d9395ac5a825a0b3f234fedbbdec3ce3eba8ee1d119e43b" Nov 28 07:18:09 crc kubenswrapper[4922]: I1128 07:18:09.502448 4922 pod_container_deletor.go:53] "DeleteContainer returned error" 
containerID={"Type":"cri-o","ID":"db0c7b510a2f2c391d9395ac5a825a0b3f234fedbbdec3ce3eba8ee1d119e43b"} err="failed to get container status \"db0c7b510a2f2c391d9395ac5a825a0b3f234fedbbdec3ce3eba8ee1d119e43b\": rpc error: code = NotFound desc = could not find container \"db0c7b510a2f2c391d9395ac5a825a0b3f234fedbbdec3ce3eba8ee1d119e43b\": container with ID starting with db0c7b510a2f2c391d9395ac5a825a0b3f234fedbbdec3ce3eba8ee1d119e43b not found: ID does not exist" Nov 28 07:18:09 crc kubenswrapper[4922]: I1128 07:18:09.502475 4922 scope.go:117] "RemoveContainer" containerID="c97e47104c2d6277d0f91592a43e2278122aadcb3c00b2e446aa63c87643d4f1" Nov 28 07:18:09 crc kubenswrapper[4922]: E1128 07:18:09.504528 4922 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c97e47104c2d6277d0f91592a43e2278122aadcb3c00b2e446aa63c87643d4f1\": container with ID starting with c97e47104c2d6277d0f91592a43e2278122aadcb3c00b2e446aa63c87643d4f1 not found: ID does not exist" containerID="c97e47104c2d6277d0f91592a43e2278122aadcb3c00b2e446aa63c87643d4f1" Nov 28 07:18:09 crc kubenswrapper[4922]: I1128 07:18:09.504562 4922 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c97e47104c2d6277d0f91592a43e2278122aadcb3c00b2e446aa63c87643d4f1"} err="failed to get container status \"c97e47104c2d6277d0f91592a43e2278122aadcb3c00b2e446aa63c87643d4f1\": rpc error: code = NotFound desc = could not find container \"c97e47104c2d6277d0f91592a43e2278122aadcb3c00b2e446aa63c87643d4f1\": container with ID starting with c97e47104c2d6277d0f91592a43e2278122aadcb3c00b2e446aa63c87643d4f1 not found: ID does not exist" Nov 28 07:18:10 crc kubenswrapper[4922]: I1128 07:18:10.436104 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"c5b2a607-b6c1-4e95-b722-8b150c25f371","Type":"ContainerStarted","Data":"3f6adb9e4600f443d95c7ef3d113f08146c9cb6aad10859be6aa10d59ce782f0"} Nov 28 07:18:10 crc kubenswrapper[4922]: I1128 07:18:10.436621 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"c5b2a607-b6c1-4e95-b722-8b150c25f371","Type":"ContainerStarted","Data":"76e1e7d5729fdebb0173eee23985b2e33e4bffc2543f1da0f92592472e21c4ec"} Nov 28 07:18:10 crc kubenswrapper[4922]: I1128 07:18:10.687999 4922 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-metadata-0" Nov 28 07:18:10 crc kubenswrapper[4922]: I1128 07:18:10.688048 4922 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-metadata-0" Nov 28 07:18:10 crc kubenswrapper[4922]: I1128 07:18:10.749482 4922 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-cell1-novncproxy-0" Nov 28 07:18:10 crc kubenswrapper[4922]: I1128 07:18:10.768746 4922 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-cell1-novncproxy-0" Nov 28 07:18:11 crc kubenswrapper[4922]: I1128 07:18:11.449453 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"c5b2a607-b6c1-4e95-b722-8b150c25f371","Type":"ContainerStarted","Data":"1e15529dfacc098964836d9b402c0c6f8dfa68f13f57273de3bff5e93b296db5"} Nov 28 07:18:11 crc kubenswrapper[4922]: I1128 07:18:11.463994 4922 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-cell1-novncproxy-0" Nov 28 07:18:11 crc kubenswrapper[4922]: I1128 07:18:11.648483 4922 
kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-cell-mapping-jrqbz"] Nov 28 07:18:11 crc kubenswrapper[4922]: E1128 07:18:11.649838 4922 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="541557ff-fa14-4f6c-b957-716ba8fdb38e" containerName="nova-api-log" Nov 28 07:18:11 crc kubenswrapper[4922]: I1128 07:18:11.649852 4922 state_mem.go:107] "Deleted CPUSet assignment" podUID="541557ff-fa14-4f6c-b957-716ba8fdb38e" containerName="nova-api-log" Nov 28 07:18:11 crc kubenswrapper[4922]: E1128 07:18:11.649898 4922 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="541557ff-fa14-4f6c-b957-716ba8fdb38e" containerName="nova-api-api" Nov 28 07:18:11 crc kubenswrapper[4922]: I1128 07:18:11.649905 4922 state_mem.go:107] "Deleted CPUSet assignment" podUID="541557ff-fa14-4f6c-b957-716ba8fdb38e" containerName="nova-api-api" Nov 28 07:18:11 crc kubenswrapper[4922]: I1128 07:18:11.650592 4922 memory_manager.go:354] "RemoveStaleState removing state" podUID="541557ff-fa14-4f6c-b957-716ba8fdb38e" containerName="nova-api-api" Nov 28 07:18:11 crc kubenswrapper[4922]: I1128 07:18:11.650623 4922 memory_manager.go:354] "RemoveStaleState removing state" podUID="541557ff-fa14-4f6c-b957-716ba8fdb38e" containerName="nova-api-log" Nov 28 07:18:11 crc kubenswrapper[4922]: I1128 07:18:11.652494 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-cell-mapping-jrqbz" Nov 28 07:18:11 crc kubenswrapper[4922]: I1128 07:18:11.656696 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-manage-config-data" Nov 28 07:18:11 crc kubenswrapper[4922]: I1128 07:18:11.659242 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-cell-mapping-jrqbz"] Nov 28 07:18:11 crc kubenswrapper[4922]: I1128 07:18:11.670241 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-manage-scripts" Nov 28 07:18:11 crc kubenswrapper[4922]: I1128 07:18:11.716387 4922 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-metadata-0" podUID="0607507d-9b30-4943-91d4-b1de3122188c" containerName="nova-metadata-log" probeResult="failure" output="Get \"https://10.217.0.193:8775/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 28 07:18:11 crc kubenswrapper[4922]: I1128 07:18:11.716749 4922 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-metadata-0" podUID="0607507d-9b30-4943-91d4-b1de3122188c" containerName="nova-metadata-metadata" probeResult="failure" output="Get \"https://10.217.0.193:8775/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Nov 28 07:18:11 crc kubenswrapper[4922]: I1128 07:18:11.764368 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7d1b0372-7d56-456f-bcb5-2bad202c6f8f-config-data\") pod \"nova-cell1-cell-mapping-jrqbz\" (UID: \"7d1b0372-7d56-456f-bcb5-2bad202c6f8f\") " pod="openstack/nova-cell1-cell-mapping-jrqbz" Nov 28 07:18:11 crc kubenswrapper[4922]: I1128 07:18:11.764649 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7d1b0372-7d56-456f-bcb5-2bad202c6f8f-scripts\") pod \"nova-cell1-cell-mapping-jrqbz\" (UID: \"7d1b0372-7d56-456f-bcb5-2bad202c6f8f\") " pod="openstack/nova-cell1-cell-mapping-jrqbz" Nov 28 07:18:11 crc kubenswrapper[4922]: 
I1128 07:18:11.764738 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-s6vcr\" (UniqueName: \"kubernetes.io/projected/7d1b0372-7d56-456f-bcb5-2bad202c6f8f-kube-api-access-s6vcr\") pod \"nova-cell1-cell-mapping-jrqbz\" (UID: \"7d1b0372-7d56-456f-bcb5-2bad202c6f8f\") " pod="openstack/nova-cell1-cell-mapping-jrqbz" Nov 28 07:18:11 crc kubenswrapper[4922]: I1128 07:18:11.764845 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7d1b0372-7d56-456f-bcb5-2bad202c6f8f-combined-ca-bundle\") pod \"nova-cell1-cell-mapping-jrqbz\" (UID: \"7d1b0372-7d56-456f-bcb5-2bad202c6f8f\") " pod="openstack/nova-cell1-cell-mapping-jrqbz" Nov 28 07:18:11 crc kubenswrapper[4922]: I1128 07:18:11.866786 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7d1b0372-7d56-456f-bcb5-2bad202c6f8f-scripts\") pod \"nova-cell1-cell-mapping-jrqbz\" (UID: \"7d1b0372-7d56-456f-bcb5-2bad202c6f8f\") " pod="openstack/nova-cell1-cell-mapping-jrqbz" Nov 28 07:18:11 crc kubenswrapper[4922]: I1128 07:18:11.866847 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s6vcr\" (UniqueName: \"kubernetes.io/projected/7d1b0372-7d56-456f-bcb5-2bad202c6f8f-kube-api-access-s6vcr\") pod \"nova-cell1-cell-mapping-jrqbz\" (UID: \"7d1b0372-7d56-456f-bcb5-2bad202c6f8f\") " pod="openstack/nova-cell1-cell-mapping-jrqbz" Nov 28 07:18:11 crc kubenswrapper[4922]: I1128 07:18:11.866895 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7d1b0372-7d56-456f-bcb5-2bad202c6f8f-combined-ca-bundle\") pod \"nova-cell1-cell-mapping-jrqbz\" (UID: \"7d1b0372-7d56-456f-bcb5-2bad202c6f8f\") " pod="openstack/nova-cell1-cell-mapping-jrqbz" Nov 28 07:18:11 crc kubenswrapper[4922]: I1128 07:18:11.866964 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7d1b0372-7d56-456f-bcb5-2bad202c6f8f-config-data\") pod \"nova-cell1-cell-mapping-jrqbz\" (UID: \"7d1b0372-7d56-456f-bcb5-2bad202c6f8f\") " pod="openstack/nova-cell1-cell-mapping-jrqbz" Nov 28 07:18:11 crc kubenswrapper[4922]: I1128 07:18:11.871814 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7d1b0372-7d56-456f-bcb5-2bad202c6f8f-combined-ca-bundle\") pod \"nova-cell1-cell-mapping-jrqbz\" (UID: \"7d1b0372-7d56-456f-bcb5-2bad202c6f8f\") " pod="openstack/nova-cell1-cell-mapping-jrqbz" Nov 28 07:18:11 crc kubenswrapper[4922]: I1128 07:18:11.872929 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7d1b0372-7d56-456f-bcb5-2bad202c6f8f-config-data\") pod \"nova-cell1-cell-mapping-jrqbz\" (UID: \"7d1b0372-7d56-456f-bcb5-2bad202c6f8f\") " pod="openstack/nova-cell1-cell-mapping-jrqbz" Nov 28 07:18:11 crc kubenswrapper[4922]: I1128 07:18:11.877041 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7d1b0372-7d56-456f-bcb5-2bad202c6f8f-scripts\") pod \"nova-cell1-cell-mapping-jrqbz\" (UID: \"7d1b0372-7d56-456f-bcb5-2bad202c6f8f\") " pod="openstack/nova-cell1-cell-mapping-jrqbz" Nov 28 07:18:11 crc kubenswrapper[4922]: I1128 07:18:11.889144 4922 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s6vcr\" (UniqueName: \"kubernetes.io/projected/7d1b0372-7d56-456f-bcb5-2bad202c6f8f-kube-api-access-s6vcr\") pod \"nova-cell1-cell-mapping-jrqbz\" (UID: \"7d1b0372-7d56-456f-bcb5-2bad202c6f8f\") " pod="openstack/nova-cell1-cell-mapping-jrqbz" Nov 28 07:18:11 crc kubenswrapper[4922]: I1128 07:18:11.971949 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-cell-mapping-jrqbz" Nov 28 07:18:12 crc kubenswrapper[4922]: I1128 07:18:12.456992 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-cell-mapping-jrqbz"] Nov 28 07:18:12 crc kubenswrapper[4922]: W1128 07:18:12.464398 4922 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod7d1b0372_7d56_456f_bcb5_2bad202c6f8f.slice/crio-39c2434a3d4b32d203cc3f6943907c0a58cb47962681d46a0ac8b84633813961 WatchSource:0}: Error finding container 39c2434a3d4b32d203cc3f6943907c0a58cb47962681d46a0ac8b84633813961: Status 404 returned error can't find the container with id 39c2434a3d4b32d203cc3f6943907c0a58cb47962681d46a0ac8b84633813961 Nov 28 07:18:12 crc kubenswrapper[4922]: I1128 07:18:12.932389 4922 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-6bc646c8f9-nfgpb" Nov 28 07:18:13 crc kubenswrapper[4922]: I1128 07:18:13.006557 4922 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-75df6cf455-8zvjq"] Nov 28 07:18:13 crc kubenswrapper[4922]: I1128 07:18:13.007006 4922 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-75df6cf455-8zvjq" podUID="54142b32-7142-41ee-af4b-4a020911c136" containerName="dnsmasq-dns" containerID="cri-o://481401725c47439fb318ffd00b81bd3da87b04044342d823c60fbcdefada5137" gracePeriod=10 Nov 28 07:18:13 crc kubenswrapper[4922]: I1128 07:18:13.469886 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-cell-mapping-jrqbz" event={"ID":"7d1b0372-7d56-456f-bcb5-2bad202c6f8f","Type":"ContainerStarted","Data":"795b36a26de43f1013175b895aa1a7b8536f7165adb0d077f3c2fe6338925633"} Nov 28 07:18:13 crc kubenswrapper[4922]: I1128 07:18:13.470235 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-cell-mapping-jrqbz" event={"ID":"7d1b0372-7d56-456f-bcb5-2bad202c6f8f","Type":"ContainerStarted","Data":"39c2434a3d4b32d203cc3f6943907c0a58cb47962681d46a0ac8b84633813961"} Nov 28 07:18:13 crc kubenswrapper[4922]: I1128 07:18:13.472126 4922 generic.go:334] "Generic (PLEG): container finished" podID="54142b32-7142-41ee-af4b-4a020911c136" containerID="481401725c47439fb318ffd00b81bd3da87b04044342d823c60fbcdefada5137" exitCode=0 Nov 28 07:18:13 crc kubenswrapper[4922]: I1128 07:18:13.472255 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-75df6cf455-8zvjq" event={"ID":"54142b32-7142-41ee-af4b-4a020911c136","Type":"ContainerDied","Data":"481401725c47439fb318ffd00b81bd3da87b04044342d823c60fbcdefada5137"} Nov 28 07:18:13 crc kubenswrapper[4922]: I1128 07:18:13.472286 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-75df6cf455-8zvjq" event={"ID":"54142b32-7142-41ee-af4b-4a020911c136","Type":"ContainerDied","Data":"401a537fd44921f6a08993dca9060a5101e75e0c84f05f3336a75fae2f033bf7"} Nov 28 07:18:13 crc kubenswrapper[4922]: I1128 07:18:13.472301 4922 pod_container_deletor.go:80] "Container not found 
in pod's containers" containerID="401a537fd44921f6a08993dca9060a5101e75e0c84f05f3336a75fae2f033bf7" Nov 28 07:18:13 crc kubenswrapper[4922]: I1128 07:18:13.475903 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"c5b2a607-b6c1-4e95-b722-8b150c25f371","Type":"ContainerStarted","Data":"46994e6ac2bb018930a78ad0cd1fe4646ebb6039110f4ddf448db9a5fa5fd689"} Nov 28 07:18:13 crc kubenswrapper[4922]: I1128 07:18:13.476051 4922 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Nov 28 07:18:13 crc kubenswrapper[4922]: I1128 07:18:13.497079 4922 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-cell-mapping-jrqbz" podStartSLOduration=2.497057781 podStartE2EDuration="2.497057781s" podCreationTimestamp="2025-11-28 07:18:11 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 07:18:13.486551772 +0000 UTC m=+1538.406947354" watchObservedRunningTime="2025-11-28 07:18:13.497057781 +0000 UTC m=+1538.417453373" Nov 28 07:18:13 crc kubenswrapper[4922]: I1128 07:18:13.525742 4922 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=2.214854988 podStartE2EDuration="6.525719174s" podCreationTimestamp="2025-11-28 07:18:07 +0000 UTC" firstStartedPulling="2025-11-28 07:18:08.326112022 +0000 UTC m=+1533.246507614" lastFinishedPulling="2025-11-28 07:18:12.636976208 +0000 UTC m=+1537.557371800" observedRunningTime="2025-11-28 07:18:13.512657256 +0000 UTC m=+1538.433052848" watchObservedRunningTime="2025-11-28 07:18:13.525719174 +0000 UTC m=+1538.446114756" Nov 28 07:18:13 crc kubenswrapper[4922]: I1128 07:18:13.567396 4922 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-75df6cf455-8zvjq" Nov 28 07:18:13 crc kubenswrapper[4922]: I1128 07:18:13.604064 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/54142b32-7142-41ee-af4b-4a020911c136-ovsdbserver-nb\") pod \"54142b32-7142-41ee-af4b-4a020911c136\" (UID: \"54142b32-7142-41ee-af4b-4a020911c136\") " Nov 28 07:18:13 crc kubenswrapper[4922]: I1128 07:18:13.604178 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/54142b32-7142-41ee-af4b-4a020911c136-dns-swift-storage-0\") pod \"54142b32-7142-41ee-af4b-4a020911c136\" (UID: \"54142b32-7142-41ee-af4b-4a020911c136\") " Nov 28 07:18:13 crc kubenswrapper[4922]: I1128 07:18:13.604261 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/54142b32-7142-41ee-af4b-4a020911c136-dns-svc\") pod \"54142b32-7142-41ee-af4b-4a020911c136\" (UID: \"54142b32-7142-41ee-af4b-4a020911c136\") " Nov 28 07:18:13 crc kubenswrapper[4922]: I1128 07:18:13.604441 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/54142b32-7142-41ee-af4b-4a020911c136-config\") pod \"54142b32-7142-41ee-af4b-4a020911c136\" (UID: \"54142b32-7142-41ee-af4b-4a020911c136\") " Nov 28 07:18:13 crc kubenswrapper[4922]: I1128 07:18:13.604471 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-znqxk\" (UniqueName: \"kubernetes.io/projected/54142b32-7142-41ee-af4b-4a020911c136-kube-api-access-znqxk\") pod \"54142b32-7142-41ee-af4b-4a020911c136\" (UID: \"54142b32-7142-41ee-af4b-4a020911c136\") " Nov 28 07:18:13 crc kubenswrapper[4922]: I1128 07:18:13.604500 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/54142b32-7142-41ee-af4b-4a020911c136-ovsdbserver-sb\") pod \"54142b32-7142-41ee-af4b-4a020911c136\" (UID: \"54142b32-7142-41ee-af4b-4a020911c136\") " Nov 28 07:18:13 crc kubenswrapper[4922]: I1128 07:18:13.621503 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/54142b32-7142-41ee-af4b-4a020911c136-kube-api-access-znqxk" (OuterVolumeSpecName: "kube-api-access-znqxk") pod "54142b32-7142-41ee-af4b-4a020911c136" (UID: "54142b32-7142-41ee-af4b-4a020911c136"). InnerVolumeSpecName "kube-api-access-znqxk". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 07:18:13 crc kubenswrapper[4922]: I1128 07:18:13.686620 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/54142b32-7142-41ee-af4b-4a020911c136-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "54142b32-7142-41ee-af4b-4a020911c136" (UID: "54142b32-7142-41ee-af4b-4a020911c136"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 07:18:13 crc kubenswrapper[4922]: I1128 07:18:13.689240 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/54142b32-7142-41ee-af4b-4a020911c136-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "54142b32-7142-41ee-af4b-4a020911c136" (UID: "54142b32-7142-41ee-af4b-4a020911c136"). InnerVolumeSpecName "ovsdbserver-nb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 07:18:13 crc kubenswrapper[4922]: I1128 07:18:13.705669 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/54142b32-7142-41ee-af4b-4a020911c136-config" (OuterVolumeSpecName: "config") pod "54142b32-7142-41ee-af4b-4a020911c136" (UID: "54142b32-7142-41ee-af4b-4a020911c136"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 07:18:13 crc kubenswrapper[4922]: I1128 07:18:13.708236 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/54142b32-7142-41ee-af4b-4a020911c136-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "54142b32-7142-41ee-af4b-4a020911c136" (UID: "54142b32-7142-41ee-af4b-4a020911c136"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 07:18:13 crc kubenswrapper[4922]: I1128 07:18:13.709606 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/54142b32-7142-41ee-af4b-4a020911c136-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "54142b32-7142-41ee-af4b-4a020911c136" (UID: "54142b32-7142-41ee-af4b-4a020911c136"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 07:18:13 crc kubenswrapper[4922]: I1128 07:18:13.722300 4922 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/54142b32-7142-41ee-af4b-4a020911c136-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 28 07:18:13 crc kubenswrapper[4922]: I1128 07:18:13.722334 4922 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/54142b32-7142-41ee-af4b-4a020911c136-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Nov 28 07:18:13 crc kubenswrapper[4922]: I1128 07:18:13.722347 4922 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/54142b32-7142-41ee-af4b-4a020911c136-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 28 07:18:13 crc kubenswrapper[4922]: I1128 07:18:13.722357 4922 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/54142b32-7142-41ee-af4b-4a020911c136-config\") on node \"crc\" DevicePath \"\"" Nov 28 07:18:13 crc kubenswrapper[4922]: I1128 07:18:13.722366 4922 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-znqxk\" (UniqueName: \"kubernetes.io/projected/54142b32-7142-41ee-af4b-4a020911c136-kube-api-access-znqxk\") on node \"crc\" DevicePath \"\"" Nov 28 07:18:13 crc kubenswrapper[4922]: I1128 07:18:13.722375 4922 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/54142b32-7142-41ee-af4b-4a020911c136-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 28 07:18:14 crc kubenswrapper[4922]: I1128 07:18:14.484086 4922 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-75df6cf455-8zvjq" Nov 28 07:18:14 crc kubenswrapper[4922]: I1128 07:18:14.517662 4922 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-75df6cf455-8zvjq"] Nov 28 07:18:14 crc kubenswrapper[4922]: I1128 07:18:14.524586 4922 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-75df6cf455-8zvjq"] Nov 28 07:18:15 crc kubenswrapper[4922]: I1128 07:18:15.427328 4922 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="54142b32-7142-41ee-af4b-4a020911c136" path="/var/lib/kubelet/pods/54142b32-7142-41ee-af4b-4a020911c136/volumes" Nov 28 07:18:17 crc kubenswrapper[4922]: I1128 07:18:17.519020 4922 generic.go:334] "Generic (PLEG): container finished" podID="7d1b0372-7d56-456f-bcb5-2bad202c6f8f" containerID="795b36a26de43f1013175b895aa1a7b8536f7165adb0d077f3c2fe6338925633" exitCode=0 Nov 28 07:18:17 crc kubenswrapper[4922]: I1128 07:18:17.519107 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-cell-mapping-jrqbz" event={"ID":"7d1b0372-7d56-456f-bcb5-2bad202c6f8f","Type":"ContainerDied","Data":"795b36a26de43f1013175b895aa1a7b8536f7165adb0d077f3c2fe6338925633"} Nov 28 07:18:19 crc kubenswrapper[4922]: I1128 07:18:19.029829 4922 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-cell-mapping-jrqbz" Nov 28 07:18:19 crc kubenswrapper[4922]: I1128 07:18:19.141875 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-s6vcr\" (UniqueName: \"kubernetes.io/projected/7d1b0372-7d56-456f-bcb5-2bad202c6f8f-kube-api-access-s6vcr\") pod \"7d1b0372-7d56-456f-bcb5-2bad202c6f8f\" (UID: \"7d1b0372-7d56-456f-bcb5-2bad202c6f8f\") " Nov 28 07:18:19 crc kubenswrapper[4922]: I1128 07:18:19.141998 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7d1b0372-7d56-456f-bcb5-2bad202c6f8f-combined-ca-bundle\") pod \"7d1b0372-7d56-456f-bcb5-2bad202c6f8f\" (UID: \"7d1b0372-7d56-456f-bcb5-2bad202c6f8f\") " Nov 28 07:18:19 crc kubenswrapper[4922]: I1128 07:18:19.142043 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7d1b0372-7d56-456f-bcb5-2bad202c6f8f-scripts\") pod \"7d1b0372-7d56-456f-bcb5-2bad202c6f8f\" (UID: \"7d1b0372-7d56-456f-bcb5-2bad202c6f8f\") " Nov 28 07:18:19 crc kubenswrapper[4922]: I1128 07:18:19.142086 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7d1b0372-7d56-456f-bcb5-2bad202c6f8f-config-data\") pod \"7d1b0372-7d56-456f-bcb5-2bad202c6f8f\" (UID: \"7d1b0372-7d56-456f-bcb5-2bad202c6f8f\") " Nov 28 07:18:19 crc kubenswrapper[4922]: I1128 07:18:19.148151 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7d1b0372-7d56-456f-bcb5-2bad202c6f8f-kube-api-access-s6vcr" (OuterVolumeSpecName: "kube-api-access-s6vcr") pod "7d1b0372-7d56-456f-bcb5-2bad202c6f8f" (UID: "7d1b0372-7d56-456f-bcb5-2bad202c6f8f"). InnerVolumeSpecName "kube-api-access-s6vcr". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 07:18:19 crc kubenswrapper[4922]: I1128 07:18:19.148663 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7d1b0372-7d56-456f-bcb5-2bad202c6f8f-scripts" (OuterVolumeSpecName: "scripts") pod "7d1b0372-7d56-456f-bcb5-2bad202c6f8f" (UID: "7d1b0372-7d56-456f-bcb5-2bad202c6f8f"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 07:18:19 crc kubenswrapper[4922]: I1128 07:18:19.177416 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7d1b0372-7d56-456f-bcb5-2bad202c6f8f-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "7d1b0372-7d56-456f-bcb5-2bad202c6f8f" (UID: "7d1b0372-7d56-456f-bcb5-2bad202c6f8f"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 07:18:19 crc kubenswrapper[4922]: I1128 07:18:19.181507 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7d1b0372-7d56-456f-bcb5-2bad202c6f8f-config-data" (OuterVolumeSpecName: "config-data") pod "7d1b0372-7d56-456f-bcb5-2bad202c6f8f" (UID: "7d1b0372-7d56-456f-bcb5-2bad202c6f8f"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 07:18:19 crc kubenswrapper[4922]: I1128 07:18:19.244047 4922 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-s6vcr\" (UniqueName: \"kubernetes.io/projected/7d1b0372-7d56-456f-bcb5-2bad202c6f8f-kube-api-access-s6vcr\") on node \"crc\" DevicePath \"\"" Nov 28 07:18:19 crc kubenswrapper[4922]: I1128 07:18:19.244426 4922 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7d1b0372-7d56-456f-bcb5-2bad202c6f8f-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 07:18:19 crc kubenswrapper[4922]: I1128 07:18:19.244441 4922 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7d1b0372-7d56-456f-bcb5-2bad202c6f8f-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 07:18:19 crc kubenswrapper[4922]: I1128 07:18:19.244453 4922 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7d1b0372-7d56-456f-bcb5-2bad202c6f8f-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 07:18:19 crc kubenswrapper[4922]: I1128 07:18:19.545709 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-cell-mapping-jrqbz" event={"ID":"7d1b0372-7d56-456f-bcb5-2bad202c6f8f","Type":"ContainerDied","Data":"39c2434a3d4b32d203cc3f6943907c0a58cb47962681d46a0ac8b84633813961"} Nov 28 07:18:19 crc kubenswrapper[4922]: I1128 07:18:19.545769 4922 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="39c2434a3d4b32d203cc3f6943907c0a58cb47962681d46a0ac8b84633813961" Nov 28 07:18:19 crc kubenswrapper[4922]: I1128 07:18:19.545770 4922 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-cell-mapping-jrqbz" Nov 28 07:18:19 crc kubenswrapper[4922]: I1128 07:18:19.732076 4922 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"] Nov 28 07:18:19 crc kubenswrapper[4922]: I1128 07:18:19.732307 4922 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-scheduler-0" podUID="0422898c-d82b-4976-add8-6d15f54781e1" containerName="nova-scheduler-scheduler" containerID="cri-o://ac60a9b1c6addd63b2ba146072d744df055b956e6e9bfa45aa75d108e531614f" gracePeriod=30 Nov 28 07:18:19 crc kubenswrapper[4922]: I1128 07:18:19.782384 4922 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Nov 28 07:18:19 crc kubenswrapper[4922]: I1128 07:18:19.782644 4922 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="0607507d-9b30-4943-91d4-b1de3122188c" containerName="nova-metadata-log" containerID="cri-o://9feb35ce2c7719553854c5f271fad97fb06d43980358752d4b7e7759514b203f" gracePeriod=30 Nov 28 07:18:19 crc kubenswrapper[4922]: I1128 07:18:19.782801 4922 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="0607507d-9b30-4943-91d4-b1de3122188c" containerName="nova-metadata-metadata" containerID="cri-o://a7cd549dbd5f930c4e37c52941bb6843d5700691d4aedf95168f04ee13f67d40" gracePeriod=30 Nov 28 07:18:20 crc kubenswrapper[4922]: I1128 07:18:20.439523 4922 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-l66w6"] Nov 28 07:18:20 crc kubenswrapper[4922]: E1128 07:18:20.440258 4922 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7d1b0372-7d56-456f-bcb5-2bad202c6f8f" containerName="nova-manage" Nov 28 07:18:20 crc kubenswrapper[4922]: I1128 07:18:20.440276 4922 state_mem.go:107] "Deleted CPUSet assignment" podUID="7d1b0372-7d56-456f-bcb5-2bad202c6f8f" containerName="nova-manage" Nov 28 07:18:20 crc kubenswrapper[4922]: E1128 07:18:20.440308 4922 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="54142b32-7142-41ee-af4b-4a020911c136" containerName="dnsmasq-dns" Nov 28 07:18:20 crc kubenswrapper[4922]: I1128 07:18:20.440317 4922 state_mem.go:107] "Deleted CPUSet assignment" podUID="54142b32-7142-41ee-af4b-4a020911c136" containerName="dnsmasq-dns" Nov 28 07:18:20 crc kubenswrapper[4922]: E1128 07:18:20.440331 4922 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="54142b32-7142-41ee-af4b-4a020911c136" containerName="init" Nov 28 07:18:20 crc kubenswrapper[4922]: I1128 07:18:20.440341 4922 state_mem.go:107] "Deleted CPUSet assignment" podUID="54142b32-7142-41ee-af4b-4a020911c136" containerName="init" Nov 28 07:18:20 crc kubenswrapper[4922]: I1128 07:18:20.440526 4922 memory_manager.go:354] "RemoveStaleState removing state" podUID="7d1b0372-7d56-456f-bcb5-2bad202c6f8f" containerName="nova-manage" Nov 28 07:18:20 crc kubenswrapper[4922]: I1128 07:18:20.440540 4922 memory_manager.go:354] "RemoveStaleState removing state" podUID="54142b32-7142-41ee-af4b-4a020911c136" containerName="dnsmasq-dns" Nov 28 07:18:20 crc kubenswrapper[4922]: I1128 07:18:20.441939 4922 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-l66w6" Nov 28 07:18:20 crc kubenswrapper[4922]: I1128 07:18:20.451932 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-l66w6"] Nov 28 07:18:20 crc kubenswrapper[4922]: I1128 07:18:20.561598 4922 generic.go:334] "Generic (PLEG): container finished" podID="0422898c-d82b-4976-add8-6d15f54781e1" containerID="ac60a9b1c6addd63b2ba146072d744df055b956e6e9bfa45aa75d108e531614f" exitCode=0 Nov 28 07:18:20 crc kubenswrapper[4922]: I1128 07:18:20.561656 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"0422898c-d82b-4976-add8-6d15f54781e1","Type":"ContainerDied","Data":"ac60a9b1c6addd63b2ba146072d744df055b956e6e9bfa45aa75d108e531614f"} Nov 28 07:18:20 crc kubenswrapper[4922]: I1128 07:18:20.565952 4922 generic.go:334] "Generic (PLEG): container finished" podID="0607507d-9b30-4943-91d4-b1de3122188c" containerID="9feb35ce2c7719553854c5f271fad97fb06d43980358752d4b7e7759514b203f" exitCode=143 Nov 28 07:18:20 crc kubenswrapper[4922]: I1128 07:18:20.565950 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/cbebe8ab-53eb-4e00-8db0-8d4d91417c6b-utilities\") pod \"certified-operators-l66w6\" (UID: \"cbebe8ab-53eb-4e00-8db0-8d4d91417c6b\") " pod="openshift-marketplace/certified-operators-l66w6" Nov 28 07:18:20 crc kubenswrapper[4922]: I1128 07:18:20.566020 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"0607507d-9b30-4943-91d4-b1de3122188c","Type":"ContainerDied","Data":"9feb35ce2c7719553854c5f271fad97fb06d43980358752d4b7e7759514b203f"} Nov 28 07:18:20 crc kubenswrapper[4922]: I1128 07:18:20.566125 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/cbebe8ab-53eb-4e00-8db0-8d4d91417c6b-catalog-content\") pod \"certified-operators-l66w6\" (UID: \"cbebe8ab-53eb-4e00-8db0-8d4d91417c6b\") " pod="openshift-marketplace/certified-operators-l66w6" Nov 28 07:18:20 crc kubenswrapper[4922]: I1128 07:18:20.566150 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-f74bb\" (UniqueName: \"kubernetes.io/projected/cbebe8ab-53eb-4e00-8db0-8d4d91417c6b-kube-api-access-f74bb\") pod \"certified-operators-l66w6\" (UID: \"cbebe8ab-53eb-4e00-8db0-8d4d91417c6b\") " pod="openshift-marketplace/certified-operators-l66w6" Nov 28 07:18:20 crc kubenswrapper[4922]: I1128 07:18:20.668020 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/cbebe8ab-53eb-4e00-8db0-8d4d91417c6b-utilities\") pod \"certified-operators-l66w6\" (UID: \"cbebe8ab-53eb-4e00-8db0-8d4d91417c6b\") " pod="openshift-marketplace/certified-operators-l66w6" Nov 28 07:18:20 crc kubenswrapper[4922]: I1128 07:18:20.668092 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/cbebe8ab-53eb-4e00-8db0-8d4d91417c6b-catalog-content\") pod \"certified-operators-l66w6\" (UID: \"cbebe8ab-53eb-4e00-8db0-8d4d91417c6b\") " pod="openshift-marketplace/certified-operators-l66w6" Nov 28 07:18:20 crc kubenswrapper[4922]: I1128 07:18:20.668113 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"kube-api-access-f74bb\" (UniqueName: \"kubernetes.io/projected/cbebe8ab-53eb-4e00-8db0-8d4d91417c6b-kube-api-access-f74bb\") pod \"certified-operators-l66w6\" (UID: \"cbebe8ab-53eb-4e00-8db0-8d4d91417c6b\") " pod="openshift-marketplace/certified-operators-l66w6" Nov 28 07:18:20 crc kubenswrapper[4922]: I1128 07:18:20.668519 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/cbebe8ab-53eb-4e00-8db0-8d4d91417c6b-catalog-content\") pod \"certified-operators-l66w6\" (UID: \"cbebe8ab-53eb-4e00-8db0-8d4d91417c6b\") " pod="openshift-marketplace/certified-operators-l66w6" Nov 28 07:18:20 crc kubenswrapper[4922]: I1128 07:18:20.668646 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/cbebe8ab-53eb-4e00-8db0-8d4d91417c6b-utilities\") pod \"certified-operators-l66w6\" (UID: \"cbebe8ab-53eb-4e00-8db0-8d4d91417c6b\") " pod="openshift-marketplace/certified-operators-l66w6" Nov 28 07:18:20 crc kubenswrapper[4922]: I1128 07:18:20.693470 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-f74bb\" (UniqueName: \"kubernetes.io/projected/cbebe8ab-53eb-4e00-8db0-8d4d91417c6b-kube-api-access-f74bb\") pod \"certified-operators-l66w6\" (UID: \"cbebe8ab-53eb-4e00-8db0-8d4d91417c6b\") " pod="openshift-marketplace/certified-operators-l66w6" Nov 28 07:18:20 crc kubenswrapper[4922]: I1128 07:18:20.743854 4922 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Nov 28 07:18:20 crc kubenswrapper[4922]: I1128 07:18:20.764569 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-l66w6" Nov 28 07:18:20 crc kubenswrapper[4922]: I1128 07:18:20.872107 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0422898c-d82b-4976-add8-6d15f54781e1-config-data\") pod \"0422898c-d82b-4976-add8-6d15f54781e1\" (UID: \"0422898c-d82b-4976-add8-6d15f54781e1\") " Nov 28 07:18:20 crc kubenswrapper[4922]: I1128 07:18:20.872421 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0422898c-d82b-4976-add8-6d15f54781e1-combined-ca-bundle\") pod \"0422898c-d82b-4976-add8-6d15f54781e1\" (UID: \"0422898c-d82b-4976-add8-6d15f54781e1\") " Nov 28 07:18:20 crc kubenswrapper[4922]: I1128 07:18:20.872482 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wfxtq\" (UniqueName: \"kubernetes.io/projected/0422898c-d82b-4976-add8-6d15f54781e1-kube-api-access-wfxtq\") pod \"0422898c-d82b-4976-add8-6d15f54781e1\" (UID: \"0422898c-d82b-4976-add8-6d15f54781e1\") " Nov 28 07:18:20 crc kubenswrapper[4922]: I1128 07:18:20.886640 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0422898c-d82b-4976-add8-6d15f54781e1-kube-api-access-wfxtq" (OuterVolumeSpecName: "kube-api-access-wfxtq") pod "0422898c-d82b-4976-add8-6d15f54781e1" (UID: "0422898c-d82b-4976-add8-6d15f54781e1"). InnerVolumeSpecName "kube-api-access-wfxtq". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 07:18:20 crc kubenswrapper[4922]: I1128 07:18:20.938262 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0422898c-d82b-4976-add8-6d15f54781e1-config-data" (OuterVolumeSpecName: "config-data") pod "0422898c-d82b-4976-add8-6d15f54781e1" (UID: "0422898c-d82b-4976-add8-6d15f54781e1"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 07:18:20 crc kubenswrapper[4922]: I1128 07:18:20.955317 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0422898c-d82b-4976-add8-6d15f54781e1-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "0422898c-d82b-4976-add8-6d15f54781e1" (UID: "0422898c-d82b-4976-add8-6d15f54781e1"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 07:18:20 crc kubenswrapper[4922]: I1128 07:18:20.974604 4922 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0422898c-d82b-4976-add8-6d15f54781e1-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 07:18:20 crc kubenswrapper[4922]: I1128 07:18:20.974629 4922 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0422898c-d82b-4976-add8-6d15f54781e1-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 07:18:20 crc kubenswrapper[4922]: I1128 07:18:20.974639 4922 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wfxtq\" (UniqueName: \"kubernetes.io/projected/0422898c-d82b-4976-add8-6d15f54781e1-kube-api-access-wfxtq\") on node \"crc\" DevicePath \"\"" Nov 28 07:18:21 crc kubenswrapper[4922]: I1128 07:18:21.276593 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-l66w6"] Nov 28 07:18:21 crc kubenswrapper[4922]: W1128 07:18:21.277166 4922 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podcbebe8ab_53eb_4e00_8db0_8d4d91417c6b.slice/crio-10e4f4351629d9ab1c1c3be0ca8523b46ee9518f359953d13cdf8b3636c29b71 WatchSource:0}: Error finding container 10e4f4351629d9ab1c1c3be0ca8523b46ee9518f359953d13cdf8b3636c29b71: Status 404 returned error can't find the container with id 10e4f4351629d9ab1c1c3be0ca8523b46ee9518f359953d13cdf8b3636c29b71 Nov 28 07:18:21 crc kubenswrapper[4922]: I1128 07:18:21.578785 4922 generic.go:334] "Generic (PLEG): container finished" podID="cbebe8ab-53eb-4e00-8db0-8d4d91417c6b" containerID="b08034340807b18ebf7bfdd4aaa10d14336d74bdb2d48950ea00aa6811189997" exitCode=0 Nov 28 07:18:21 crc kubenswrapper[4922]: I1128 07:18:21.578831 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-l66w6" event={"ID":"cbebe8ab-53eb-4e00-8db0-8d4d91417c6b","Type":"ContainerDied","Data":"b08034340807b18ebf7bfdd4aaa10d14336d74bdb2d48950ea00aa6811189997"} Nov 28 07:18:21 crc kubenswrapper[4922]: I1128 07:18:21.578874 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-l66w6" event={"ID":"cbebe8ab-53eb-4e00-8db0-8d4d91417c6b","Type":"ContainerStarted","Data":"10e4f4351629d9ab1c1c3be0ca8523b46ee9518f359953d13cdf8b3636c29b71"} Nov 28 07:18:21 crc kubenswrapper[4922]: I1128 07:18:21.581048 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" 
event={"ID":"0422898c-d82b-4976-add8-6d15f54781e1","Type":"ContainerDied","Data":"816b230dfadee572c4e2cfb2f41818265fe2573d5d5aaee24302ef0d0e0ccadc"} Nov 28 07:18:21 crc kubenswrapper[4922]: I1128 07:18:21.581103 4922 scope.go:117] "RemoveContainer" containerID="ac60a9b1c6addd63b2ba146072d744df055b956e6e9bfa45aa75d108e531614f" Nov 28 07:18:21 crc kubenswrapper[4922]: I1128 07:18:21.581266 4922 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Nov 28 07:18:21 crc kubenswrapper[4922]: I1128 07:18:21.620412 4922 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"] Nov 28 07:18:21 crc kubenswrapper[4922]: I1128 07:18:21.630481 4922 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-scheduler-0"] Nov 28 07:18:21 crc kubenswrapper[4922]: I1128 07:18:21.642008 4922 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-scheduler-0"] Nov 28 07:18:21 crc kubenswrapper[4922]: E1128 07:18:21.642503 4922 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0422898c-d82b-4976-add8-6d15f54781e1" containerName="nova-scheduler-scheduler" Nov 28 07:18:21 crc kubenswrapper[4922]: I1128 07:18:21.642525 4922 state_mem.go:107] "Deleted CPUSet assignment" podUID="0422898c-d82b-4976-add8-6d15f54781e1" containerName="nova-scheduler-scheduler" Nov 28 07:18:21 crc kubenswrapper[4922]: I1128 07:18:21.642772 4922 memory_manager.go:354] "RemoveStaleState removing state" podUID="0422898c-d82b-4976-add8-6d15f54781e1" containerName="nova-scheduler-scheduler" Nov 28 07:18:21 crc kubenswrapper[4922]: I1128 07:18:21.643535 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Nov 28 07:18:21 crc kubenswrapper[4922]: I1128 07:18:21.646072 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-scheduler-config-data" Nov 28 07:18:21 crc kubenswrapper[4922]: I1128 07:18:21.651742 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Nov 28 07:18:21 crc kubenswrapper[4922]: I1128 07:18:21.686389 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-x5f5t\" (UniqueName: \"kubernetes.io/projected/7c0f0857-2ca9-49c8-90ac-1351b2ee2f11-kube-api-access-x5f5t\") pod \"nova-scheduler-0\" (UID: \"7c0f0857-2ca9-49c8-90ac-1351b2ee2f11\") " pod="openstack/nova-scheduler-0" Nov 28 07:18:21 crc kubenswrapper[4922]: I1128 07:18:21.686707 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7c0f0857-2ca9-49c8-90ac-1351b2ee2f11-config-data\") pod \"nova-scheduler-0\" (UID: \"7c0f0857-2ca9-49c8-90ac-1351b2ee2f11\") " pod="openstack/nova-scheduler-0" Nov 28 07:18:21 crc kubenswrapper[4922]: I1128 07:18:21.686959 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7c0f0857-2ca9-49c8-90ac-1351b2ee2f11-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"7c0f0857-2ca9-49c8-90ac-1351b2ee2f11\") " pod="openstack/nova-scheduler-0" Nov 28 07:18:21 crc kubenswrapper[4922]: I1128 07:18:21.791090 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7c0f0857-2ca9-49c8-90ac-1351b2ee2f11-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: 
\"7c0f0857-2ca9-49c8-90ac-1351b2ee2f11\") " pod="openstack/nova-scheduler-0" Nov 28 07:18:21 crc kubenswrapper[4922]: I1128 07:18:21.791168 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-x5f5t\" (UniqueName: \"kubernetes.io/projected/7c0f0857-2ca9-49c8-90ac-1351b2ee2f11-kube-api-access-x5f5t\") pod \"nova-scheduler-0\" (UID: \"7c0f0857-2ca9-49c8-90ac-1351b2ee2f11\") " pod="openstack/nova-scheduler-0" Nov 28 07:18:21 crc kubenswrapper[4922]: I1128 07:18:21.791207 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7c0f0857-2ca9-49c8-90ac-1351b2ee2f11-config-data\") pod \"nova-scheduler-0\" (UID: \"7c0f0857-2ca9-49c8-90ac-1351b2ee2f11\") " pod="openstack/nova-scheduler-0" Nov 28 07:18:21 crc kubenswrapper[4922]: I1128 07:18:21.813022 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7c0f0857-2ca9-49c8-90ac-1351b2ee2f11-config-data\") pod \"nova-scheduler-0\" (UID: \"7c0f0857-2ca9-49c8-90ac-1351b2ee2f11\") " pod="openstack/nova-scheduler-0" Nov 28 07:18:21 crc kubenswrapper[4922]: I1128 07:18:21.818043 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7c0f0857-2ca9-49c8-90ac-1351b2ee2f11-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"7c0f0857-2ca9-49c8-90ac-1351b2ee2f11\") " pod="openstack/nova-scheduler-0" Nov 28 07:18:21 crc kubenswrapper[4922]: I1128 07:18:21.831990 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-x5f5t\" (UniqueName: \"kubernetes.io/projected/7c0f0857-2ca9-49c8-90ac-1351b2ee2f11-kube-api-access-x5f5t\") pod \"nova-scheduler-0\" (UID: \"7c0f0857-2ca9-49c8-90ac-1351b2ee2f11\") " pod="openstack/nova-scheduler-0" Nov 28 07:18:21 crc kubenswrapper[4922]: I1128 07:18:21.960838 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Nov 28 07:18:22 crc kubenswrapper[4922]: I1128 07:18:22.450070 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Nov 28 07:18:22 crc kubenswrapper[4922]: I1128 07:18:22.592958 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"7c0f0857-2ca9-49c8-90ac-1351b2ee2f11","Type":"ContainerStarted","Data":"6d71811e5741d52b835a8b9bf6419fd18e742986cfea62f14048e501ac950896"} Nov 28 07:18:23 crc kubenswrapper[4922]: I1128 07:18:23.412612 4922 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0422898c-d82b-4976-add8-6d15f54781e1" path="/var/lib/kubelet/pods/0422898c-d82b-4976-add8-6d15f54781e1/volumes" Nov 28 07:18:23 crc kubenswrapper[4922]: I1128 07:18:23.431362 4922 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Nov 28 07:18:23 crc kubenswrapper[4922]: I1128 07:18:23.524327 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0607507d-9b30-4943-91d4-b1de3122188c-combined-ca-bundle\") pod \"0607507d-9b30-4943-91d4-b1de3122188c\" (UID: \"0607507d-9b30-4943-91d4-b1de3122188c\") " Nov 28 07:18:23 crc kubenswrapper[4922]: I1128 07:18:23.524455 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/0607507d-9b30-4943-91d4-b1de3122188c-logs\") pod \"0607507d-9b30-4943-91d4-b1de3122188c\" (UID: \"0607507d-9b30-4943-91d4-b1de3122188c\") " Nov 28 07:18:23 crc kubenswrapper[4922]: I1128 07:18:23.524510 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0607507d-9b30-4943-91d4-b1de3122188c-config-data\") pod \"0607507d-9b30-4943-91d4-b1de3122188c\" (UID: \"0607507d-9b30-4943-91d4-b1de3122188c\") " Nov 28 07:18:23 crc kubenswrapper[4922]: I1128 07:18:23.524551 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/0607507d-9b30-4943-91d4-b1de3122188c-nova-metadata-tls-certs\") pod \"0607507d-9b30-4943-91d4-b1de3122188c\" (UID: \"0607507d-9b30-4943-91d4-b1de3122188c\") " Nov 28 07:18:23 crc kubenswrapper[4922]: I1128 07:18:23.524646 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8t6l9\" (UniqueName: \"kubernetes.io/projected/0607507d-9b30-4943-91d4-b1de3122188c-kube-api-access-8t6l9\") pod \"0607507d-9b30-4943-91d4-b1de3122188c\" (UID: \"0607507d-9b30-4943-91d4-b1de3122188c\") " Nov 28 07:18:23 crc kubenswrapper[4922]: I1128 07:18:23.525456 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0607507d-9b30-4943-91d4-b1de3122188c-logs" (OuterVolumeSpecName: "logs") pod "0607507d-9b30-4943-91d4-b1de3122188c" (UID: "0607507d-9b30-4943-91d4-b1de3122188c"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 07:18:23 crc kubenswrapper[4922]: I1128 07:18:23.530393 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0607507d-9b30-4943-91d4-b1de3122188c-kube-api-access-8t6l9" (OuterVolumeSpecName: "kube-api-access-8t6l9") pod "0607507d-9b30-4943-91d4-b1de3122188c" (UID: "0607507d-9b30-4943-91d4-b1de3122188c"). InnerVolumeSpecName "kube-api-access-8t6l9". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 07:18:23 crc kubenswrapper[4922]: I1128 07:18:23.555599 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0607507d-9b30-4943-91d4-b1de3122188c-config-data" (OuterVolumeSpecName: "config-data") pod "0607507d-9b30-4943-91d4-b1de3122188c" (UID: "0607507d-9b30-4943-91d4-b1de3122188c"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 07:18:23 crc kubenswrapper[4922]: I1128 07:18:23.573716 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0607507d-9b30-4943-91d4-b1de3122188c-nova-metadata-tls-certs" (OuterVolumeSpecName: "nova-metadata-tls-certs") pod "0607507d-9b30-4943-91d4-b1de3122188c" (UID: "0607507d-9b30-4943-91d4-b1de3122188c"). InnerVolumeSpecName "nova-metadata-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 07:18:23 crc kubenswrapper[4922]: I1128 07:18:23.575548 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0607507d-9b30-4943-91d4-b1de3122188c-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "0607507d-9b30-4943-91d4-b1de3122188c" (UID: "0607507d-9b30-4943-91d4-b1de3122188c"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 07:18:23 crc kubenswrapper[4922]: I1128 07:18:23.612177 4922 generic.go:334] "Generic (PLEG): container finished" podID="cbebe8ab-53eb-4e00-8db0-8d4d91417c6b" containerID="39639f56f41c9c406ba10fe14e7f675dcb46d81df6d599268c2b0aee4d24ea56" exitCode=0 Nov 28 07:18:23 crc kubenswrapper[4922]: I1128 07:18:23.612295 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-l66w6" event={"ID":"cbebe8ab-53eb-4e00-8db0-8d4d91417c6b","Type":"ContainerDied","Data":"39639f56f41c9c406ba10fe14e7f675dcb46d81df6d599268c2b0aee4d24ea56"} Nov 28 07:18:23 crc kubenswrapper[4922]: I1128 07:18:23.614413 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"7c0f0857-2ca9-49c8-90ac-1351b2ee2f11","Type":"ContainerStarted","Data":"6595b56740c828b4071cf0e6ba075ef2934efd23c448c0814e7743491287af11"} Nov 28 07:18:23 crc kubenswrapper[4922]: I1128 07:18:23.620827 4922 generic.go:334] "Generic (PLEG): container finished" podID="0607507d-9b30-4943-91d4-b1de3122188c" containerID="a7cd549dbd5f930c4e37c52941bb6843d5700691d4aedf95168f04ee13f67d40" exitCode=0 Nov 28 07:18:23 crc kubenswrapper[4922]: I1128 07:18:23.620876 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"0607507d-9b30-4943-91d4-b1de3122188c","Type":"ContainerDied","Data":"a7cd549dbd5f930c4e37c52941bb6843d5700691d4aedf95168f04ee13f67d40"} Nov 28 07:18:23 crc kubenswrapper[4922]: I1128 07:18:23.620931 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"0607507d-9b30-4943-91d4-b1de3122188c","Type":"ContainerDied","Data":"d6bece7c30f8653aacab6c9cf86b3b7a5ef607a080e561a53807da3c3bac8861"} Nov 28 07:18:23 crc kubenswrapper[4922]: I1128 07:18:23.620952 4922 scope.go:117] "RemoveContainer" containerID="a7cd549dbd5f930c4e37c52941bb6843d5700691d4aedf95168f04ee13f67d40" Nov 28 07:18:23 crc kubenswrapper[4922]: I1128 07:18:23.621192 4922 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Nov 28 07:18:23 crc kubenswrapper[4922]: I1128 07:18:23.626352 4922 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/0607507d-9b30-4943-91d4-b1de3122188c-logs\") on node \"crc\" DevicePath \"\"" Nov 28 07:18:23 crc kubenswrapper[4922]: I1128 07:18:23.626492 4922 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0607507d-9b30-4943-91d4-b1de3122188c-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 07:18:23 crc kubenswrapper[4922]: I1128 07:18:23.626557 4922 reconciler_common.go:293] "Volume detached for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/0607507d-9b30-4943-91d4-b1de3122188c-nova-metadata-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 28 07:18:23 crc kubenswrapper[4922]: I1128 07:18:23.626619 4922 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8t6l9\" (UniqueName: \"kubernetes.io/projected/0607507d-9b30-4943-91d4-b1de3122188c-kube-api-access-8t6l9\") on node \"crc\" DevicePath \"\"" Nov 28 07:18:23 crc kubenswrapper[4922]: I1128 07:18:23.626674 4922 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0607507d-9b30-4943-91d4-b1de3122188c-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 07:18:23 crc kubenswrapper[4922]: I1128 07:18:23.655601 4922 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-scheduler-0" podStartSLOduration=2.655580599 podStartE2EDuration="2.655580599s" podCreationTimestamp="2025-11-28 07:18:21 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 07:18:23.645851221 +0000 UTC m=+1548.566246803" watchObservedRunningTime="2025-11-28 07:18:23.655580599 +0000 UTC m=+1548.575976191" Nov 28 07:18:23 crc kubenswrapper[4922]: I1128 07:18:23.695621 4922 scope.go:117] "RemoveContainer" containerID="9feb35ce2c7719553854c5f271fad97fb06d43980358752d4b7e7759514b203f" Nov 28 07:18:23 crc kubenswrapper[4922]: I1128 07:18:23.706564 4922 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Nov 28 07:18:23 crc kubenswrapper[4922]: I1128 07:18:23.722582 4922 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-metadata-0"] Nov 28 07:18:23 crc kubenswrapper[4922]: I1128 07:18:23.727819 4922 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-metadata-0"] Nov 28 07:18:23 crc kubenswrapper[4922]: E1128 07:18:23.728300 4922 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0607507d-9b30-4943-91d4-b1de3122188c" containerName="nova-metadata-metadata" Nov 28 07:18:23 crc kubenswrapper[4922]: I1128 07:18:23.728313 4922 state_mem.go:107] "Deleted CPUSet assignment" podUID="0607507d-9b30-4943-91d4-b1de3122188c" containerName="nova-metadata-metadata" Nov 28 07:18:23 crc kubenswrapper[4922]: E1128 07:18:23.728329 4922 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0607507d-9b30-4943-91d4-b1de3122188c" containerName="nova-metadata-log" Nov 28 07:18:23 crc kubenswrapper[4922]: I1128 07:18:23.728335 4922 state_mem.go:107] "Deleted CPUSet assignment" podUID="0607507d-9b30-4943-91d4-b1de3122188c" containerName="nova-metadata-log" Nov 28 07:18:23 crc kubenswrapper[4922]: I1128 07:18:23.728503 4922 memory_manager.go:354] "RemoveStaleState removing state" 
podUID="0607507d-9b30-4943-91d4-b1de3122188c" containerName="nova-metadata-metadata" Nov 28 07:18:23 crc kubenswrapper[4922]: I1128 07:18:23.728522 4922 memory_manager.go:354] "RemoveStaleState removing state" podUID="0607507d-9b30-4943-91d4-b1de3122188c" containerName="nova-metadata-log" Nov 28 07:18:23 crc kubenswrapper[4922]: I1128 07:18:23.729681 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Nov 28 07:18:23 crc kubenswrapper[4922]: I1128 07:18:23.738901 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-metadata-internal-svc" Nov 28 07:18:23 crc kubenswrapper[4922]: I1128 07:18:23.739136 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-config-data" Nov 28 07:18:23 crc kubenswrapper[4922]: I1128 07:18:23.753930 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Nov 28 07:18:23 crc kubenswrapper[4922]: I1128 07:18:23.754718 4922 scope.go:117] "RemoveContainer" containerID="a7cd549dbd5f930c4e37c52941bb6843d5700691d4aedf95168f04ee13f67d40" Nov 28 07:18:23 crc kubenswrapper[4922]: E1128 07:18:23.755456 4922 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a7cd549dbd5f930c4e37c52941bb6843d5700691d4aedf95168f04ee13f67d40\": container with ID starting with a7cd549dbd5f930c4e37c52941bb6843d5700691d4aedf95168f04ee13f67d40 not found: ID does not exist" containerID="a7cd549dbd5f930c4e37c52941bb6843d5700691d4aedf95168f04ee13f67d40" Nov 28 07:18:23 crc kubenswrapper[4922]: I1128 07:18:23.755492 4922 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a7cd549dbd5f930c4e37c52941bb6843d5700691d4aedf95168f04ee13f67d40"} err="failed to get container status \"a7cd549dbd5f930c4e37c52941bb6843d5700691d4aedf95168f04ee13f67d40\": rpc error: code = NotFound desc = could not find container \"a7cd549dbd5f930c4e37c52941bb6843d5700691d4aedf95168f04ee13f67d40\": container with ID starting with a7cd549dbd5f930c4e37c52941bb6843d5700691d4aedf95168f04ee13f67d40 not found: ID does not exist" Nov 28 07:18:23 crc kubenswrapper[4922]: I1128 07:18:23.755520 4922 scope.go:117] "RemoveContainer" containerID="9feb35ce2c7719553854c5f271fad97fb06d43980358752d4b7e7759514b203f" Nov 28 07:18:23 crc kubenswrapper[4922]: E1128 07:18:23.755966 4922 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9feb35ce2c7719553854c5f271fad97fb06d43980358752d4b7e7759514b203f\": container with ID starting with 9feb35ce2c7719553854c5f271fad97fb06d43980358752d4b7e7759514b203f not found: ID does not exist" containerID="9feb35ce2c7719553854c5f271fad97fb06d43980358752d4b7e7759514b203f" Nov 28 07:18:23 crc kubenswrapper[4922]: I1128 07:18:23.756014 4922 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9feb35ce2c7719553854c5f271fad97fb06d43980358752d4b7e7759514b203f"} err="failed to get container status \"9feb35ce2c7719553854c5f271fad97fb06d43980358752d4b7e7759514b203f\": rpc error: code = NotFound desc = could not find container \"9feb35ce2c7719553854c5f271fad97fb06d43980358752d4b7e7759514b203f\": container with ID starting with 9feb35ce2c7719553854c5f271fad97fb06d43980358752d4b7e7759514b203f not found: ID does not exist" Nov 28 07:18:23 crc kubenswrapper[4922]: I1128 07:18:23.829673 4922 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7e1382f2-6597-4c09-a171-8709e4b9f5f7-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"7e1382f2-6597-4c09-a171-8709e4b9f5f7\") " pod="openstack/nova-metadata-0" Nov 28 07:18:23 crc kubenswrapper[4922]: I1128 07:18:23.829735 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7e1382f2-6597-4c09-a171-8709e4b9f5f7-config-data\") pod \"nova-metadata-0\" (UID: \"7e1382f2-6597-4c09-a171-8709e4b9f5f7\") " pod="openstack/nova-metadata-0" Nov 28 07:18:23 crc kubenswrapper[4922]: I1128 07:18:23.829796 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fxzpk\" (UniqueName: \"kubernetes.io/projected/7e1382f2-6597-4c09-a171-8709e4b9f5f7-kube-api-access-fxzpk\") pod \"nova-metadata-0\" (UID: \"7e1382f2-6597-4c09-a171-8709e4b9f5f7\") " pod="openstack/nova-metadata-0" Nov 28 07:18:23 crc kubenswrapper[4922]: I1128 07:18:23.829823 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/7e1382f2-6597-4c09-a171-8709e4b9f5f7-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"7e1382f2-6597-4c09-a171-8709e4b9f5f7\") " pod="openstack/nova-metadata-0" Nov 28 07:18:23 crc kubenswrapper[4922]: I1128 07:18:23.829856 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/7e1382f2-6597-4c09-a171-8709e4b9f5f7-logs\") pod \"nova-metadata-0\" (UID: \"7e1382f2-6597-4c09-a171-8709e4b9f5f7\") " pod="openstack/nova-metadata-0" Nov 28 07:18:23 crc kubenswrapper[4922]: I1128 07:18:23.931598 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7e1382f2-6597-4c09-a171-8709e4b9f5f7-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"7e1382f2-6597-4c09-a171-8709e4b9f5f7\") " pod="openstack/nova-metadata-0" Nov 28 07:18:23 crc kubenswrapper[4922]: I1128 07:18:23.931647 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7e1382f2-6597-4c09-a171-8709e4b9f5f7-config-data\") pod \"nova-metadata-0\" (UID: \"7e1382f2-6597-4c09-a171-8709e4b9f5f7\") " pod="openstack/nova-metadata-0" Nov 28 07:18:23 crc kubenswrapper[4922]: I1128 07:18:23.931676 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fxzpk\" (UniqueName: \"kubernetes.io/projected/7e1382f2-6597-4c09-a171-8709e4b9f5f7-kube-api-access-fxzpk\") pod \"nova-metadata-0\" (UID: \"7e1382f2-6597-4c09-a171-8709e4b9f5f7\") " pod="openstack/nova-metadata-0" Nov 28 07:18:23 crc kubenswrapper[4922]: I1128 07:18:23.931712 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/7e1382f2-6597-4c09-a171-8709e4b9f5f7-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"7e1382f2-6597-4c09-a171-8709e4b9f5f7\") " pod="openstack/nova-metadata-0" Nov 28 07:18:23 crc kubenswrapper[4922]: I1128 07:18:23.931752 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/7e1382f2-6597-4c09-a171-8709e4b9f5f7-logs\") pod 
\"nova-metadata-0\" (UID: \"7e1382f2-6597-4c09-a171-8709e4b9f5f7\") " pod="openstack/nova-metadata-0" Nov 28 07:18:23 crc kubenswrapper[4922]: I1128 07:18:23.932167 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/7e1382f2-6597-4c09-a171-8709e4b9f5f7-logs\") pod \"nova-metadata-0\" (UID: \"7e1382f2-6597-4c09-a171-8709e4b9f5f7\") " pod="openstack/nova-metadata-0" Nov 28 07:18:23 crc kubenswrapper[4922]: I1128 07:18:23.936745 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7e1382f2-6597-4c09-a171-8709e4b9f5f7-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"7e1382f2-6597-4c09-a171-8709e4b9f5f7\") " pod="openstack/nova-metadata-0" Nov 28 07:18:23 crc kubenswrapper[4922]: I1128 07:18:23.937008 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7e1382f2-6597-4c09-a171-8709e4b9f5f7-config-data\") pod \"nova-metadata-0\" (UID: \"7e1382f2-6597-4c09-a171-8709e4b9f5f7\") " pod="openstack/nova-metadata-0" Nov 28 07:18:23 crc kubenswrapper[4922]: I1128 07:18:23.937091 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/7e1382f2-6597-4c09-a171-8709e4b9f5f7-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"7e1382f2-6597-4c09-a171-8709e4b9f5f7\") " pod="openstack/nova-metadata-0" Nov 28 07:18:23 crc kubenswrapper[4922]: I1128 07:18:23.956943 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fxzpk\" (UniqueName: \"kubernetes.io/projected/7e1382f2-6597-4c09-a171-8709e4b9f5f7-kube-api-access-fxzpk\") pod \"nova-metadata-0\" (UID: \"7e1382f2-6597-4c09-a171-8709e4b9f5f7\") " pod="openstack/nova-metadata-0" Nov 28 07:18:24 crc kubenswrapper[4922]: I1128 07:18:24.069024 4922 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Nov 28 07:18:24 crc kubenswrapper[4922]: I1128 07:18:24.542753 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Nov 28 07:18:24 crc kubenswrapper[4922]: I1128 07:18:24.634410 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-l66w6" event={"ID":"cbebe8ab-53eb-4e00-8db0-8d4d91417c6b","Type":"ContainerStarted","Data":"214697a61ad1436b0eecd98f6cdd8ae0071bb61ac46810760fe7c5abea17b8ba"} Nov 28 07:18:24 crc kubenswrapper[4922]: I1128 07:18:24.638745 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"7e1382f2-6597-4c09-a171-8709e4b9f5f7","Type":"ContainerStarted","Data":"2a47a2154388c1c5b146cd321711e72042e52118966dec26aeedd6cbdb3d38cf"} Nov 28 07:18:24 crc kubenswrapper[4922]: I1128 07:18:24.657947 4922 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-l66w6" podStartSLOduration=2.043985802 podStartE2EDuration="4.657926538s" podCreationTimestamp="2025-11-28 07:18:20 +0000 UTC" firstStartedPulling="2025-11-28 07:18:21.580400287 +0000 UTC m=+1546.500795869" lastFinishedPulling="2025-11-28 07:18:24.194341023 +0000 UTC m=+1549.114736605" observedRunningTime="2025-11-28 07:18:24.653238003 +0000 UTC m=+1549.573633605" watchObservedRunningTime="2025-11-28 07:18:24.657926538 +0000 UTC m=+1549.578322120" Nov 28 07:18:25 crc kubenswrapper[4922]: I1128 07:18:25.412288 4922 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0607507d-9b30-4943-91d4-b1de3122188c" path="/var/lib/kubelet/pods/0607507d-9b30-4943-91d4-b1de3122188c/volumes" Nov 28 07:18:25 crc kubenswrapper[4922]: I1128 07:18:25.649361 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"7e1382f2-6597-4c09-a171-8709e4b9f5f7","Type":"ContainerStarted","Data":"a81a1de4776394ff567145b64398d5b889f141b8c8414e1def8da4f9987ce3f1"} Nov 28 07:18:25 crc kubenswrapper[4922]: I1128 07:18:25.649737 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"7e1382f2-6597-4c09-a171-8709e4b9f5f7","Type":"ContainerStarted","Data":"15a728fdcdf56d07c7207624ae8d8276363300730509050cda619bc595b50b6d"} Nov 28 07:18:25 crc kubenswrapper[4922]: I1128 07:18:25.678460 4922 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-metadata-0" podStartSLOduration=2.678440969 podStartE2EDuration="2.678440969s" podCreationTimestamp="2025-11-28 07:18:23 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 07:18:25.674917306 +0000 UTC m=+1550.595312908" watchObservedRunningTime="2025-11-28 07:18:25.678440969 +0000 UTC m=+1550.598836551" Nov 28 07:18:26 crc kubenswrapper[4922]: I1128 07:18:26.961034 4922 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-scheduler-0" Nov 28 07:18:27 crc kubenswrapper[4922]: I1128 07:18:27.312203 4922 patch_prober.go:28] interesting pod/machine-config-daemon-h8wk6 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 07:18:27 crc kubenswrapper[4922]: I1128 07:18:27.312663 4922 prober.go:107] "Probe failed" probeType="Liveness" 
pod="openshift-machine-config-operator/machine-config-daemon-h8wk6" podUID="0498340a-5e95-42bf-a0a6-8ac89a6b8858" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 07:18:28 crc kubenswrapper[4922]: I1128 07:18:28.305090 4922 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-bbctq"] Nov 28 07:18:28 crc kubenswrapper[4922]: I1128 07:18:28.308608 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-bbctq" Nov 28 07:18:28 crc kubenswrapper[4922]: I1128 07:18:28.322072 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-bbctq"] Nov 28 07:18:28 crc kubenswrapper[4922]: I1128 07:18:28.422562 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-td9g5\" (UniqueName: \"kubernetes.io/projected/61e29ed1-a71d-45cd-b892-c4145215a923-kube-api-access-td9g5\") pod \"redhat-operators-bbctq\" (UID: \"61e29ed1-a71d-45cd-b892-c4145215a923\") " pod="openshift-marketplace/redhat-operators-bbctq" Nov 28 07:18:28 crc kubenswrapper[4922]: I1128 07:18:28.423666 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/61e29ed1-a71d-45cd-b892-c4145215a923-utilities\") pod \"redhat-operators-bbctq\" (UID: \"61e29ed1-a71d-45cd-b892-c4145215a923\") " pod="openshift-marketplace/redhat-operators-bbctq" Nov 28 07:18:28 crc kubenswrapper[4922]: I1128 07:18:28.423845 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/61e29ed1-a71d-45cd-b892-c4145215a923-catalog-content\") pod \"redhat-operators-bbctq\" (UID: \"61e29ed1-a71d-45cd-b892-c4145215a923\") " pod="openshift-marketplace/redhat-operators-bbctq" Nov 28 07:18:28 crc kubenswrapper[4922]: I1128 07:18:28.526107 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/61e29ed1-a71d-45cd-b892-c4145215a923-utilities\") pod \"redhat-operators-bbctq\" (UID: \"61e29ed1-a71d-45cd-b892-c4145215a923\") " pod="openshift-marketplace/redhat-operators-bbctq" Nov 28 07:18:28 crc kubenswrapper[4922]: I1128 07:18:28.526245 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/61e29ed1-a71d-45cd-b892-c4145215a923-catalog-content\") pod \"redhat-operators-bbctq\" (UID: \"61e29ed1-a71d-45cd-b892-c4145215a923\") " pod="openshift-marketplace/redhat-operators-bbctq" Nov 28 07:18:28 crc kubenswrapper[4922]: I1128 07:18:28.526802 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/61e29ed1-a71d-45cd-b892-c4145215a923-utilities\") pod \"redhat-operators-bbctq\" (UID: \"61e29ed1-a71d-45cd-b892-c4145215a923\") " pod="openshift-marketplace/redhat-operators-bbctq" Nov 28 07:18:28 crc kubenswrapper[4922]: I1128 07:18:28.526825 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/61e29ed1-a71d-45cd-b892-c4145215a923-catalog-content\") pod \"redhat-operators-bbctq\" (UID: \"61e29ed1-a71d-45cd-b892-c4145215a923\") " pod="openshift-marketplace/redhat-operators-bbctq" 
Nov 28 07:18:28 crc kubenswrapper[4922]: I1128 07:18:28.527005 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-td9g5\" (UniqueName: \"kubernetes.io/projected/61e29ed1-a71d-45cd-b892-c4145215a923-kube-api-access-td9g5\") pod \"redhat-operators-bbctq\" (UID: \"61e29ed1-a71d-45cd-b892-c4145215a923\") " pod="openshift-marketplace/redhat-operators-bbctq" Nov 28 07:18:28 crc kubenswrapper[4922]: I1128 07:18:28.565623 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-td9g5\" (UniqueName: \"kubernetes.io/projected/61e29ed1-a71d-45cd-b892-c4145215a923-kube-api-access-td9g5\") pod \"redhat-operators-bbctq\" (UID: \"61e29ed1-a71d-45cd-b892-c4145215a923\") " pod="openshift-marketplace/redhat-operators-bbctq" Nov 28 07:18:28 crc kubenswrapper[4922]: I1128 07:18:28.670820 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-bbctq" Nov 28 07:18:29 crc kubenswrapper[4922]: I1128 07:18:29.069262 4922 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Nov 28 07:18:29 crc kubenswrapper[4922]: I1128 07:18:29.069315 4922 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Nov 28 07:18:29 crc kubenswrapper[4922]: I1128 07:18:29.093783 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-bbctq"] Nov 28 07:18:29 crc kubenswrapper[4922]: W1128 07:18:29.098836 4922 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod61e29ed1_a71d_45cd_b892_c4145215a923.slice/crio-044de11a152f781951877730138e5d8b2109c1456587fdbf23f68f1d6ca96c08 WatchSource:0}: Error finding container 044de11a152f781951877730138e5d8b2109c1456587fdbf23f68f1d6ca96c08: Status 404 returned error can't find the container with id 044de11a152f781951877730138e5d8b2109c1456587fdbf23f68f1d6ca96c08 Nov 28 07:18:29 crc kubenswrapper[4922]: I1128 07:18:29.694399 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-bbctq" event={"ID":"61e29ed1-a71d-45cd-b892-c4145215a923","Type":"ContainerStarted","Data":"044de11a152f781951877730138e5d8b2109c1456587fdbf23f68f1d6ca96c08"} Nov 28 07:18:30 crc kubenswrapper[4922]: I1128 07:18:30.713459 4922 generic.go:334] "Generic (PLEG): container finished" podID="61e29ed1-a71d-45cd-b892-c4145215a923" containerID="0d9cc75603f3f7e588b38c8804fa5b200f965d8e356839d4ff9d2483809a58de" exitCode=0 Nov 28 07:18:30 crc kubenswrapper[4922]: I1128 07:18:30.713523 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-bbctq" event={"ID":"61e29ed1-a71d-45cd-b892-c4145215a923","Type":"ContainerDied","Data":"0d9cc75603f3f7e588b38c8804fa5b200f965d8e356839d4ff9d2483809a58de"} Nov 28 07:18:30 crc kubenswrapper[4922]: I1128 07:18:30.765030 4922 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-l66w6" Nov 28 07:18:30 crc kubenswrapper[4922]: I1128 07:18:30.765082 4922 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-l66w6" Nov 28 07:18:30 crc kubenswrapper[4922]: I1128 07:18:30.814455 4922 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-l66w6" Nov 28 07:18:31 crc kubenswrapper[4922]: I1128 07:18:31.731376 
4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-bbctq" event={"ID":"61e29ed1-a71d-45cd-b892-c4145215a923","Type":"ContainerStarted","Data":"f5c9a826ca5b19e00418b01c978cdc7b6029d82dc4d0bfa979500d8f8060d711"} Nov 28 07:18:31 crc kubenswrapper[4922]: I1128 07:18:31.785324 4922 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-l66w6" Nov 28 07:18:31 crc kubenswrapper[4922]: I1128 07:18:31.961612 4922 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-scheduler-0" Nov 28 07:18:31 crc kubenswrapper[4922]: I1128 07:18:31.990188 4922 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-scheduler-0" Nov 28 07:18:32 crc kubenswrapper[4922]: I1128 07:18:32.774457 4922 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-scheduler-0" Nov 28 07:18:33 crc kubenswrapper[4922]: I1128 07:18:33.070920 4922 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-l66w6"] Nov 28 07:18:33 crc kubenswrapper[4922]: I1128 07:18:33.749953 4922 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-l66w6" podUID="cbebe8ab-53eb-4e00-8db0-8d4d91417c6b" containerName="registry-server" containerID="cri-o://214697a61ad1436b0eecd98f6cdd8ae0071bb61ac46810760fe7c5abea17b8ba" gracePeriod=2 Nov 28 07:18:34 crc kubenswrapper[4922]: I1128 07:18:34.070150 4922 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-metadata-0" Nov 28 07:18:34 crc kubenswrapper[4922]: I1128 07:18:34.070521 4922 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-metadata-0" Nov 28 07:18:34 crc kubenswrapper[4922]: I1128 07:18:34.761448 4922 generic.go:334] "Generic (PLEG): container finished" podID="61e29ed1-a71d-45cd-b892-c4145215a923" containerID="f5c9a826ca5b19e00418b01c978cdc7b6029d82dc4d0bfa979500d8f8060d711" exitCode=0 Nov 28 07:18:34 crc kubenswrapper[4922]: I1128 07:18:34.761500 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-bbctq" event={"ID":"61e29ed1-a71d-45cd-b892-c4145215a923","Type":"ContainerDied","Data":"f5c9a826ca5b19e00418b01c978cdc7b6029d82dc4d0bfa979500d8f8060d711"} Nov 28 07:18:35 crc kubenswrapper[4922]: I1128 07:18:35.087477 4922 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-metadata-0" podUID="7e1382f2-6597-4c09-a171-8709e4b9f5f7" containerName="nova-metadata-metadata" probeResult="failure" output="Get \"https://10.217.0.200:8775/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Nov 28 07:18:35 crc kubenswrapper[4922]: I1128 07:18:35.087504 4922 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-metadata-0" podUID="7e1382f2-6597-4c09-a171-8709e4b9f5f7" containerName="nova-metadata-log" probeResult="failure" output="Get \"https://10.217.0.200:8775/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Nov 28 07:18:36 crc kubenswrapper[4922]: I1128 07:18:36.968426 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-bbctq" event={"ID":"61e29ed1-a71d-45cd-b892-c4145215a923","Type":"ContainerStarted","Data":"a79130da9f41fedba55ab74d2fdc7106335c3ac797600d28caf7cd44b4df76f9"} Nov 28 07:18:36 crc kubenswrapper[4922]: I1128 
07:18:36.971830 4922 generic.go:334] "Generic (PLEG): container finished" podID="cbebe8ab-53eb-4e00-8db0-8d4d91417c6b" containerID="214697a61ad1436b0eecd98f6cdd8ae0071bb61ac46810760fe7c5abea17b8ba" exitCode=0 Nov 28 07:18:36 crc kubenswrapper[4922]: I1128 07:18:36.971864 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-l66w6" event={"ID":"cbebe8ab-53eb-4e00-8db0-8d4d91417c6b","Type":"ContainerDied","Data":"214697a61ad1436b0eecd98f6cdd8ae0071bb61ac46810760fe7c5abea17b8ba"} Nov 28 07:18:37 crc kubenswrapper[4922]: I1128 07:18:37.001999 4922 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-bbctq" podStartSLOduration=3.2822587739999998 podStartE2EDuration="9.001981864s" podCreationTimestamp="2025-11-28 07:18:28 +0000 UTC" firstStartedPulling="2025-11-28 07:18:30.715821784 +0000 UTC m=+1555.636217376" lastFinishedPulling="2025-11-28 07:18:36.435544884 +0000 UTC m=+1561.355940466" observedRunningTime="2025-11-28 07:18:36.995313647 +0000 UTC m=+1561.915709229" watchObservedRunningTime="2025-11-28 07:18:37.001981864 +0000 UTC m=+1561.922377446" Nov 28 07:18:37 crc kubenswrapper[4922]: I1128 07:18:37.072932 4922 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-l66w6" Nov 28 07:18:37 crc kubenswrapper[4922]: I1128 07:18:37.127393 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/cbebe8ab-53eb-4e00-8db0-8d4d91417c6b-catalog-content\") pod \"cbebe8ab-53eb-4e00-8db0-8d4d91417c6b\" (UID: \"cbebe8ab-53eb-4e00-8db0-8d4d91417c6b\") " Nov 28 07:18:37 crc kubenswrapper[4922]: I1128 07:18:37.127444 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/cbebe8ab-53eb-4e00-8db0-8d4d91417c6b-utilities\") pod \"cbebe8ab-53eb-4e00-8db0-8d4d91417c6b\" (UID: \"cbebe8ab-53eb-4e00-8db0-8d4d91417c6b\") " Nov 28 07:18:37 crc kubenswrapper[4922]: I1128 07:18:37.127550 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-f74bb\" (UniqueName: \"kubernetes.io/projected/cbebe8ab-53eb-4e00-8db0-8d4d91417c6b-kube-api-access-f74bb\") pod \"cbebe8ab-53eb-4e00-8db0-8d4d91417c6b\" (UID: \"cbebe8ab-53eb-4e00-8db0-8d4d91417c6b\") " Nov 28 07:18:37 crc kubenswrapper[4922]: I1128 07:18:37.128493 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/cbebe8ab-53eb-4e00-8db0-8d4d91417c6b-utilities" (OuterVolumeSpecName: "utilities") pod "cbebe8ab-53eb-4e00-8db0-8d4d91417c6b" (UID: "cbebe8ab-53eb-4e00-8db0-8d4d91417c6b"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 07:18:37 crc kubenswrapper[4922]: I1128 07:18:37.134334 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cbebe8ab-53eb-4e00-8db0-8d4d91417c6b-kube-api-access-f74bb" (OuterVolumeSpecName: "kube-api-access-f74bb") pod "cbebe8ab-53eb-4e00-8db0-8d4d91417c6b" (UID: "cbebe8ab-53eb-4e00-8db0-8d4d91417c6b"). InnerVolumeSpecName "kube-api-access-f74bb". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 07:18:37 crc kubenswrapper[4922]: I1128 07:18:37.181259 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/cbebe8ab-53eb-4e00-8db0-8d4d91417c6b-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "cbebe8ab-53eb-4e00-8db0-8d4d91417c6b" (UID: "cbebe8ab-53eb-4e00-8db0-8d4d91417c6b"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 07:18:37 crc kubenswrapper[4922]: I1128 07:18:37.229741 4922 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/cbebe8ab-53eb-4e00-8db0-8d4d91417c6b-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 28 07:18:37 crc kubenswrapper[4922]: I1128 07:18:37.229771 4922 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/cbebe8ab-53eb-4e00-8db0-8d4d91417c6b-utilities\") on node \"crc\" DevicePath \"\"" Nov 28 07:18:37 crc kubenswrapper[4922]: I1128 07:18:37.229781 4922 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-f74bb\" (UniqueName: \"kubernetes.io/projected/cbebe8ab-53eb-4e00-8db0-8d4d91417c6b-kube-api-access-f74bb\") on node \"crc\" DevicePath \"\"" Nov 28 07:18:37 crc kubenswrapper[4922]: I1128 07:18:37.803607 4922 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ceilometer-0" Nov 28 07:18:37 crc kubenswrapper[4922]: I1128 07:18:37.985038 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-l66w6" event={"ID":"cbebe8ab-53eb-4e00-8db0-8d4d91417c6b","Type":"ContainerDied","Data":"10e4f4351629d9ab1c1c3be0ca8523b46ee9518f359953d13cdf8b3636c29b71"} Nov 28 07:18:37 crc kubenswrapper[4922]: I1128 07:18:37.985088 4922 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-l66w6" Nov 28 07:18:37 crc kubenswrapper[4922]: I1128 07:18:37.985115 4922 scope.go:117] "RemoveContainer" containerID="214697a61ad1436b0eecd98f6cdd8ae0071bb61ac46810760fe7c5abea17b8ba" Nov 28 07:18:38 crc kubenswrapper[4922]: I1128 07:18:38.013946 4922 scope.go:117] "RemoveContainer" containerID="39639f56f41c9c406ba10fe14e7f675dcb46d81df6d599268c2b0aee4d24ea56" Nov 28 07:18:38 crc kubenswrapper[4922]: I1128 07:18:38.018204 4922 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-l66w6"] Nov 28 07:18:38 crc kubenswrapper[4922]: I1128 07:18:38.025990 4922 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-l66w6"] Nov 28 07:18:38 crc kubenswrapper[4922]: I1128 07:18:38.037140 4922 scope.go:117] "RemoveContainer" containerID="b08034340807b18ebf7bfdd4aaa10d14336d74bdb2d48950ea00aa6811189997" Nov 28 07:18:38 crc kubenswrapper[4922]: I1128 07:18:38.671855 4922 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-bbctq" Nov 28 07:18:38 crc kubenswrapper[4922]: I1128 07:18:38.672500 4922 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-bbctq" Nov 28 07:18:39 crc kubenswrapper[4922]: I1128 07:18:39.415668 4922 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="cbebe8ab-53eb-4e00-8db0-8d4d91417c6b" path="/var/lib/kubelet/pods/cbebe8ab-53eb-4e00-8db0-8d4d91417c6b/volumes" Nov 28 07:18:39 crc kubenswrapper[4922]: I1128 07:18:39.442008 4922 pod_container_manager_linux.go:210] "Failed to delete cgroup paths" cgroupName=["kubepods","besteffort","pod541557ff-fa14-4f6c-b957-716ba8fdb38e"] err="unable to destroy cgroup paths for cgroup [kubepods besteffort pod541557ff-fa14-4f6c-b957-716ba8fdb38e] : Timed out while waiting for systemd to remove kubepods-besteffort-pod541557ff_fa14_4f6c_b957_716ba8fdb38e.slice" Nov 28 07:18:39 crc kubenswrapper[4922]: E1128 07:18:39.442055 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to delete cgroup paths for [kubepods besteffort pod541557ff-fa14-4f6c-b957-716ba8fdb38e] : unable to destroy cgroup paths for cgroup [kubepods besteffort pod541557ff-fa14-4f6c-b957-716ba8fdb38e] : Timed out while waiting for systemd to remove kubepods-besteffort-pod541557ff_fa14_4f6c_b957_716ba8fdb38e.slice" pod="openstack/nova-api-0" podUID="541557ff-fa14-4f6c-b957-716ba8fdb38e" Nov 28 07:18:39 crc kubenswrapper[4922]: I1128 07:18:39.744725 4922 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-bbctq" podUID="61e29ed1-a71d-45cd-b892-c4145215a923" containerName="registry-server" probeResult="failure" output=< Nov 28 07:18:39 crc kubenswrapper[4922]: timeout: failed to connect service ":50051" within 1s Nov 28 07:18:39 crc kubenswrapper[4922]: > Nov 28 07:18:40 crc kubenswrapper[4922]: I1128 07:18:40.007855 4922 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Nov 28 07:18:40 crc kubenswrapper[4922]: I1128 07:18:40.047368 4922 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Nov 28 07:18:40 crc kubenswrapper[4922]: I1128 07:18:40.072577 4922 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-0"] Nov 28 07:18:40 crc kubenswrapper[4922]: I1128 07:18:40.084074 4922 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-0"] Nov 28 07:18:40 crc kubenswrapper[4922]: E1128 07:18:40.084843 4922 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cbebe8ab-53eb-4e00-8db0-8d4d91417c6b" containerName="extract-utilities" Nov 28 07:18:40 crc kubenswrapper[4922]: I1128 07:18:40.085016 4922 state_mem.go:107] "Deleted CPUSet assignment" podUID="cbebe8ab-53eb-4e00-8db0-8d4d91417c6b" containerName="extract-utilities" Nov 28 07:18:40 crc kubenswrapper[4922]: E1128 07:18:40.085126 4922 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cbebe8ab-53eb-4e00-8db0-8d4d91417c6b" containerName="extract-content" Nov 28 07:18:40 crc kubenswrapper[4922]: I1128 07:18:40.085213 4922 state_mem.go:107] "Deleted CPUSet assignment" podUID="cbebe8ab-53eb-4e00-8db0-8d4d91417c6b" containerName="extract-content" Nov 28 07:18:40 crc kubenswrapper[4922]: E1128 07:18:40.085526 4922 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cbebe8ab-53eb-4e00-8db0-8d4d91417c6b" containerName="registry-server" Nov 28 07:18:40 crc kubenswrapper[4922]: I1128 07:18:40.085614 4922 state_mem.go:107] "Deleted CPUSet assignment" podUID="cbebe8ab-53eb-4e00-8db0-8d4d91417c6b" containerName="registry-server" Nov 28 07:18:40 crc kubenswrapper[4922]: I1128 07:18:40.085968 4922 memory_manager.go:354] "RemoveStaleState removing state" podUID="cbebe8ab-53eb-4e00-8db0-8d4d91417c6b" containerName="registry-server" Nov 28 07:18:40 crc kubenswrapper[4922]: I1128 07:18:40.087556 4922 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Nov 28 07:18:40 crc kubenswrapper[4922]: I1128 07:18:40.090664 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-public-svc" Nov 28 07:18:40 crc kubenswrapper[4922]: I1128 07:18:40.091343 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-config-data" Nov 28 07:18:40 crc kubenswrapper[4922]: I1128 07:18:40.092408 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-internal-svc" Nov 28 07:18:40 crc kubenswrapper[4922]: I1128 07:18:40.100978 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Nov 28 07:18:40 crc kubenswrapper[4922]: I1128 07:18:40.187362 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ed30601b-1e7c-4aa6-8469-8ff61cd93253-config-data\") pod \"nova-api-0\" (UID: \"ed30601b-1e7c-4aa6-8469-8ff61cd93253\") " pod="openstack/nova-api-0" Nov 28 07:18:40 crc kubenswrapper[4922]: I1128 07:18:40.187416 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ed30601b-1e7c-4aa6-8469-8ff61cd93253-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"ed30601b-1e7c-4aa6-8469-8ff61cd93253\") " pod="openstack/nova-api-0" Nov 28 07:18:40 crc kubenswrapper[4922]: I1128 07:18:40.187463 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/ed30601b-1e7c-4aa6-8469-8ff61cd93253-public-tls-certs\") pod \"nova-api-0\" (UID: \"ed30601b-1e7c-4aa6-8469-8ff61cd93253\") " pod="openstack/nova-api-0" Nov 28 07:18:40 crc kubenswrapper[4922]: I1128 07:18:40.187490 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/ed30601b-1e7c-4aa6-8469-8ff61cd93253-internal-tls-certs\") pod \"nova-api-0\" (UID: \"ed30601b-1e7c-4aa6-8469-8ff61cd93253\") " pod="openstack/nova-api-0" Nov 28 07:18:40 crc kubenswrapper[4922]: I1128 07:18:40.187594 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9lhk5\" (UniqueName: \"kubernetes.io/projected/ed30601b-1e7c-4aa6-8469-8ff61cd93253-kube-api-access-9lhk5\") pod \"nova-api-0\" (UID: \"ed30601b-1e7c-4aa6-8469-8ff61cd93253\") " pod="openstack/nova-api-0" Nov 28 07:18:40 crc kubenswrapper[4922]: I1128 07:18:40.187650 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ed30601b-1e7c-4aa6-8469-8ff61cd93253-logs\") pod \"nova-api-0\" (UID: \"ed30601b-1e7c-4aa6-8469-8ff61cd93253\") " pod="openstack/nova-api-0" Nov 28 07:18:40 crc kubenswrapper[4922]: I1128 07:18:40.288891 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ed30601b-1e7c-4aa6-8469-8ff61cd93253-config-data\") pod \"nova-api-0\" (UID: \"ed30601b-1e7c-4aa6-8469-8ff61cd93253\") " pod="openstack/nova-api-0" Nov 28 07:18:40 crc kubenswrapper[4922]: I1128 07:18:40.288950 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ed30601b-1e7c-4aa6-8469-8ff61cd93253-combined-ca-bundle\") pod \"nova-api-0\" (UID: 
\"ed30601b-1e7c-4aa6-8469-8ff61cd93253\") " pod="openstack/nova-api-0" Nov 28 07:18:40 crc kubenswrapper[4922]: I1128 07:18:40.289002 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/ed30601b-1e7c-4aa6-8469-8ff61cd93253-public-tls-certs\") pod \"nova-api-0\" (UID: \"ed30601b-1e7c-4aa6-8469-8ff61cd93253\") " pod="openstack/nova-api-0" Nov 28 07:18:40 crc kubenswrapper[4922]: I1128 07:18:40.289036 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/ed30601b-1e7c-4aa6-8469-8ff61cd93253-internal-tls-certs\") pod \"nova-api-0\" (UID: \"ed30601b-1e7c-4aa6-8469-8ff61cd93253\") " pod="openstack/nova-api-0" Nov 28 07:18:40 crc kubenswrapper[4922]: I1128 07:18:40.289078 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9lhk5\" (UniqueName: \"kubernetes.io/projected/ed30601b-1e7c-4aa6-8469-8ff61cd93253-kube-api-access-9lhk5\") pod \"nova-api-0\" (UID: \"ed30601b-1e7c-4aa6-8469-8ff61cd93253\") " pod="openstack/nova-api-0" Nov 28 07:18:40 crc kubenswrapper[4922]: I1128 07:18:40.289170 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ed30601b-1e7c-4aa6-8469-8ff61cd93253-logs\") pod \"nova-api-0\" (UID: \"ed30601b-1e7c-4aa6-8469-8ff61cd93253\") " pod="openstack/nova-api-0" Nov 28 07:18:40 crc kubenswrapper[4922]: I1128 07:18:40.289622 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ed30601b-1e7c-4aa6-8469-8ff61cd93253-logs\") pod \"nova-api-0\" (UID: \"ed30601b-1e7c-4aa6-8469-8ff61cd93253\") " pod="openstack/nova-api-0" Nov 28 07:18:40 crc kubenswrapper[4922]: I1128 07:18:40.295077 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/ed30601b-1e7c-4aa6-8469-8ff61cd93253-public-tls-certs\") pod \"nova-api-0\" (UID: \"ed30601b-1e7c-4aa6-8469-8ff61cd93253\") " pod="openstack/nova-api-0" Nov 28 07:18:40 crc kubenswrapper[4922]: I1128 07:18:40.295531 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/ed30601b-1e7c-4aa6-8469-8ff61cd93253-internal-tls-certs\") pod \"nova-api-0\" (UID: \"ed30601b-1e7c-4aa6-8469-8ff61cd93253\") " pod="openstack/nova-api-0" Nov 28 07:18:40 crc kubenswrapper[4922]: I1128 07:18:40.299194 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ed30601b-1e7c-4aa6-8469-8ff61cd93253-config-data\") pod \"nova-api-0\" (UID: \"ed30601b-1e7c-4aa6-8469-8ff61cd93253\") " pod="openstack/nova-api-0" Nov 28 07:18:40 crc kubenswrapper[4922]: I1128 07:18:40.304830 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ed30601b-1e7c-4aa6-8469-8ff61cd93253-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"ed30601b-1e7c-4aa6-8469-8ff61cd93253\") " pod="openstack/nova-api-0" Nov 28 07:18:40 crc kubenswrapper[4922]: I1128 07:18:40.307574 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9lhk5\" (UniqueName: \"kubernetes.io/projected/ed30601b-1e7c-4aa6-8469-8ff61cd93253-kube-api-access-9lhk5\") pod \"nova-api-0\" (UID: \"ed30601b-1e7c-4aa6-8469-8ff61cd93253\") " pod="openstack/nova-api-0" Nov 
28 07:18:40 crc kubenswrapper[4922]: I1128 07:18:40.404370 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Nov 28 07:18:40 crc kubenswrapper[4922]: I1128 07:18:40.902187 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Nov 28 07:18:41 crc kubenswrapper[4922]: I1128 07:18:41.018966 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"ed30601b-1e7c-4aa6-8469-8ff61cd93253","Type":"ContainerStarted","Data":"e0df2ebd96b3d5d19928117437f5b4a2d59ac3de23850ed4adddc3d9deb76717"} Nov 28 07:18:41 crc kubenswrapper[4922]: I1128 07:18:41.408857 4922 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="541557ff-fa14-4f6c-b957-716ba8fdb38e" path="/var/lib/kubelet/pods/541557ff-fa14-4f6c-b957-716ba8fdb38e/volumes" Nov 28 07:18:42 crc kubenswrapper[4922]: I1128 07:18:42.031830 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"ed30601b-1e7c-4aa6-8469-8ff61cd93253","Type":"ContainerStarted","Data":"585e8abe2762a709ae6ce887d0ef00f835169d14caac14b38ddb07298e1c071f"} Nov 28 07:18:42 crc kubenswrapper[4922]: I1128 07:18:42.032209 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"ed30601b-1e7c-4aa6-8469-8ff61cd93253","Type":"ContainerStarted","Data":"bf1cba7c9ab10696b41c6419bb24db199153cd327768de2455c89daa7f33c569"} Nov 28 07:18:42 crc kubenswrapper[4922]: I1128 07:18:42.077841 4922 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-0" podStartSLOduration=2.077808122 podStartE2EDuration="2.077808122s" podCreationTimestamp="2025-11-28 07:18:40 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 07:18:42.058858417 +0000 UTC m=+1566.979254109" watchObservedRunningTime="2025-11-28 07:18:42.077808122 +0000 UTC m=+1566.998203784" Nov 28 07:18:44 crc kubenswrapper[4922]: I1128 07:18:44.083780 4922 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-metadata-0" Nov 28 07:18:44 crc kubenswrapper[4922]: I1128 07:18:44.084603 4922 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-metadata-0" Nov 28 07:18:44 crc kubenswrapper[4922]: I1128 07:18:44.091312 4922 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-metadata-0" Nov 28 07:18:44 crc kubenswrapper[4922]: I1128 07:18:44.096071 4922 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-metadata-0" Nov 28 07:18:48 crc kubenswrapper[4922]: I1128 07:18:48.746600 4922 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-bbctq" Nov 28 07:18:48 crc kubenswrapper[4922]: I1128 07:18:48.825491 4922 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-bbctq" Nov 28 07:18:48 crc kubenswrapper[4922]: I1128 07:18:48.986458 4922 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-bbctq"] Nov 28 07:18:50 crc kubenswrapper[4922]: I1128 07:18:50.124502 4922 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-bbctq" podUID="61e29ed1-a71d-45cd-b892-c4145215a923" containerName="registry-server" 
containerID="cri-o://a79130da9f41fedba55ab74d2fdc7106335c3ac797600d28caf7cd44b4df76f9" gracePeriod=2 Nov 28 07:18:50 crc kubenswrapper[4922]: I1128 07:18:50.404743 4922 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Nov 28 07:18:50 crc kubenswrapper[4922]: I1128 07:18:50.404790 4922 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Nov 28 07:18:50 crc kubenswrapper[4922]: I1128 07:18:50.673353 4922 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-bbctq" Nov 28 07:18:50 crc kubenswrapper[4922]: I1128 07:18:50.714850 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/61e29ed1-a71d-45cd-b892-c4145215a923-catalog-content\") pod \"61e29ed1-a71d-45cd-b892-c4145215a923\" (UID: \"61e29ed1-a71d-45cd-b892-c4145215a923\") " Nov 28 07:18:50 crc kubenswrapper[4922]: I1128 07:18:50.714956 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/61e29ed1-a71d-45cd-b892-c4145215a923-utilities\") pod \"61e29ed1-a71d-45cd-b892-c4145215a923\" (UID: \"61e29ed1-a71d-45cd-b892-c4145215a923\") " Nov 28 07:18:50 crc kubenswrapper[4922]: I1128 07:18:50.715054 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-td9g5\" (UniqueName: \"kubernetes.io/projected/61e29ed1-a71d-45cd-b892-c4145215a923-kube-api-access-td9g5\") pod \"61e29ed1-a71d-45cd-b892-c4145215a923\" (UID: \"61e29ed1-a71d-45cd-b892-c4145215a923\") " Nov 28 07:18:50 crc kubenswrapper[4922]: I1128 07:18:50.715941 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/61e29ed1-a71d-45cd-b892-c4145215a923-utilities" (OuterVolumeSpecName: "utilities") pod "61e29ed1-a71d-45cd-b892-c4145215a923" (UID: "61e29ed1-a71d-45cd-b892-c4145215a923"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 07:18:50 crc kubenswrapper[4922]: I1128 07:18:50.720902 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/61e29ed1-a71d-45cd-b892-c4145215a923-kube-api-access-td9g5" (OuterVolumeSpecName: "kube-api-access-td9g5") pod "61e29ed1-a71d-45cd-b892-c4145215a923" (UID: "61e29ed1-a71d-45cd-b892-c4145215a923"). InnerVolumeSpecName "kube-api-access-td9g5". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 07:18:50 crc kubenswrapper[4922]: I1128 07:18:50.817737 4922 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-td9g5\" (UniqueName: \"kubernetes.io/projected/61e29ed1-a71d-45cd-b892-c4145215a923-kube-api-access-td9g5\") on node \"crc\" DevicePath \"\"" Nov 28 07:18:50 crc kubenswrapper[4922]: I1128 07:18:50.817765 4922 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/61e29ed1-a71d-45cd-b892-c4145215a923-utilities\") on node \"crc\" DevicePath \"\"" Nov 28 07:18:50 crc kubenswrapper[4922]: I1128 07:18:50.842125 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/61e29ed1-a71d-45cd-b892-c4145215a923-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "61e29ed1-a71d-45cd-b892-c4145215a923" (UID: "61e29ed1-a71d-45cd-b892-c4145215a923"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 07:18:50 crc kubenswrapper[4922]: I1128 07:18:50.920361 4922 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/61e29ed1-a71d-45cd-b892-c4145215a923-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 28 07:18:51 crc kubenswrapper[4922]: I1128 07:18:51.142452 4922 generic.go:334] "Generic (PLEG): container finished" podID="61e29ed1-a71d-45cd-b892-c4145215a923" containerID="a79130da9f41fedba55ab74d2fdc7106335c3ac797600d28caf7cd44b4df76f9" exitCode=0 Nov 28 07:18:51 crc kubenswrapper[4922]: I1128 07:18:51.142534 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-bbctq" event={"ID":"61e29ed1-a71d-45cd-b892-c4145215a923","Type":"ContainerDied","Data":"a79130da9f41fedba55ab74d2fdc7106335c3ac797600d28caf7cd44b4df76f9"} Nov 28 07:18:51 crc kubenswrapper[4922]: I1128 07:18:51.142583 4922 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-bbctq" Nov 28 07:18:51 crc kubenswrapper[4922]: I1128 07:18:51.142758 4922 scope.go:117] "RemoveContainer" containerID="a79130da9f41fedba55ab74d2fdc7106335c3ac797600d28caf7cd44b4df76f9" Nov 28 07:18:51 crc kubenswrapper[4922]: I1128 07:18:51.142785 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-bbctq" event={"ID":"61e29ed1-a71d-45cd-b892-c4145215a923","Type":"ContainerDied","Data":"044de11a152f781951877730138e5d8b2109c1456587fdbf23f68f1d6ca96c08"} Nov 28 07:18:51 crc kubenswrapper[4922]: I1128 07:18:51.193253 4922 scope.go:117] "RemoveContainer" containerID="f5c9a826ca5b19e00418b01c978cdc7b6029d82dc4d0bfa979500d8f8060d711" Nov 28 07:18:51 crc kubenswrapper[4922]: I1128 07:18:51.223742 4922 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-bbctq"] Nov 28 07:18:51 crc kubenswrapper[4922]: I1128 07:18:51.233749 4922 scope.go:117] "RemoveContainer" containerID="0d9cc75603f3f7e588b38c8804fa5b200f965d8e356839d4ff9d2483809a58de" Nov 28 07:18:51 crc kubenswrapper[4922]: I1128 07:18:51.252133 4922 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-bbctq"] Nov 28 07:18:51 crc kubenswrapper[4922]: I1128 07:18:51.287769 4922 scope.go:117] "RemoveContainer" containerID="a79130da9f41fedba55ab74d2fdc7106335c3ac797600d28caf7cd44b4df76f9" Nov 28 07:18:51 crc kubenswrapper[4922]: E1128 07:18:51.288298 4922 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a79130da9f41fedba55ab74d2fdc7106335c3ac797600d28caf7cd44b4df76f9\": container with ID starting with a79130da9f41fedba55ab74d2fdc7106335c3ac797600d28caf7cd44b4df76f9 not found: ID does not exist" containerID="a79130da9f41fedba55ab74d2fdc7106335c3ac797600d28caf7cd44b4df76f9" Nov 28 07:18:51 crc kubenswrapper[4922]: I1128 07:18:51.288326 4922 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a79130da9f41fedba55ab74d2fdc7106335c3ac797600d28caf7cd44b4df76f9"} err="failed to get container status \"a79130da9f41fedba55ab74d2fdc7106335c3ac797600d28caf7cd44b4df76f9\": rpc error: code = NotFound desc = could not find container \"a79130da9f41fedba55ab74d2fdc7106335c3ac797600d28caf7cd44b4df76f9\": container with ID starting with a79130da9f41fedba55ab74d2fdc7106335c3ac797600d28caf7cd44b4df76f9 not found: ID does not exist" Nov 28 07:18:51 crc 
kubenswrapper[4922]: I1128 07:18:51.288346 4922 scope.go:117] "RemoveContainer" containerID="f5c9a826ca5b19e00418b01c978cdc7b6029d82dc4d0bfa979500d8f8060d711" Nov 28 07:18:51 crc kubenswrapper[4922]: E1128 07:18:51.288773 4922 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f5c9a826ca5b19e00418b01c978cdc7b6029d82dc4d0bfa979500d8f8060d711\": container with ID starting with f5c9a826ca5b19e00418b01c978cdc7b6029d82dc4d0bfa979500d8f8060d711 not found: ID does not exist" containerID="f5c9a826ca5b19e00418b01c978cdc7b6029d82dc4d0bfa979500d8f8060d711" Nov 28 07:18:51 crc kubenswrapper[4922]: I1128 07:18:51.288844 4922 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f5c9a826ca5b19e00418b01c978cdc7b6029d82dc4d0bfa979500d8f8060d711"} err="failed to get container status \"f5c9a826ca5b19e00418b01c978cdc7b6029d82dc4d0bfa979500d8f8060d711\": rpc error: code = NotFound desc = could not find container \"f5c9a826ca5b19e00418b01c978cdc7b6029d82dc4d0bfa979500d8f8060d711\": container with ID starting with f5c9a826ca5b19e00418b01c978cdc7b6029d82dc4d0bfa979500d8f8060d711 not found: ID does not exist" Nov 28 07:18:51 crc kubenswrapper[4922]: I1128 07:18:51.288885 4922 scope.go:117] "RemoveContainer" containerID="0d9cc75603f3f7e588b38c8804fa5b200f965d8e356839d4ff9d2483809a58de" Nov 28 07:18:51 crc kubenswrapper[4922]: E1128 07:18:51.289206 4922 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0d9cc75603f3f7e588b38c8804fa5b200f965d8e356839d4ff9d2483809a58de\": container with ID starting with 0d9cc75603f3f7e588b38c8804fa5b200f965d8e356839d4ff9d2483809a58de not found: ID does not exist" containerID="0d9cc75603f3f7e588b38c8804fa5b200f965d8e356839d4ff9d2483809a58de" Nov 28 07:18:51 crc kubenswrapper[4922]: I1128 07:18:51.289281 4922 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0d9cc75603f3f7e588b38c8804fa5b200f965d8e356839d4ff9d2483809a58de"} err="failed to get container status \"0d9cc75603f3f7e588b38c8804fa5b200f965d8e356839d4ff9d2483809a58de\": rpc error: code = NotFound desc = could not find container \"0d9cc75603f3f7e588b38c8804fa5b200f965d8e356839d4ff9d2483809a58de\": container with ID starting with 0d9cc75603f3f7e588b38c8804fa5b200f965d8e356839d4ff9d2483809a58de not found: ID does not exist" Nov 28 07:18:51 crc kubenswrapper[4922]: I1128 07:18:51.413250 4922 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="61e29ed1-a71d-45cd-b892-c4145215a923" path="/var/lib/kubelet/pods/61e29ed1-a71d-45cd-b892-c4145215a923/volumes" Nov 28 07:18:51 crc kubenswrapper[4922]: I1128 07:18:51.418821 4922 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="ed30601b-1e7c-4aa6-8469-8ff61cd93253" containerName="nova-api-log" probeResult="failure" output="Get \"https://10.217.0.202:8774/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Nov 28 07:18:51 crc kubenswrapper[4922]: I1128 07:18:51.431445 4922 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="ed30601b-1e7c-4aa6-8469-8ff61cd93253" containerName="nova-api-api" probeResult="failure" output="Get \"https://10.217.0.202:8774/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Nov 28 07:18:57 crc kubenswrapper[4922]: I1128 07:18:57.311832 4922 patch_prober.go:28] interesting 
pod/machine-config-daemon-h8wk6 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 07:18:57 crc kubenswrapper[4922]: I1128 07:18:57.312454 4922 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-h8wk6" podUID="0498340a-5e95-42bf-a0a6-8ac89a6b8858" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 07:19:00 crc kubenswrapper[4922]: I1128 07:19:00.415403 4922 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-api-0" Nov 28 07:19:00 crc kubenswrapper[4922]: I1128 07:19:00.416610 4922 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-api-0" Nov 28 07:19:00 crc kubenswrapper[4922]: I1128 07:19:00.418610 4922 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-api-0" Nov 28 07:19:00 crc kubenswrapper[4922]: I1128 07:19:00.430479 4922 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-api-0" Nov 28 07:19:01 crc kubenswrapper[4922]: I1128 07:19:01.278467 4922 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-api-0" Nov 28 07:19:01 crc kubenswrapper[4922]: I1128 07:19:01.287205 4922 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-api-0" Nov 28 07:19:16 crc kubenswrapper[4922]: I1128 07:19:16.553273 4922 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-lpw65"] Nov 28 07:19:16 crc kubenswrapper[4922]: E1128 07:19:16.554077 4922 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="61e29ed1-a71d-45cd-b892-c4145215a923" containerName="extract-utilities" Nov 28 07:19:16 crc kubenswrapper[4922]: I1128 07:19:16.554089 4922 state_mem.go:107] "Deleted CPUSet assignment" podUID="61e29ed1-a71d-45cd-b892-c4145215a923" containerName="extract-utilities" Nov 28 07:19:16 crc kubenswrapper[4922]: E1128 07:19:16.554105 4922 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="61e29ed1-a71d-45cd-b892-c4145215a923" containerName="extract-content" Nov 28 07:19:16 crc kubenswrapper[4922]: I1128 07:19:16.554110 4922 state_mem.go:107] "Deleted CPUSet assignment" podUID="61e29ed1-a71d-45cd-b892-c4145215a923" containerName="extract-content" Nov 28 07:19:16 crc kubenswrapper[4922]: E1128 07:19:16.554122 4922 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="61e29ed1-a71d-45cd-b892-c4145215a923" containerName="registry-server" Nov 28 07:19:16 crc kubenswrapper[4922]: I1128 07:19:16.554128 4922 state_mem.go:107] "Deleted CPUSet assignment" podUID="61e29ed1-a71d-45cd-b892-c4145215a923" containerName="registry-server" Nov 28 07:19:16 crc kubenswrapper[4922]: I1128 07:19:16.554311 4922 memory_manager.go:354] "RemoveStaleState removing state" podUID="61e29ed1-a71d-45cd-b892-c4145215a923" containerName="registry-server" Nov 28 07:19:16 crc kubenswrapper[4922]: I1128 07:19:16.555501 4922 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-lpw65" Nov 28 07:19:16 crc kubenswrapper[4922]: I1128 07:19:16.591674 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-lpw65"] Nov 28 07:19:16 crc kubenswrapper[4922]: I1128 07:19:16.656689 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/21fdc98b-667f-44b1-9fae-87f96ba4b514-catalog-content\") pod \"community-operators-lpw65\" (UID: \"21fdc98b-667f-44b1-9fae-87f96ba4b514\") " pod="openshift-marketplace/community-operators-lpw65" Nov 28 07:19:16 crc kubenswrapper[4922]: I1128 07:19:16.656758 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-l926r\" (UniqueName: \"kubernetes.io/projected/21fdc98b-667f-44b1-9fae-87f96ba4b514-kube-api-access-l926r\") pod \"community-operators-lpw65\" (UID: \"21fdc98b-667f-44b1-9fae-87f96ba4b514\") " pod="openshift-marketplace/community-operators-lpw65" Nov 28 07:19:16 crc kubenswrapper[4922]: I1128 07:19:16.656821 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/21fdc98b-667f-44b1-9fae-87f96ba4b514-utilities\") pod \"community-operators-lpw65\" (UID: \"21fdc98b-667f-44b1-9fae-87f96ba4b514\") " pod="openshift-marketplace/community-operators-lpw65" Nov 28 07:19:16 crc kubenswrapper[4922]: I1128 07:19:16.758595 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/21fdc98b-667f-44b1-9fae-87f96ba4b514-catalog-content\") pod \"community-operators-lpw65\" (UID: \"21fdc98b-667f-44b1-9fae-87f96ba4b514\") " pod="openshift-marketplace/community-operators-lpw65" Nov 28 07:19:16 crc kubenswrapper[4922]: I1128 07:19:16.758643 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-l926r\" (UniqueName: \"kubernetes.io/projected/21fdc98b-667f-44b1-9fae-87f96ba4b514-kube-api-access-l926r\") pod \"community-operators-lpw65\" (UID: \"21fdc98b-667f-44b1-9fae-87f96ba4b514\") " pod="openshift-marketplace/community-operators-lpw65" Nov 28 07:19:16 crc kubenswrapper[4922]: I1128 07:19:16.758845 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/21fdc98b-667f-44b1-9fae-87f96ba4b514-utilities\") pod \"community-operators-lpw65\" (UID: \"21fdc98b-667f-44b1-9fae-87f96ba4b514\") " pod="openshift-marketplace/community-operators-lpw65" Nov 28 07:19:16 crc kubenswrapper[4922]: I1128 07:19:16.759356 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/21fdc98b-667f-44b1-9fae-87f96ba4b514-utilities\") pod \"community-operators-lpw65\" (UID: \"21fdc98b-667f-44b1-9fae-87f96ba4b514\") " pod="openshift-marketplace/community-operators-lpw65" Nov 28 07:19:16 crc kubenswrapper[4922]: I1128 07:19:16.759930 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/21fdc98b-667f-44b1-9fae-87f96ba4b514-catalog-content\") pod \"community-operators-lpw65\" (UID: \"21fdc98b-667f-44b1-9fae-87f96ba4b514\") " pod="openshift-marketplace/community-operators-lpw65" Nov 28 07:19:16 crc kubenswrapper[4922]: I1128 07:19:16.780750 4922 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-l926r\" (UniqueName: \"kubernetes.io/projected/21fdc98b-667f-44b1-9fae-87f96ba4b514-kube-api-access-l926r\") pod \"community-operators-lpw65\" (UID: \"21fdc98b-667f-44b1-9fae-87f96ba4b514\") " pod="openshift-marketplace/community-operators-lpw65" Nov 28 07:19:16 crc kubenswrapper[4922]: I1128 07:19:16.889187 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-lpw65" Nov 28 07:19:17 crc kubenswrapper[4922]: I1128 07:19:17.444723 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-lpw65"] Nov 28 07:19:17 crc kubenswrapper[4922]: I1128 07:19:17.497336 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-lpw65" event={"ID":"21fdc98b-667f-44b1-9fae-87f96ba4b514","Type":"ContainerStarted","Data":"c307381d7222c0990ec28be82766db01bec35a83e3c6400b127d7aab77071ee7"} Nov 28 07:19:18 crc kubenswrapper[4922]: I1128 07:19:18.509160 4922 generic.go:334] "Generic (PLEG): container finished" podID="21fdc98b-667f-44b1-9fae-87f96ba4b514" containerID="48c0dde58a57e730bc64b51812695bd28ab94b9805b8a4e14e572c7611b1890e" exitCode=0 Nov 28 07:19:18 crc kubenswrapper[4922]: I1128 07:19:18.509255 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-lpw65" event={"ID":"21fdc98b-667f-44b1-9fae-87f96ba4b514","Type":"ContainerDied","Data":"48c0dde58a57e730bc64b51812695bd28ab94b9805b8a4e14e572c7611b1890e"} Nov 28 07:19:19 crc kubenswrapper[4922]: I1128 07:19:19.374303 4922 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-keystone-listener-7d9b9667cd-5cmld"] Nov 28 07:19:19 crc kubenswrapper[4922]: I1128 07:19:19.376731 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-keystone-listener-7d9b9667cd-5cmld" Nov 28 07:19:19 crc kubenswrapper[4922]: I1128 07:19:19.399314 4922 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-worker-b597c9f45-g422b"] Nov 28 07:19:19 crc kubenswrapper[4922]: I1128 07:19:19.401094 4922 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-worker-b597c9f45-g422b" Nov 28 07:19:19 crc kubenswrapper[4922]: I1128 07:19:19.437360 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/6ef73d39-2ed2-4168-8598-e0749aa0a26b-logs\") pod \"barbican-worker-b597c9f45-g422b\" (UID: \"6ef73d39-2ed2-4168-8598-e0749aa0a26b\") " pod="openstack/barbican-worker-b597c9f45-g422b" Nov 28 07:19:19 crc kubenswrapper[4922]: I1128 07:19:19.437390 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/6ef73d39-2ed2-4168-8598-e0749aa0a26b-config-data-custom\") pod \"barbican-worker-b597c9f45-g422b\" (UID: \"6ef73d39-2ed2-4168-8598-e0749aa0a26b\") " pod="openstack/barbican-worker-b597c9f45-g422b" Nov 28 07:19:19 crc kubenswrapper[4922]: I1128 07:19:19.437428 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6ef73d39-2ed2-4168-8598-e0749aa0a26b-config-data\") pod \"barbican-worker-b597c9f45-g422b\" (UID: \"6ef73d39-2ed2-4168-8598-e0749aa0a26b\") " pod="openstack/barbican-worker-b597c9f45-g422b" Nov 28 07:19:19 crc kubenswrapper[4922]: I1128 07:19:19.437445 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/454a2683-850f-4ce0-8ebe-7758105dd255-config-data\") pod \"barbican-keystone-listener-7d9b9667cd-5cmld\" (UID: \"454a2683-850f-4ce0-8ebe-7758105dd255\") " pod="openstack/barbican-keystone-listener-7d9b9667cd-5cmld" Nov 28 07:19:19 crc kubenswrapper[4922]: I1128 07:19:19.437458 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/454a2683-850f-4ce0-8ebe-7758105dd255-config-data-custom\") pod \"barbican-keystone-listener-7d9b9667cd-5cmld\" (UID: \"454a2683-850f-4ce0-8ebe-7758105dd255\") " pod="openstack/barbican-keystone-listener-7d9b9667cd-5cmld" Nov 28 07:19:19 crc kubenswrapper[4922]: I1128 07:19:19.437500 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/454a2683-850f-4ce0-8ebe-7758105dd255-combined-ca-bundle\") pod \"barbican-keystone-listener-7d9b9667cd-5cmld\" (UID: \"454a2683-850f-4ce0-8ebe-7758105dd255\") " pod="openstack/barbican-keystone-listener-7d9b9667cd-5cmld" Nov 28 07:19:19 crc kubenswrapper[4922]: I1128 07:19:19.437529 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-szv69\" (UniqueName: \"kubernetes.io/projected/6ef73d39-2ed2-4168-8598-e0749aa0a26b-kube-api-access-szv69\") pod \"barbican-worker-b597c9f45-g422b\" (UID: \"6ef73d39-2ed2-4168-8598-e0749aa0a26b\") " pod="openstack/barbican-worker-b597c9f45-g422b" Nov 28 07:19:19 crc kubenswrapper[4922]: I1128 07:19:19.437563 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6ef73d39-2ed2-4168-8598-e0749aa0a26b-combined-ca-bundle\") pod \"barbican-worker-b597c9f45-g422b\" (UID: \"6ef73d39-2ed2-4168-8598-e0749aa0a26b\") " pod="openstack/barbican-worker-b597c9f45-g422b" Nov 28 07:19:19 crc kubenswrapper[4922]: I1128 07:19:19.437587 4922 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/454a2683-850f-4ce0-8ebe-7758105dd255-logs\") pod \"barbican-keystone-listener-7d9b9667cd-5cmld\" (UID: \"454a2683-850f-4ce0-8ebe-7758105dd255\") " pod="openstack/barbican-keystone-listener-7d9b9667cd-5cmld" Nov 28 07:19:19 crc kubenswrapper[4922]: I1128 07:19:19.437630 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-b7864\" (UniqueName: \"kubernetes.io/projected/454a2683-850f-4ce0-8ebe-7758105dd255-kube-api-access-b7864\") pod \"barbican-keystone-listener-7d9b9667cd-5cmld\" (UID: \"454a2683-850f-4ce0-8ebe-7758105dd255\") " pod="openstack/barbican-keystone-listener-7d9b9667cd-5cmld" Nov 28 07:19:19 crc kubenswrapper[4922]: I1128 07:19:19.444241 4922 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-scheduler-0"] Nov 28 07:19:19 crc kubenswrapper[4922]: I1128 07:19:19.444472 4922 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-scheduler-0" podUID="678d1f5b-5ebc-4b9e-b5ab-316ec7dfda05" containerName="cinder-scheduler" containerID="cri-o://e6868b48a7fdc4bd6127081507fdf9eb8d100fe7676d439af911bfdac8246114" gracePeriod=30 Nov 28 07:19:19 crc kubenswrapper[4922]: I1128 07:19:19.444593 4922 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-scheduler-0" podUID="678d1f5b-5ebc-4b9e-b5ab-316ec7dfda05" containerName="probe" containerID="cri-o://070987feaf271fc91c9916a76b022635a4c92b91a9f70a5a74eccab2983e3ede" gracePeriod=30 Nov 28 07:19:19 crc kubenswrapper[4922]: I1128 07:19:19.480513 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-keystone-listener-7d9b9667cd-5cmld"] Nov 28 07:19:19 crc kubenswrapper[4922]: I1128 07:19:19.523849 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-worker-b597c9f45-g422b"] Nov 28 07:19:19 crc kubenswrapper[4922]: I1128 07:19:19.540621 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/454a2683-850f-4ce0-8ebe-7758105dd255-combined-ca-bundle\") pod \"barbican-keystone-listener-7d9b9667cd-5cmld\" (UID: \"454a2683-850f-4ce0-8ebe-7758105dd255\") " pod="openstack/barbican-keystone-listener-7d9b9667cd-5cmld" Nov 28 07:19:19 crc kubenswrapper[4922]: I1128 07:19:19.540681 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-szv69\" (UniqueName: \"kubernetes.io/projected/6ef73d39-2ed2-4168-8598-e0749aa0a26b-kube-api-access-szv69\") pod \"barbican-worker-b597c9f45-g422b\" (UID: \"6ef73d39-2ed2-4168-8598-e0749aa0a26b\") " pod="openstack/barbican-worker-b597c9f45-g422b" Nov 28 07:19:19 crc kubenswrapper[4922]: I1128 07:19:19.540742 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6ef73d39-2ed2-4168-8598-e0749aa0a26b-combined-ca-bundle\") pod \"barbican-worker-b597c9f45-g422b\" (UID: \"6ef73d39-2ed2-4168-8598-e0749aa0a26b\") " pod="openstack/barbican-worker-b597c9f45-g422b" Nov 28 07:19:19 crc kubenswrapper[4922]: I1128 07:19:19.540772 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/454a2683-850f-4ce0-8ebe-7758105dd255-logs\") pod \"barbican-keystone-listener-7d9b9667cd-5cmld\" (UID: \"454a2683-850f-4ce0-8ebe-7758105dd255\") " 
pod="openstack/barbican-keystone-listener-7d9b9667cd-5cmld" Nov 28 07:19:19 crc kubenswrapper[4922]: I1128 07:19:19.540835 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-b7864\" (UniqueName: \"kubernetes.io/projected/454a2683-850f-4ce0-8ebe-7758105dd255-kube-api-access-b7864\") pod \"barbican-keystone-listener-7d9b9667cd-5cmld\" (UID: \"454a2683-850f-4ce0-8ebe-7758105dd255\") " pod="openstack/barbican-keystone-listener-7d9b9667cd-5cmld" Nov 28 07:19:19 crc kubenswrapper[4922]: I1128 07:19:19.540861 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/6ef73d39-2ed2-4168-8598-e0749aa0a26b-logs\") pod \"barbican-worker-b597c9f45-g422b\" (UID: \"6ef73d39-2ed2-4168-8598-e0749aa0a26b\") " pod="openstack/barbican-worker-b597c9f45-g422b" Nov 28 07:19:19 crc kubenswrapper[4922]: I1128 07:19:19.540876 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/6ef73d39-2ed2-4168-8598-e0749aa0a26b-config-data-custom\") pod \"barbican-worker-b597c9f45-g422b\" (UID: \"6ef73d39-2ed2-4168-8598-e0749aa0a26b\") " pod="openstack/barbican-worker-b597c9f45-g422b" Nov 28 07:19:19 crc kubenswrapper[4922]: I1128 07:19:19.540912 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6ef73d39-2ed2-4168-8598-e0749aa0a26b-config-data\") pod \"barbican-worker-b597c9f45-g422b\" (UID: \"6ef73d39-2ed2-4168-8598-e0749aa0a26b\") " pod="openstack/barbican-worker-b597c9f45-g422b" Nov 28 07:19:19 crc kubenswrapper[4922]: I1128 07:19:19.540928 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/454a2683-850f-4ce0-8ebe-7758105dd255-config-data\") pod \"barbican-keystone-listener-7d9b9667cd-5cmld\" (UID: \"454a2683-850f-4ce0-8ebe-7758105dd255\") " pod="openstack/barbican-keystone-listener-7d9b9667cd-5cmld" Nov 28 07:19:19 crc kubenswrapper[4922]: I1128 07:19:19.540944 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/454a2683-850f-4ce0-8ebe-7758105dd255-config-data-custom\") pod \"barbican-keystone-listener-7d9b9667cd-5cmld\" (UID: \"454a2683-850f-4ce0-8ebe-7758105dd255\") " pod="openstack/barbican-keystone-listener-7d9b9667cd-5cmld" Nov 28 07:19:19 crc kubenswrapper[4922]: I1128 07:19:19.541621 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/454a2683-850f-4ce0-8ebe-7758105dd255-logs\") pod \"barbican-keystone-listener-7d9b9667cd-5cmld\" (UID: \"454a2683-850f-4ce0-8ebe-7758105dd255\") " pod="openstack/barbican-keystone-listener-7d9b9667cd-5cmld" Nov 28 07:19:19 crc kubenswrapper[4922]: I1128 07:19:19.543030 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/6ef73d39-2ed2-4168-8598-e0749aa0a26b-logs\") pod \"barbican-worker-b597c9f45-g422b\" (UID: \"6ef73d39-2ed2-4168-8598-e0749aa0a26b\") " pod="openstack/barbican-worker-b597c9f45-g422b" Nov 28 07:19:19 crc kubenswrapper[4922]: I1128 07:19:19.561951 4922 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/openstackclient"] Nov 28 07:19:19 crc kubenswrapper[4922]: I1128 07:19:19.562204 4922 kuberuntime_container.go:808] "Killing container with a grace period" 
pod="openstack/openstackclient" podUID="264e478f-8337-4f40-b005-84a7cd802eaa" containerName="openstackclient" containerID="cri-o://6d78c8d0fed333630cf5081b1caacc151cab7db6d74c7fe5e06ec18ea0087bce" gracePeriod=2 Nov 28 07:19:19 crc kubenswrapper[4922]: I1128 07:19:19.577487 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/454a2683-850f-4ce0-8ebe-7758105dd255-config-data-custom\") pod \"barbican-keystone-listener-7d9b9667cd-5cmld\" (UID: \"454a2683-850f-4ce0-8ebe-7758105dd255\") " pod="openstack/barbican-keystone-listener-7d9b9667cd-5cmld" Nov 28 07:19:19 crc kubenswrapper[4922]: I1128 07:19:19.578127 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6ef73d39-2ed2-4168-8598-e0749aa0a26b-combined-ca-bundle\") pod \"barbican-worker-b597c9f45-g422b\" (UID: \"6ef73d39-2ed2-4168-8598-e0749aa0a26b\") " pod="openstack/barbican-worker-b597c9f45-g422b" Nov 28 07:19:19 crc kubenswrapper[4922]: I1128 07:19:19.578993 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6ef73d39-2ed2-4168-8598-e0749aa0a26b-config-data\") pod \"barbican-worker-b597c9f45-g422b\" (UID: \"6ef73d39-2ed2-4168-8598-e0749aa0a26b\") " pod="openstack/barbican-worker-b597c9f45-g422b" Nov 28 07:19:19 crc kubenswrapper[4922]: I1128 07:19:19.583794 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/454a2683-850f-4ce0-8ebe-7758105dd255-combined-ca-bundle\") pod \"barbican-keystone-listener-7d9b9667cd-5cmld\" (UID: \"454a2683-850f-4ce0-8ebe-7758105dd255\") " pod="openstack/barbican-keystone-listener-7d9b9667cd-5cmld" Nov 28 07:19:19 crc kubenswrapper[4922]: I1128 07:19:19.585286 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/454a2683-850f-4ce0-8ebe-7758105dd255-config-data\") pod \"barbican-keystone-listener-7d9b9667cd-5cmld\" (UID: \"454a2683-850f-4ce0-8ebe-7758105dd255\") " pod="openstack/barbican-keystone-listener-7d9b9667cd-5cmld" Nov 28 07:19:19 crc kubenswrapper[4922]: I1128 07:19:19.588808 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/6ef73d39-2ed2-4168-8598-e0749aa0a26b-config-data-custom\") pod \"barbican-worker-b597c9f45-g422b\" (UID: \"6ef73d39-2ed2-4168-8598-e0749aa0a26b\") " pod="openstack/barbican-worker-b597c9f45-g422b" Nov 28 07:19:19 crc kubenswrapper[4922]: I1128 07:19:19.592885 4922 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/openstackclient"] Nov 28 07:19:19 crc kubenswrapper[4922]: I1128 07:19:19.631825 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-b7864\" (UniqueName: \"kubernetes.io/projected/454a2683-850f-4ce0-8ebe-7758105dd255-kube-api-access-b7864\") pod \"barbican-keystone-listener-7d9b9667cd-5cmld\" (UID: \"454a2683-850f-4ce0-8ebe-7758105dd255\") " pod="openstack/barbican-keystone-listener-7d9b9667cd-5cmld" Nov 28 07:19:19 crc kubenswrapper[4922]: I1128 07:19:19.710904 4922 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-keystone-listener-7d9b9667cd-5cmld" Nov 28 07:19:19 crc kubenswrapper[4922]: I1128 07:19:19.733958 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-szv69\" (UniqueName: \"kubernetes.io/projected/6ef73d39-2ed2-4168-8598-e0749aa0a26b-kube-api-access-szv69\") pod \"barbican-worker-b597c9f45-g422b\" (UID: \"6ef73d39-2ed2-4168-8598-e0749aa0a26b\") " pod="openstack/barbican-worker-b597c9f45-g422b" Nov 28 07:19:19 crc kubenswrapper[4922]: I1128 07:19:19.746856 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-worker-b597c9f45-g422b" Nov 28 07:19:19 crc kubenswrapper[4922]: I1128 07:19:19.870718 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-server-0"] Nov 28 07:19:19 crc kubenswrapper[4922]: E1128 07:19:19.988260 4922 configmap.go:193] Couldn't get configMap openstack/rabbitmq-config-data: configmap "rabbitmq-config-data" not found Nov 28 07:19:19 crc kubenswrapper[4922]: E1128 07:19:19.988315 4922 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/4cf25acc-0d60-4b0a-a9c9-adc7ddce7458-config-data podName:4cf25acc-0d60-4b0a-a9c9-adc7ddce7458 nodeName:}" failed. No retries permitted until 2025-11-28 07:19:20.488299831 +0000 UTC m=+1605.408695413 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "config-data" (UniqueName: "kubernetes.io/configmap/4cf25acc-0d60-4b0a-a9c9-adc7ddce7458-config-data") pod "rabbitmq-server-0" (UID: "4cf25acc-0d60-4b0a-a9c9-adc7ddce7458") : configmap "rabbitmq-config-data" not found Nov 28 07:19:20 crc kubenswrapper[4922]: I1128 07:19:20.015281 4922 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-api-757bbb5fbd-lx4kn"] Nov 28 07:19:20 crc kubenswrapper[4922]: E1128 07:19:20.015734 4922 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="264e478f-8337-4f40-b005-84a7cd802eaa" containerName="openstackclient" Nov 28 07:19:20 crc kubenswrapper[4922]: I1128 07:19:20.015759 4922 state_mem.go:107] "Deleted CPUSet assignment" podUID="264e478f-8337-4f40-b005-84a7cd802eaa" containerName="openstackclient" Nov 28 07:19:20 crc kubenswrapper[4922]: I1128 07:19:20.015943 4922 memory_manager.go:354] "RemoveStaleState removing state" podUID="264e478f-8337-4f40-b005-84a7cd802eaa" containerName="openstackclient" Nov 28 07:19:20 crc kubenswrapper[4922]: I1128 07:19:20.016980 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-api-757bbb5fbd-lx4kn" Nov 28 07:19:20 crc kubenswrapper[4922]: I1128 07:19:20.029422 4922 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance75df-account-delete-wdw2x"] Nov 28 07:19:20 crc kubenswrapper[4922]: I1128 07:19:20.030596 4922 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance75df-account-delete-wdw2x" Nov 28 07:19:20 crc kubenswrapper[4922]: I1128 07:19:20.071571 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance75df-account-delete-wdw2x"] Nov 28 07:19:20 crc kubenswrapper[4922]: I1128 07:19:20.089973 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ldx9v\" (UniqueName: \"kubernetes.io/projected/1f339784-df58-44f7-947d-9d80559c1c0c-kube-api-access-ldx9v\") pod \"glance75df-account-delete-wdw2x\" (UID: \"1f339784-df58-44f7-947d-9d80559c1c0c\") " pod="openstack/glance75df-account-delete-wdw2x" Nov 28 07:19:20 crc kubenswrapper[4922]: I1128 07:19:20.090020 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/f1e0e318-5b90-4c18-ba95-fc261ffb519d-internal-tls-certs\") pod \"barbican-api-757bbb5fbd-lx4kn\" (UID: \"f1e0e318-5b90-4c18-ba95-fc261ffb519d\") " pod="openstack/barbican-api-757bbb5fbd-lx4kn" Nov 28 07:19:20 crc kubenswrapper[4922]: I1128 07:19:20.090043 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f1e0e318-5b90-4c18-ba95-fc261ffb519d-config-data\") pod \"barbican-api-757bbb5fbd-lx4kn\" (UID: \"f1e0e318-5b90-4c18-ba95-fc261ffb519d\") " pod="openstack/barbican-api-757bbb5fbd-lx4kn" Nov 28 07:19:20 crc kubenswrapper[4922]: I1128 07:19:20.090103 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-88wn4\" (UniqueName: \"kubernetes.io/projected/f1e0e318-5b90-4c18-ba95-fc261ffb519d-kube-api-access-88wn4\") pod \"barbican-api-757bbb5fbd-lx4kn\" (UID: \"f1e0e318-5b90-4c18-ba95-fc261ffb519d\") " pod="openstack/barbican-api-757bbb5fbd-lx4kn" Nov 28 07:19:20 crc kubenswrapper[4922]: I1128 07:19:20.090118 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/f1e0e318-5b90-4c18-ba95-fc261ffb519d-public-tls-certs\") pod \"barbican-api-757bbb5fbd-lx4kn\" (UID: \"f1e0e318-5b90-4c18-ba95-fc261ffb519d\") " pod="openstack/barbican-api-757bbb5fbd-lx4kn" Nov 28 07:19:20 crc kubenswrapper[4922]: I1128 07:19:20.090144 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/f1e0e318-5b90-4c18-ba95-fc261ffb519d-logs\") pod \"barbican-api-757bbb5fbd-lx4kn\" (UID: \"f1e0e318-5b90-4c18-ba95-fc261ffb519d\") " pod="openstack/barbican-api-757bbb5fbd-lx4kn" Nov 28 07:19:20 crc kubenswrapper[4922]: I1128 07:19:20.090160 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/1f339784-df58-44f7-947d-9d80559c1c0c-operator-scripts\") pod \"glance75df-account-delete-wdw2x\" (UID: \"1f339784-df58-44f7-947d-9d80559c1c0c\") " pod="openstack/glance75df-account-delete-wdw2x" Nov 28 07:19:20 crc kubenswrapper[4922]: I1128 07:19:20.090307 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/f1e0e318-5b90-4c18-ba95-fc261ffb519d-config-data-custom\") pod \"barbican-api-757bbb5fbd-lx4kn\" (UID: \"f1e0e318-5b90-4c18-ba95-fc261ffb519d\") " pod="openstack/barbican-api-757bbb5fbd-lx4kn" Nov 28 
07:19:20 crc kubenswrapper[4922]: I1128 07:19:20.090343 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f1e0e318-5b90-4c18-ba95-fc261ffb519d-combined-ca-bundle\") pod \"barbican-api-757bbb5fbd-lx4kn\" (UID: \"f1e0e318-5b90-4c18-ba95-fc261ffb519d\") " pod="openstack/barbican-api-757bbb5fbd-lx4kn"
Nov 28 07:19:20 crc kubenswrapper[4922]: I1128 07:19:20.108555 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-api-757bbb5fbd-lx4kn"]
Nov 28 07:19:20 crc kubenswrapper[4922]: I1128 07:19:20.175021 4922 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-api-0"]
Nov 28 07:19:20 crc kubenswrapper[4922]: I1128 07:19:20.175248 4922 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-api-0" podUID="c038b865-4b32-4be3-9e0a-8c40dc140a68" containerName="cinder-api-log" containerID="cri-o://31b146f13fd3f77793153557e7b67237270c42c2a177ad53d0c2fee7a88ba3e1" gracePeriod=30
Nov 28 07:19:20 crc kubenswrapper[4922]: I1128 07:19:20.175370 4922 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-api-0" podUID="c038b865-4b32-4be3-9e0a-8c40dc140a68" containerName="cinder-api" containerID="cri-o://b1de83b3a6f902b036e25483c19523a902ba3f397b59a81a79e88727c51fa4bb" gracePeriod=30
Nov 28 07:19:20 crc kubenswrapper[4922]: I1128 07:19:20.194093 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ldx9v\" (UniqueName: \"kubernetes.io/projected/1f339784-df58-44f7-947d-9d80559c1c0c-kube-api-access-ldx9v\") pod \"glance75df-account-delete-wdw2x\" (UID: \"1f339784-df58-44f7-947d-9d80559c1c0c\") " pod="openstack/glance75df-account-delete-wdw2x"
Nov 28 07:19:20 crc kubenswrapper[4922]: I1128 07:19:20.194384 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/f1e0e318-5b90-4c18-ba95-fc261ffb519d-internal-tls-certs\") pod \"barbican-api-757bbb5fbd-lx4kn\" (UID: \"f1e0e318-5b90-4c18-ba95-fc261ffb519d\") " pod="openstack/barbican-api-757bbb5fbd-lx4kn"
Nov 28 07:19:20 crc kubenswrapper[4922]: I1128 07:19:20.194405 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f1e0e318-5b90-4c18-ba95-fc261ffb519d-config-data\") pod \"barbican-api-757bbb5fbd-lx4kn\" (UID: \"f1e0e318-5b90-4c18-ba95-fc261ffb519d\") " pod="openstack/barbican-api-757bbb5fbd-lx4kn"
Nov 28 07:19:20 crc kubenswrapper[4922]: I1128 07:19:20.194456 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-88wn4\" (UniqueName: \"kubernetes.io/projected/f1e0e318-5b90-4c18-ba95-fc261ffb519d-kube-api-access-88wn4\") pod \"barbican-api-757bbb5fbd-lx4kn\" (UID: \"f1e0e318-5b90-4c18-ba95-fc261ffb519d\") " pod="openstack/barbican-api-757bbb5fbd-lx4kn"
Nov 28 07:19:20 crc kubenswrapper[4922]: I1128 07:19:20.194471 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/f1e0e318-5b90-4c18-ba95-fc261ffb519d-public-tls-certs\") pod \"barbican-api-757bbb5fbd-lx4kn\" (UID: \"f1e0e318-5b90-4c18-ba95-fc261ffb519d\") " pod="openstack/barbican-api-757bbb5fbd-lx4kn"
Nov 28 07:19:20 crc kubenswrapper[4922]: I1128 07:19:20.194502 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/f1e0e318-5b90-4c18-ba95-fc261ffb519d-logs\") pod \"barbican-api-757bbb5fbd-lx4kn\" (UID: \"f1e0e318-5b90-4c18-ba95-fc261ffb519d\") " pod="openstack/barbican-api-757bbb5fbd-lx4kn"
Nov 28 07:19:20 crc kubenswrapper[4922]: I1128 07:19:20.194518 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/1f339784-df58-44f7-947d-9d80559c1c0c-operator-scripts\") pod \"glance75df-account-delete-wdw2x\" (UID: \"1f339784-df58-44f7-947d-9d80559c1c0c\") " pod="openstack/glance75df-account-delete-wdw2x"
Nov 28 07:19:20 crc kubenswrapper[4922]: I1128 07:19:20.194590 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/f1e0e318-5b90-4c18-ba95-fc261ffb519d-config-data-custom\") pod \"barbican-api-757bbb5fbd-lx4kn\" (UID: \"f1e0e318-5b90-4c18-ba95-fc261ffb519d\") " pod="openstack/barbican-api-757bbb5fbd-lx4kn"
Nov 28 07:19:20 crc kubenswrapper[4922]: I1128 07:19:20.194627 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f1e0e318-5b90-4c18-ba95-fc261ffb519d-combined-ca-bundle\") pod \"barbican-api-757bbb5fbd-lx4kn\" (UID: \"f1e0e318-5b90-4c18-ba95-fc261ffb519d\") " pod="openstack/barbican-api-757bbb5fbd-lx4kn"
Nov 28 07:19:20 crc kubenswrapper[4922]: I1128 07:19:20.195907 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/f1e0e318-5b90-4c18-ba95-fc261ffb519d-logs\") pod \"barbican-api-757bbb5fbd-lx4kn\" (UID: \"f1e0e318-5b90-4c18-ba95-fc261ffb519d\") " pod="openstack/barbican-api-757bbb5fbd-lx4kn"
Nov 28 07:19:20 crc kubenswrapper[4922]: I1128 07:19:20.201524 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/1f339784-df58-44f7-947d-9d80559c1c0c-operator-scripts\") pod \"glance75df-account-delete-wdw2x\" (UID: \"1f339784-df58-44f7-947d-9d80559c1c0c\") " pod="openstack/glance75df-account-delete-wdw2x"
Nov 28 07:19:20 crc kubenswrapper[4922]: I1128 07:19:20.219964 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f1e0e318-5b90-4c18-ba95-fc261ffb519d-config-data\") pod \"barbican-api-757bbb5fbd-lx4kn\" (UID: \"f1e0e318-5b90-4c18-ba95-fc261ffb519d\") " pod="openstack/barbican-api-757bbb5fbd-lx4kn"
Nov 28 07:19:20 crc kubenswrapper[4922]: I1128 07:19:20.242351 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-cell1-server-0"]
Nov 28 07:19:20 crc kubenswrapper[4922]: I1128 07:19:20.244883 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f1e0e318-5b90-4c18-ba95-fc261ffb519d-combined-ca-bundle\") pod \"barbican-api-757bbb5fbd-lx4kn\" (UID: \"f1e0e318-5b90-4c18-ba95-fc261ffb519d\") " pod="openstack/barbican-api-757bbb5fbd-lx4kn"
Nov 28 07:19:20 crc kubenswrapper[4922]: I1128 07:19:20.246053 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/f1e0e318-5b90-4c18-ba95-fc261ffb519d-internal-tls-certs\") pod \"barbican-api-757bbb5fbd-lx4kn\" (UID: \"f1e0e318-5b90-4c18-ba95-fc261ffb519d\") " pod="openstack/barbican-api-757bbb5fbd-lx4kn"
Nov 28 07:19:20 crc kubenswrapper[4922]: I1128 07:19:20.260045 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/f1e0e318-5b90-4c18-ba95-fc261ffb519d-config-data-custom\") pod \"barbican-api-757bbb5fbd-lx4kn\" (UID: \"f1e0e318-5b90-4c18-ba95-fc261ffb519d\") " pod="openstack/barbican-api-757bbb5fbd-lx4kn"
Nov 28 07:19:20 crc kubenswrapper[4922]: I1128 07:19:20.260089 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/f1e0e318-5b90-4c18-ba95-fc261ffb519d-public-tls-certs\") pod \"barbican-api-757bbb5fbd-lx4kn\" (UID: \"f1e0e318-5b90-4c18-ba95-fc261ffb519d\") " pod="openstack/barbican-api-757bbb5fbd-lx4kn"
Nov 28 07:19:20 crc kubenswrapper[4922]: I1128 07:19:20.294943 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-88wn4\" (UniqueName: \"kubernetes.io/projected/f1e0e318-5b90-4c18-ba95-fc261ffb519d-kube-api-access-88wn4\") pod \"barbican-api-757bbb5fbd-lx4kn\" (UID: \"f1e0e318-5b90-4c18-ba95-fc261ffb519d\") " pod="openstack/barbican-api-757bbb5fbd-lx4kn"
Nov 28 07:19:20 crc kubenswrapper[4922]: I1128 07:19:20.322758 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ldx9v\" (UniqueName: \"kubernetes.io/projected/1f339784-df58-44f7-947d-9d80559c1c0c-kube-api-access-ldx9v\") pod \"glance75df-account-delete-wdw2x\" (UID: \"1f339784-df58-44f7-947d-9d80559c1c0c\") " pod="openstack/glance75df-account-delete-wdw2x"
Nov 28 07:19:20 crc kubenswrapper[4922]: I1128 07:19:20.342755 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-api-757bbb5fbd-lx4kn"
Nov 28 07:19:20 crc kubenswrapper[4922]: I1128 07:19:20.380775 4922 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovn-controller-metrics-4jk5t"]
Nov 28 07:19:20 crc kubenswrapper[4922]: I1128 07:19:20.382845 4922 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ovn-controller-metrics-4jk5t" podUID="2070fbd8-e847-4b99-ba55-4579804bbc57" containerName="openstack-network-exporter" containerID="cri-o://7c57b6ea918e23a8d2aa6a00589247875694d1068bfad7021c5c76b0cae05bf4" gracePeriod=30
Nov 28 07:19:20 crc kubenswrapper[4922]: I1128 07:19:20.386114 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance75df-account-delete-wdw2x"
Nov 28 07:19:20 crc kubenswrapper[4922]: I1128 07:19:20.399394 4922 kubelet_pods.go:1007] "Unable to retrieve pull secret, the image pull may not succeed." pod="openstack/dnsmasq-dns-6bc646c8f9-nfgpb" secret="" err="secret \"dnsmasq-dns-dockercfg-njtmg\" not found"
Nov 28 07:19:20 crc kubenswrapper[4922]: E1128 07:19:20.399831 4922 configmap.go:193] Couldn't get configMap openstack/rabbitmq-cell1-config-data: configmap "rabbitmq-cell1-config-data" not found
Nov 28 07:19:20 crc kubenswrapper[4922]: E1128 07:19:20.427197 4922 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/99708a5d-57d5-4479-8e09-94428bb13fa3-config-data podName:99708a5d-57d5-4479-8e09-94428bb13fa3 nodeName:}" failed. No retries permitted until 2025-11-28 07:19:20.927161998 +0000 UTC m=+1605.847557580 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "config-data" (UniqueName: "kubernetes.io/configmap/99708a5d-57d5-4479-8e09-94428bb13fa3-config-data") pod "rabbitmq-cell1-server-0" (UID: "99708a5d-57d5-4479-8e09-94428bb13fa3") : configmap "rabbitmq-cell1-config-data" not found
Nov 28 07:19:20 crc kubenswrapper[4922]: I1128 07:19:20.433316 4922 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/placementdbc9-account-delete-68tbl"]
Nov 28 07:19:20 crc kubenswrapper[4922]: I1128 07:19:20.434635 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placementdbc9-account-delete-68tbl"
Nov 28 07:19:20 crc kubenswrapper[4922]: I1128 07:19:20.456803 4922 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovn-controller-xqzrg"]
Nov 28 07:19:20 crc kubenswrapper[4922]: I1128 07:19:20.478974 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placementdbc9-account-delete-68tbl"]
Nov 28 07:19:20 crc kubenswrapper[4922]: E1128 07:19:20.502013 4922 configmap.go:193] Couldn't get configMap openstack/dns: configmap "dns" not found
Nov 28 07:19:20 crc kubenswrapper[4922]: E1128 07:19:20.502311 4922 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/2a50cebf-c40b-425a-86a1-7813277f1b5a-config podName:2a50cebf-c40b-425a-86a1-7813277f1b5a nodeName:}" failed. No retries permitted until 2025-11-28 07:19:21.002291217 +0000 UTC m=+1605.922686869 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "config" (UniqueName: "kubernetes.io/configmap/2a50cebf-c40b-425a-86a1-7813277f1b5a-config") pod "dnsmasq-dns-6bc646c8f9-nfgpb" (UID: "2a50cebf-c40b-425a-86a1-7813277f1b5a") : configmap "dns" not found
Nov 28 07:19:20 crc kubenswrapper[4922]: E1128 07:19:20.503832 4922 configmap.go:193] Couldn't get configMap openstack/rabbitmq-config-data: configmap "rabbitmq-config-data" not found
Nov 28 07:19:20 crc kubenswrapper[4922]: E1128 07:19:20.503865 4922 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/4cf25acc-0d60-4b0a-a9c9-adc7ddce7458-config-data podName:4cf25acc-0d60-4b0a-a9c9-adc7ddce7458 nodeName:}" failed. No retries permitted until 2025-11-28 07:19:21.503856698 +0000 UTC m=+1606.424252280 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "config-data" (UniqueName: "kubernetes.io/configmap/4cf25acc-0d60-4b0a-a9c9-adc7ddce7458-config-data") pod "rabbitmq-server-0" (UID: "4cf25acc-0d60-4b0a-a9c9-adc7ddce7458") : configmap "rabbitmq-config-data" not found
Nov 28 07:19:20 crc kubenswrapper[4922]: I1128 07:19:20.521095 4922 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovn-controller-ovs-m9xpz"]
Nov 28 07:19:20 crc kubenswrapper[4922]: I1128 07:19:20.601566 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ca6d488f-6085-4e22-a325-1b749d8c154c-operator-scripts\") pod \"placementdbc9-account-delete-68tbl\" (UID: \"ca6d488f-6085-4e22-a325-1b749d8c154c\") " pod="openstack/placementdbc9-account-delete-68tbl"
Nov 28 07:19:20 crc kubenswrapper[4922]: I1128 07:19:20.601736 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dbd82\" (UniqueName: \"kubernetes.io/projected/ca6d488f-6085-4e22-a325-1b749d8c154c-kube-api-access-dbd82\") pod \"placementdbc9-account-delete-68tbl\" (UID: \"ca6d488f-6085-4e22-a325-1b749d8c154c\") " pod="openstack/placementdbc9-account-delete-68tbl"
Nov 28 07:19:20 crc kubenswrapper[4922]: I1128 07:19:20.633922 4922 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-db-sync-wk4t8"]
Nov 28 07:19:20 crc kubenswrapper[4922]: I1128 07:19:20.637449 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-lpw65" event={"ID":"21fdc98b-667f-44b1-9fae-87f96ba4b514","Type":"ContainerStarted","Data":"828100784a957c933fd986d8e524d4ea965617a13f9b745a0e7d98c118f37089"}
Nov 28 07:19:20 crc kubenswrapper[4922]: I1128 07:19:20.703029 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dbd82\" (UniqueName: \"kubernetes.io/projected/ca6d488f-6085-4e22-a325-1b749d8c154c-kube-api-access-dbd82\") pod \"placementdbc9-account-delete-68tbl\" (UID: \"ca6d488f-6085-4e22-a325-1b749d8c154c\") " pod="openstack/placementdbc9-account-delete-68tbl"
Nov 28 07:19:20 crc kubenswrapper[4922]: I1128 07:19:20.703114 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ca6d488f-6085-4e22-a325-1b749d8c154c-operator-scripts\") pod \"placementdbc9-account-delete-68tbl\" (UID: \"ca6d488f-6085-4e22-a325-1b749d8c154c\") " pod="openstack/placementdbc9-account-delete-68tbl"
Nov 28 07:19:20 crc kubenswrapper[4922]: I1128 07:19:20.703759 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ca6d488f-6085-4e22-a325-1b749d8c154c-operator-scripts\") pod \"placementdbc9-account-delete-68tbl\" (UID: \"ca6d488f-6085-4e22-a325-1b749d8c154c\") " pod="openstack/placementdbc9-account-delete-68tbl"
Nov 28 07:19:20 crc kubenswrapper[4922]: I1128 07:19:20.711586 4922 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cindere247-account-delete-5kjlk"]
Nov 28 07:19:20 crc kubenswrapper[4922]: I1128 07:19:20.712809 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cindere247-account-delete-5kjlk"
Nov 28 07:19:20 crc kubenswrapper[4922]: I1128 07:19:20.760862 4922 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-db-sync-wk4t8"]
Nov 28 07:19:20 crc kubenswrapper[4922]: I1128 07:19:20.805601 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wfkfd\" (UniqueName: \"kubernetes.io/projected/1018f07c-38b9-440f-b126-26e59293e757-kube-api-access-wfkfd\") pod \"cindere247-account-delete-5kjlk\" (UID: \"1018f07c-38b9-440f-b126-26e59293e757\") " pod="openstack/cindere247-account-delete-5kjlk"
Nov 28 07:19:20 crc kubenswrapper[4922]: I1128 07:19:20.805810 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/1018f07c-38b9-440f-b126-26e59293e757-operator-scripts\") pod \"cindere247-account-delete-5kjlk\" (UID: \"1018f07c-38b9-440f-b126-26e59293e757\") " pod="openstack/cindere247-account-delete-5kjlk"
Nov 28 07:19:20 crc kubenswrapper[4922]: I1128 07:19:20.806384 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cindere247-account-delete-5kjlk"]
Nov 28 07:19:20 crc kubenswrapper[4922]: I1128 07:19:20.809070 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dbd82\" (UniqueName: \"kubernetes.io/projected/ca6d488f-6085-4e22-a325-1b749d8c154c-kube-api-access-dbd82\") pod \"placementdbc9-account-delete-68tbl\" (UID: \"ca6d488f-6085-4e22-a325-1b749d8c154c\") " pod="openstack/placementdbc9-account-delete-68tbl"
Nov 28 07:19:20 crc kubenswrapper[4922]: I1128 07:19:20.858118 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placementdbc9-account-delete-68tbl"
Nov 28 07:19:20 crc kubenswrapper[4922]: I1128 07:19:20.883155 4922 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/placement-db-sync-gc24f"]
Nov 28 07:19:20 crc kubenswrapper[4922]: I1128 07:19:20.908866 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wfkfd\" (UniqueName: \"kubernetes.io/projected/1018f07c-38b9-440f-b126-26e59293e757-kube-api-access-wfkfd\") pod \"cindere247-account-delete-5kjlk\" (UID: \"1018f07c-38b9-440f-b126-26e59293e757\") " pod="openstack/cindere247-account-delete-5kjlk"
Nov 28 07:19:20 crc kubenswrapper[4922]: I1128 07:19:20.908978 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/1018f07c-38b9-440f-b126-26e59293e757-operator-scripts\") pod \"cindere247-account-delete-5kjlk\" (UID: \"1018f07c-38b9-440f-b126-26e59293e757\") " pod="openstack/cindere247-account-delete-5kjlk"
Nov 28 07:19:20 crc kubenswrapper[4922]: I1128 07:19:20.909762 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/1018f07c-38b9-440f-b126-26e59293e757-operator-scripts\") pod \"cindere247-account-delete-5kjlk\" (UID: \"1018f07c-38b9-440f-b126-26e59293e757\") " pod="openstack/cindere247-account-delete-5kjlk"
Nov 28 07:19:20 crc kubenswrapper[4922]: I1128 07:19:20.930766 4922 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/placement-db-sync-gc24f"]
Nov 28 07:19:20 crc kubenswrapper[4922]: I1128 07:19:20.947070 4922 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovn-northd-0"]
Nov 28 07:19:20 crc kubenswrapper[4922]: I1128 07:19:20.947404 4922 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ovn-northd-0" podUID="e070595b-ded5-4ba1-8e5d-10dee3f64439" containerName="ovn-northd" containerID="cri-o://ab03f8552f326c9c76a50463baa6a28a8cfaa27ea7ce5e6c3db040730b019068" gracePeriod=30
Nov 28 07:19:20 crc kubenswrapper[4922]: I1128 07:19:20.947903 4922 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ovn-northd-0" podUID="e070595b-ded5-4ba1-8e5d-10dee3f64439" containerName="openstack-network-exporter" containerID="cri-o://080f55e8e51ff6a214b0fe9fe62cc38adee207ae9bc0a4e40e78d515f29e447e" gracePeriod=30
Nov 28 07:19:20 crc kubenswrapper[4922]: I1128 07:19:20.973645 4922 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron5206-account-delete-64tk8"]
Nov 28 07:19:20 crc kubenswrapper[4922]: I1128 07:19:20.974854 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron5206-account-delete-64tk8"
Nov 28 07:19:20 crc kubenswrapper[4922]: I1128 07:19:20.978909 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wfkfd\" (UniqueName: \"kubernetes.io/projected/1018f07c-38b9-440f-b126-26e59293e757-kube-api-access-wfkfd\") pod \"cindere247-account-delete-5kjlk\" (UID: \"1018f07c-38b9-440f-b126-26e59293e757\") " pod="openstack/cindere247-account-delete-5kjlk"
Nov 28 07:19:21 crc kubenswrapper[4922]: E1128 07:19:21.010691 4922 configmap.go:193] Couldn't get configMap openstack/rabbitmq-cell1-config-data: configmap "rabbitmq-cell1-config-data" not found
Nov 28 07:19:21 crc kubenswrapper[4922]: E1128 07:19:21.010753 4922 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/99708a5d-57d5-4479-8e09-94428bb13fa3-config-data podName:99708a5d-57d5-4479-8e09-94428bb13fa3 nodeName:}" failed. No retries permitted until 2025-11-28 07:19:22.010738974 +0000 UTC m=+1606.931134556 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "config-data" (UniqueName: "kubernetes.io/configmap/99708a5d-57d5-4479-8e09-94428bb13fa3-config-data") pod "rabbitmq-cell1-server-0" (UID: "99708a5d-57d5-4479-8e09-94428bb13fa3") : configmap "rabbitmq-cell1-config-data" not found
Nov 28 07:19:21 crc kubenswrapper[4922]: E1128 07:19:21.010795 4922 configmap.go:193] Couldn't get configMap openstack/dns: configmap "dns" not found
Nov 28 07:19:21 crc kubenswrapper[4922]: E1128 07:19:21.010813 4922 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/2a50cebf-c40b-425a-86a1-7813277f1b5a-config podName:2a50cebf-c40b-425a-86a1-7813277f1b5a nodeName:}" failed. No retries permitted until 2025-11-28 07:19:22.010807466 +0000 UTC m=+1606.931203048 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "config" (UniqueName: "kubernetes.io/configmap/2a50cebf-c40b-425a-86a1-7813277f1b5a-config") pod "dnsmasq-dns-6bc646c8f9-nfgpb" (UID: "2a50cebf-c40b-425a-86a1-7813277f1b5a") : configmap "dns" not found
Nov 28 07:19:21 crc kubenswrapper[4922]: I1128 07:19:21.023459 4922 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/swift-ring-rebalance-6kzln"]
Nov 28 07:19:21 crc kubenswrapper[4922]: I1128 07:19:21.048480 4922 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-db-sync-4p4rg"]
Nov 28 07:19:21 crc kubenswrapper[4922]: I1128 07:19:21.074895 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cindere247-account-delete-5kjlk"
Nov 28 07:19:21 crc kubenswrapper[4922]: I1128 07:19:21.075365 4922 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/swift-ring-rebalance-6kzln"]
Nov 28 07:19:21 crc kubenswrapper[4922]: I1128 07:19:21.158689 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/92eb4ce5-fb24-4b33-8e79-6f4e7ba96372-operator-scripts\") pod \"neutron5206-account-delete-64tk8\" (UID: \"92eb4ce5-fb24-4b33-8e79-6f4e7ba96372\") " pod="openstack/neutron5206-account-delete-64tk8"
Nov 28 07:19:21 crc kubenswrapper[4922]: I1128 07:19:21.160532 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron5206-account-delete-64tk8"]
Nov 28 07:19:21 crc kubenswrapper[4922]: I1128 07:19:21.168016 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-s5phw\" (UniqueName: \"kubernetes.io/projected/92eb4ce5-fb24-4b33-8e79-6f4e7ba96372-kube-api-access-s5phw\") pod \"neutron5206-account-delete-64tk8\" (UID: \"92eb4ce5-fb24-4b33-8e79-6f4e7ba96372\") " pod="openstack/neutron5206-account-delete-64tk8"
Nov 28 07:19:21 crc kubenswrapper[4922]: I1128 07:19:21.311047 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/92eb4ce5-fb24-4b33-8e79-6f4e7ba96372-operator-scripts\") pod \"neutron5206-account-delete-64tk8\" (UID: \"92eb4ce5-fb24-4b33-8e79-6f4e7ba96372\") " pod="openstack/neutron5206-account-delete-64tk8"
Nov 28 07:19:21 crc kubenswrapper[4922]: I1128 07:19:21.311316 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s5phw\" (UniqueName: \"kubernetes.io/projected/92eb4ce5-fb24-4b33-8e79-6f4e7ba96372-kube-api-access-s5phw\") pod \"neutron5206-account-delete-64tk8\" (UID: \"92eb4ce5-fb24-4b33-8e79-6f4e7ba96372\") " pod="openstack/neutron5206-account-delete-64tk8"
Nov 28 07:19:21 crc kubenswrapper[4922]: I1128 07:19:21.312626 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/92eb4ce5-fb24-4b33-8e79-6f4e7ba96372-operator-scripts\") pod \"neutron5206-account-delete-64tk8\" (UID: \"92eb4ce5-fb24-4b33-8e79-6f4e7ba96372\") " pod="openstack/neutron5206-account-delete-64tk8"
Nov 28 07:19:21 crc kubenswrapper[4922]: I1128 07:19:21.356432 4922 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican5229-account-delete-qq87v"]
Nov 28 07:19:21 crc kubenswrapper[4922]: I1128 07:19:21.389963 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican5229-account-delete-qq87v"
Nov 28 07:19:21 crc kubenswrapper[4922]: I1128 07:19:21.404797 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s5phw\" (UniqueName: \"kubernetes.io/projected/92eb4ce5-fb24-4b33-8e79-6f4e7ba96372-kube-api-access-s5phw\") pod \"neutron5206-account-delete-64tk8\" (UID: \"92eb4ce5-fb24-4b33-8e79-6f4e7ba96372\") " pod="openstack/neutron5206-account-delete-64tk8"
Nov 28 07:19:21 crc kubenswrapper[4922]: I1128 07:19:21.441796 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/3e442fd0-cd46-4c04-afb3-96892d39c0f4-operator-scripts\") pod \"barbican5229-account-delete-qq87v\" (UID: \"3e442fd0-cd46-4c04-afb3-96892d39c0f4\") " pod="openstack/barbican5229-account-delete-qq87v"
Nov 28 07:19:21 crc kubenswrapper[4922]: I1128 07:19:21.442067 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8ph6f\" (UniqueName: \"kubernetes.io/projected/3e442fd0-cd46-4c04-afb3-96892d39c0f4-kube-api-access-8ph6f\") pod \"barbican5229-account-delete-qq87v\" (UID: \"3e442fd0-cd46-4c04-afb3-96892d39c0f4\") " pod="openstack/barbican5229-account-delete-qq87v"
Nov 28 07:19:21 crc kubenswrapper[4922]: I1128 07:19:21.547293 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8ph6f\" (UniqueName: \"kubernetes.io/projected/3e442fd0-cd46-4c04-afb3-96892d39c0f4-kube-api-access-8ph6f\") pod \"barbican5229-account-delete-qq87v\" (UID: \"3e442fd0-cd46-4c04-afb3-96892d39c0f4\") " pod="openstack/barbican5229-account-delete-qq87v"
Nov 28 07:19:21 crc kubenswrapper[4922]: I1128 07:19:21.547742 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/3e442fd0-cd46-4c04-afb3-96892d39c0f4-operator-scripts\") pod \"barbican5229-account-delete-qq87v\" (UID: \"3e442fd0-cd46-4c04-afb3-96892d39c0f4\") " pod="openstack/barbican5229-account-delete-qq87v"
Nov 28 07:19:21 crc kubenswrapper[4922]: I1128 07:19:21.549422 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/3e442fd0-cd46-4c04-afb3-96892d39c0f4-operator-scripts\") pod \"barbican5229-account-delete-qq87v\" (UID: \"3e442fd0-cd46-4c04-afb3-96892d39c0f4\") " pod="openstack/barbican5229-account-delete-qq87v"
Nov 28 07:19:21 crc kubenswrapper[4922]: E1128 07:19:21.552373 4922 configmap.go:193] Couldn't get configMap openstack/rabbitmq-config-data: configmap "rabbitmq-config-data" not found
Nov 28 07:19:21 crc kubenswrapper[4922]: E1128 07:19:21.552438 4922 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/4cf25acc-0d60-4b0a-a9c9-adc7ddce7458-config-data podName:4cf25acc-0d60-4b0a-a9c9-adc7ddce7458 nodeName:}" failed. No retries permitted until 2025-11-28 07:19:23.552421566 +0000 UTC m=+1608.472817138 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "config-data" (UniqueName: "kubernetes.io/configmap/4cf25acc-0d60-4b0a-a9c9-adc7ddce7458-config-data") pod "rabbitmq-server-0" (UID: "4cf25acc-0d60-4b0a-a9c9-adc7ddce7458") : configmap "rabbitmq-config-data" not found
Nov 28 07:19:21 crc kubenswrapper[4922]: I1128 07:19:21.583181 4922 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="086c6cef-6f21-46b9-ace7-f06ffff84fb3" path="/var/lib/kubelet/pods/086c6cef-6f21-46b9-ace7-f06ffff84fb3/volumes"
Nov 28 07:19:21 crc kubenswrapper[4922]: I1128 07:19:21.583918 4922 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2e87eedc-5f8a-46e2-bce1-c0361074a7f0" path="/var/lib/kubelet/pods/2e87eedc-5f8a-46e2-bce1-c0361074a7f0/volumes"
Nov 28 07:19:21 crc kubenswrapper[4922]: I1128 07:19:21.602358 4922 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b78e6a70-a315-4c6d-8731-a21335e18766" path="/var/lib/kubelet/pods/b78e6a70-a315-4c6d-8731-a21335e18766/volumes"
Nov 28 07:19:21 crc kubenswrapper[4922]: I1128 07:19:21.603026 4922 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-db-sync-4p4rg"]
Nov 28 07:19:21 crc kubenswrapper[4922]: I1128 07:19:21.624294 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican5229-account-delete-qq87v"]
Nov 28 07:19:21 crc kubenswrapper[4922]: I1128 07:19:21.633628 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron5206-account-delete-64tk8"
Nov 28 07:19:21 crc kubenswrapper[4922]: I1128 07:19:21.645505 4922 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovsdbserver-sb-0"]
Nov 28 07:19:21 crc kubenswrapper[4922]: I1128 07:19:21.646365 4922 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ovsdbserver-sb-0" podUID="39a4d24f-6b5b-48fc-ab66-1ad33462c477" containerName="openstack-network-exporter" containerID="cri-o://b461584211ac1833270ce57012470931433410147f21b7bbc92e13735e9d4731" gracePeriod=300
Nov 28 07:19:21 crc kubenswrapper[4922]: I1128 07:19:21.672334 4922 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-db-sync-cj2fg"]
Nov 28 07:19:21 crc kubenswrapper[4922]: I1128 07:19:21.721281 4922 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/neutron-db-sync-cj2fg"]
Nov 28 07:19:21 crc kubenswrapper[4922]: E1128 07:19:21.728377 4922 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="ab03f8552f326c9c76a50463baa6a28a8cfaa27ea7ce5e6c3db040730b019068" cmd=["/usr/local/bin/container-scripts/status_check.sh"]
Nov 28 07:19:21 crc kubenswrapper[4922]: I1128 07:19:21.733142 4922 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-6bc646c8f9-nfgpb"]
Nov 28 07:19:21 crc kubenswrapper[4922]: I1128 07:19:21.733399 4922 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-6bc646c8f9-nfgpb" podUID="2a50cebf-c40b-425a-86a1-7813277f1b5a" containerName="dnsmasq-dns" containerID="cri-o://4f746f52686852078d4054015bbe108873cac8ffaf0e003c3bb348ac22540b3b" gracePeriod=10
Nov 28 07:19:21 crc kubenswrapper[4922]: I1128 07:19:21.744649 4922 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-db-sync-4q5pb"]
Nov 28 07:19:21 crc kubenswrapper[4922]: E1128 07:19:21.753044 4922 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="ab03f8552f326c9c76a50463baa6a28a8cfaa27ea7ce5e6c3db040730b019068" cmd=["/usr/local/bin/container-scripts/status_check.sh"]
Nov 28 07:19:21 crc kubenswrapper[4922]: I1128 07:19:21.755817 4922 generic.go:334] "Generic (PLEG): container finished" podID="21fdc98b-667f-44b1-9fae-87f96ba4b514" containerID="828100784a957c933fd986d8e524d4ea965617a13f9b745a0e7d98c118f37089" exitCode=0
Nov 28 07:19:21 crc kubenswrapper[4922]: I1128 07:19:21.755913 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-lpw65" event={"ID":"21fdc98b-667f-44b1-9fae-87f96ba4b514","Type":"ContainerDied","Data":"828100784a957c933fd986d8e524d4ea965617a13f9b745a0e7d98c118f37089"}
Nov 28 07:19:21 crc kubenswrapper[4922]: E1128 07:19:21.757298 4922 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="ab03f8552f326c9c76a50463baa6a28a8cfaa27ea7ce5e6c3db040730b019068" cmd=["/usr/local/bin/container-scripts/status_check.sh"]
Nov 28 07:19:21 crc kubenswrapper[4922]: E1128 07:19:21.757371 4922 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/ovn-northd-0" podUID="e070595b-ded5-4ba1-8e5d-10dee3f64439" containerName="ovn-northd"
Nov 28 07:19:21 crc kubenswrapper[4922]: I1128 07:19:21.758613 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8ph6f\" (UniqueName: \"kubernetes.io/projected/3e442fd0-cd46-4c04-afb3-96892d39c0f4-kube-api-access-8ph6f\") pod \"barbican5229-account-delete-qq87v\" (UID: \"3e442fd0-cd46-4c04-afb3-96892d39c0f4\") " pod="openstack/barbican5229-account-delete-qq87v"
Nov 28 07:19:21 crc kubenswrapper[4922]: I1128 07:19:21.761275 4922 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-db-sync-4q5pb"]
Nov 28 07:19:21 crc kubenswrapper[4922]: I1128 07:19:21.784584 4922 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovsdbserver-nb-0"]
Nov 28 07:19:21 crc kubenswrapper[4922]: I1128 07:19:21.784935 4922 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ovsdbserver-nb-0" podUID="d27299ac-7d8d-4485-86fb-6ac7f34ea1ae" containerName="openstack-network-exporter" containerID="cri-o://ed684f6b629f4e8652e956f602ceebf88d808cf52630b1ec4ad72baf8b709140" gracePeriod=300
Nov 28 07:19:21 crc kubenswrapper[4922]: I1128 07:19:21.785232 4922 generic.go:334] "Generic (PLEG): container finished" podID="e070595b-ded5-4ba1-8e5d-10dee3f64439" containerID="080f55e8e51ff6a214b0fe9fe62cc38adee207ae9bc0a4e40e78d515f29e447e" exitCode=2
Nov 28 07:19:21 crc kubenswrapper[4922]: I1128 07:19:21.785281 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-northd-0" event={"ID":"e070595b-ded5-4ba1-8e5d-10dee3f64439","Type":"ContainerDied","Data":"080f55e8e51ff6a214b0fe9fe62cc38adee207ae9bc0a4e40e78d515f29e447e"}
Nov 28 07:19:21 crc kubenswrapper[4922]: I1128 07:19:21.800638 4922 generic.go:334] "Generic (PLEG): container finished" podID="c038b865-4b32-4be3-9e0a-8c40dc140a68" containerID="31b146f13fd3f77793153557e7b67237270c42c2a177ad53d0c2fee7a88ba3e1" exitCode=143
Nov 28 07:19:21 crc kubenswrapper[4922]: I1128 07:19:21.800703 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"c038b865-4b32-4be3-9e0a-8c40dc140a68","Type":"ContainerDied","Data":"31b146f13fd3f77793153557e7b67237270c42c2a177ad53d0c2fee7a88ba3e1"}
Nov 28 07:19:21 crc kubenswrapper[4922]: I1128 07:19:21.817302 4922 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/novaapi78f1-account-delete-mwv4p"]
Nov 28 07:19:21 crc kubenswrapper[4922]: I1128 07:19:21.818624 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/novaapi78f1-account-delete-mwv4p"
Nov 28 07:19:21 crc kubenswrapper[4922]: I1128 07:19:21.825148 4922 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/placement-5f78c88b9d-zp4nm"]
Nov 28 07:19:21 crc kubenswrapper[4922]: I1128 07:19:21.825797 4922 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/placement-5f78c88b9d-zp4nm" podUID="02d26a97-d447-4f76-90ed-9357e343cd91" containerName="placement-log" containerID="cri-o://2007239de0a02e5d422915e862a8620464730ddc29632ad08e99aaf25724d88b" gracePeriod=30
Nov 28 07:19:21 crc kubenswrapper[4922]: I1128 07:19:21.825920 4922 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/placement-5f78c88b9d-zp4nm" podUID="02d26a97-d447-4f76-90ed-9357e343cd91" containerName="placement-api" containerID="cri-o://ca51681097b612d4dfe7b851461b5269b74ed8bfc135acb38f4eda642ba424ca" gracePeriod=30
Nov 28 07:19:21 crc kubenswrapper[4922]: I1128 07:19:21.835528 4922 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-metrics-4jk5t_2070fbd8-e847-4b99-ba55-4579804bbc57/openstack-network-exporter/0.log"
Nov 28 07:19:21 crc kubenswrapper[4922]: I1128 07:19:21.835581 4922 generic.go:334] "Generic (PLEG): container finished" podID="2070fbd8-e847-4b99-ba55-4579804bbc57" containerID="7c57b6ea918e23a8d2aa6a00589247875694d1068bfad7021c5c76b0cae05bf4" exitCode=2
Nov 28 07:19:21 crc kubenswrapper[4922]: I1128 07:19:21.835612 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-metrics-4jk5t" event={"ID":"2070fbd8-e847-4b99-ba55-4579804bbc57","Type":"ContainerDied","Data":"7c57b6ea918e23a8d2aa6a00589247875694d1068bfad7021c5c76b0cae05bf4"}
Nov 28 07:19:21 crc kubenswrapper[4922]: I1128 07:19:21.851088 4922 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"]
Nov 28 07:19:21 crc kubenswrapper[4922]: I1128 07:19:21.851321 4922 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-internal-api-0" podUID="0cd494ee-6c17-4d94-96c2-9e2fcf02b2bc" containerName="glance-log" containerID="cri-o://c0b119f326caea92368ebddce52cea98327f9c2b47b9496a3050507d1378df2b" gracePeriod=30
Nov 28 07:19:21 crc kubenswrapper[4922]: I1128 07:19:21.851693 4922 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-internal-api-0" podUID="0cd494ee-6c17-4d94-96c2-9e2fcf02b2bc" containerName="glance-httpd" containerID="cri-o://b462e55e4b1d1673313c6dfd787beceb0503ca800228d804b043cfabe37c1295" gracePeriod=30
Nov 28 07:19:21 crc kubenswrapper[4922]: I1128 07:19:21.855911 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hj2p7\" (UniqueName: \"kubernetes.io/projected/2425a44a-c3c8-4533-9aa6-deb657556efb-kube-api-access-hj2p7\") pod \"novaapi78f1-account-delete-mwv4p\" (UID: \"2425a44a-c3c8-4533-9aa6-deb657556efb\") " pod="openstack/novaapi78f1-account-delete-mwv4p"
Nov 28 07:19:21 crc kubenswrapper[4922]: I1128 07:19:21.855997 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/2425a44a-c3c8-4533-9aa6-deb657556efb-operator-scripts\") pod \"novaapi78f1-account-delete-mwv4p\" (UID: \"2425a44a-c3c8-4533-9aa6-deb657556efb\") " pod="openstack/novaapi78f1-account-delete-mwv4p"
Nov 28 07:19:21 crc kubenswrapper[4922]: I1128 07:19:21.876368 4922 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/novacell032a6-account-delete-tcwgc"]
Nov 28 07:19:21 crc kubenswrapper[4922]: I1128 07:19:21.877977 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/novacell032a6-account-delete-tcwgc"
Nov 28 07:19:21 crc kubenswrapper[4922]: I1128 07:19:21.919111 4922 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/rabbitmq-server-0"]
Nov 28 07:19:21 crc kubenswrapper[4922]: I1128 07:19:21.922389 4922 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ovsdbserver-sb-0" podUID="39a4d24f-6b5b-48fc-ab66-1ad33462c477" containerName="ovsdbserver-sb" containerID="cri-o://38e3e9f9a924e02c5e76f4a6a76132d9a4596633913f1d0199cbf2b940a82fc6" gracePeriod=300
Nov 28 07:19:21 crc kubenswrapper[4922]: I1128 07:19:21.958528 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-c2gk9\" (UniqueName: \"kubernetes.io/projected/5e4e7296-ad39-41c1-9399-b3c9072c9158-kube-api-access-c2gk9\") pod \"novacell032a6-account-delete-tcwgc\" (UID: \"5e4e7296-ad39-41c1-9399-b3c9072c9158\") " pod="openstack/novacell032a6-account-delete-tcwgc"
Nov 28 07:19:21 crc kubenswrapper[4922]: I1128 07:19:21.958714 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hj2p7\" (UniqueName: \"kubernetes.io/projected/2425a44a-c3c8-4533-9aa6-deb657556efb-kube-api-access-hj2p7\") pod \"novaapi78f1-account-delete-mwv4p\" (UID: \"2425a44a-c3c8-4533-9aa6-deb657556efb\") " pod="openstack/novaapi78f1-account-delete-mwv4p"
Nov 28 07:19:21 crc kubenswrapper[4922]: I1128 07:19:21.958753 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/5e4e7296-ad39-41c1-9399-b3c9072c9158-operator-scripts\") pod \"novacell032a6-account-delete-tcwgc\" (UID: \"5e4e7296-ad39-41c1-9399-b3c9072c9158\") " pod="openstack/novacell032a6-account-delete-tcwgc"
Nov 28 07:19:21 crc kubenswrapper[4922]: I1128 07:19:21.958825 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/2425a44a-c3c8-4533-9aa6-deb657556efb-operator-scripts\") pod \"novaapi78f1-account-delete-mwv4p\" (UID: \"2425a44a-c3c8-4533-9aa6-deb657556efb\") " pod="openstack/novaapi78f1-account-delete-mwv4p"
Nov 28 07:19:21 crc kubenswrapper[4922]: I1128 07:19:21.960498 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/2425a44a-c3c8-4533-9aa6-deb657556efb-operator-scripts\") pod \"novaapi78f1-account-delete-mwv4p\" (UID: \"2425a44a-c3c8-4533-9aa6-deb657556efb\") " pod="openstack/novaapi78f1-account-delete-mwv4p"
Nov 28 07:19:21 crc kubenswrapper[4922]: I1128 07:19:21.963410 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/novacell032a6-account-delete-tcwgc"]
Nov 28 07:19:21 crc kubenswrapper[4922]: I1128 07:19:21.996760 4922 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ovsdbserver-nb-0" podUID="d27299ac-7d8d-4485-86fb-6ac7f34ea1ae" containerName="ovsdbserver-nb" containerID="cri-o://240daa62f7498e10be12162ab84970d0b62d583627e61eb191c0b48920ae86ed" gracePeriod=300
Nov 28 07:19:21 crc kubenswrapper[4922]: I1128 07:19:21.999313 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hj2p7\" (UniqueName: \"kubernetes.io/projected/2425a44a-c3c8-4533-9aa6-deb657556efb-kube-api-access-hj2p7\") pod \"novaapi78f1-account-delete-mwv4p\" (UID: \"2425a44a-c3c8-4533-9aa6-deb657556efb\") " pod="openstack/novaapi78f1-account-delete-mwv4p"
Nov 28 07:19:22 crc kubenswrapper[4922]: I1128 07:19:22.028068 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/novaapi78f1-account-delete-mwv4p"]
Nov 28 07:19:22 crc kubenswrapper[4922]: I1128 07:19:22.042006 4922 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"]
Nov 28 07:19:22 crc kubenswrapper[4922]: I1128 07:19:22.042472 4922 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-external-api-0" podUID="182970fb-401f-404c-81c1-db0294b02167" containerName="glance-log" containerID="cri-o://f4286284a20c6edd0e0aa4a65c4e2c67716484e715938e31801e5192f3451cb4" gracePeriod=30
Nov 28 07:19:22 crc kubenswrapper[4922]: I1128 07:19:22.042684 4922 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-external-api-0" podUID="182970fb-401f-404c-81c1-db0294b02167" containerName="glance-httpd" containerID="cri-o://d454c9cde22b3235276b93c623bcd54e057f380c3dc05277d2f60b55df5ae160" gracePeriod=30
Nov 28 07:19:22 crc kubenswrapper[4922]: I1128 07:19:22.061385 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/5e4e7296-ad39-41c1-9399-b3c9072c9158-operator-scripts\") pod \"novacell032a6-account-delete-tcwgc\" (UID: \"5e4e7296-ad39-41c1-9399-b3c9072c9158\") " pod="openstack/novacell032a6-account-delete-tcwgc"
Nov 28 07:19:22 crc kubenswrapper[4922]: I1128 07:19:22.061512 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-c2gk9\" (UniqueName: \"kubernetes.io/projected/5e4e7296-ad39-41c1-9399-b3c9072c9158-kube-api-access-c2gk9\") pod \"novacell032a6-account-delete-tcwgc\" (UID: \"5e4e7296-ad39-41c1-9399-b3c9072c9158\") " pod="openstack/novacell032a6-account-delete-tcwgc"
Nov 28 07:19:22 crc kubenswrapper[4922]: E1128 07:19:22.063173 4922 configmap.go:193] Couldn't get configMap openstack/dns: configmap "dns" not found
Nov 28 07:19:22 crc kubenswrapper[4922]: E1128 07:19:22.063239 4922 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/2a50cebf-c40b-425a-86a1-7813277f1b5a-config podName:2a50cebf-c40b-425a-86a1-7813277f1b5a nodeName:}" failed. No retries permitted until 2025-11-28 07:19:24.063211147 +0000 UTC m=+1608.983606729 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "config" (UniqueName: "kubernetes.io/configmap/2a50cebf-c40b-425a-86a1-7813277f1b5a-config") pod "dnsmasq-dns-6bc646c8f9-nfgpb" (UID: "2a50cebf-c40b-425a-86a1-7813277f1b5a") : configmap "dns" not found
Nov 28 07:19:22 crc kubenswrapper[4922]: I1128 07:19:22.063744 4922 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/swift-storage-0"]
Nov 28 07:19:22 crc kubenswrapper[4922]: E1128 07:19:22.063808 4922 configmap.go:193] Couldn't get configMap openstack/rabbitmq-cell1-config-data: configmap "rabbitmq-cell1-config-data" not found
Nov 28 07:19:22 crc kubenswrapper[4922]: E1128 07:19:22.063866 4922 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/99708a5d-57d5-4479-8e09-94428bb13fa3-config-data podName:99708a5d-57d5-4479-8e09-94428bb13fa3 nodeName:}" failed. No retries permitted until 2025-11-28 07:19:24.063849763 +0000 UTC m=+1608.984245345 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "config-data" (UniqueName: "kubernetes.io/configmap/99708a5d-57d5-4479-8e09-94428bb13fa3-config-data") pod "rabbitmq-cell1-server-0" (UID: "99708a5d-57d5-4479-8e09-94428bb13fa3") : configmap "rabbitmq-cell1-config-data" not found
Nov 28 07:19:22 crc kubenswrapper[4922]: I1128 07:19:22.064374 4922 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/swift-storage-0" podUID="46c3d0a8-d9ed-419a-baf3-57aaaf0c56fe" containerName="account-server" containerID="cri-o://d3b90768a305d76f6bbe78d7fd4d3b39f50f3a28bba75d44bc70674b4aca8f70" gracePeriod=30
Nov 28 07:19:22 crc kubenswrapper[4922]: I1128 07:19:22.064537 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/5e4e7296-ad39-41c1-9399-b3c9072c9158-operator-scripts\") pod \"novacell032a6-account-delete-tcwgc\" (UID: \"5e4e7296-ad39-41c1-9399-b3c9072c9158\") " pod="openstack/novacell032a6-account-delete-tcwgc"
Nov 28 07:19:22 crc kubenswrapper[4922]: I1128 07:19:22.064596 4922 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/swift-storage-0" podUID="46c3d0a8-d9ed-419a-baf3-57aaaf0c56fe" containerName="container-updater" containerID="cri-o://12b16430a89a5b1e56a5f6468f519f0cc78f9332249e9e70b835663f8c8cf7f6" gracePeriod=30
Nov 28 07:19:22 crc kubenswrapper[4922]: I1128 07:19:22.064723 4922 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/swift-storage-0" podUID="46c3d0a8-d9ed-419a-baf3-57aaaf0c56fe" containerName="swift-recon-cron" containerID="cri-o://ca8c40529da8875dd3000d01b8bd8b36258e6bc188214902c4efe8f876ef3f55" gracePeriod=30
Nov 28 07:19:22 crc kubenswrapper[4922]: I1128 07:19:22.064771 4922 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/swift-storage-0" podUID="46c3d0a8-d9ed-419a-baf3-57aaaf0c56fe" containerName="rsync" containerID="cri-o://be7c02ef3f8f4787f48ed58720143cde50c0717059e0ee9cfb0efd2e23816536" gracePeriod=30
Nov 28 07:19:22 crc kubenswrapper[4922]: I1128 07:19:22.064816 4922 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/swift-storage-0" podUID="46c3d0a8-d9ed-419a-baf3-57aaaf0c56fe" containerName="object-expirer" containerID="cri-o://cdce219795ca597bf90a3ef5a37914a0508699345238c0f783326c7603848cb7" gracePeriod=30
Nov 28 07:19:22 crc kubenswrapper[4922]: I1128 07:19:22.064857 4922 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/swift-storage-0" podUID="46c3d0a8-d9ed-419a-baf3-57aaaf0c56fe" containerName="object-updater" containerID="cri-o://f0b2a682fe557d959bf583e5ecb3012d332cc749a2c216c33f7c5ba7ffe503cb" gracePeriod=30
Nov 28 07:19:22 crc kubenswrapper[4922]: I1128 07:19:22.064870 4922 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/swift-storage-0" podUID="46c3d0a8-d9ed-419a-baf3-57aaaf0c56fe" containerName="container-auditor" containerID="cri-o://853f98691c2c9eea9fa65f3f60694178106dd650a20abb2f93f5e169033283dc" gracePeriod=30
Nov 28 07:19:22 crc kubenswrapper[4922]: I1128 07:19:22.064894 4922 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/swift-storage-0" podUID="46c3d0a8-d9ed-419a-baf3-57aaaf0c56fe" containerName="object-auditor" containerID="cri-o://a015be7baa9e4f6ad1e91446da7a6a6130d283d562293232f646fbaf0306cb39" gracePeriod=30
Nov 28 07:19:22 crc kubenswrapper[4922]: I1128 07:19:22.064918 4922 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/swift-storage-0" podUID="46c3d0a8-d9ed-419a-baf3-57aaaf0c56fe" containerName="container-replicator" containerID="cri-o://23b4034aad6d0adc0dd28dbcdf65c0e6bd65ce23fd1a27f18dd635918d96a2c8" gracePeriod=30
Nov 28 07:19:22 crc kubenswrapper[4922]: I1128 07:19:22.064935 4922 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/swift-storage-0" podUID="46c3d0a8-d9ed-419a-baf3-57aaaf0c56fe" containerName="object-replicator" containerID="cri-o://b375cb1d189915e8086ce1f9c17697360db2e2a53cd535ab4f5f9cf1df90a46c" gracePeriod=30
Nov 28 07:19:22 crc kubenswrapper[4922]: I1128 07:19:22.064955 4922 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/swift-storage-0" podUID="46c3d0a8-d9ed-419a-baf3-57aaaf0c56fe" containerName="container-server" containerID="cri-o://7c24d63778c887d25e502fb8e85199d831af25e203c2a011764b487e1c2c78a1" gracePeriod=30
Nov 28 07:19:22 crc kubenswrapper[4922]: I1128 07:19:22.064989 4922 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/swift-storage-0" podUID="46c3d0a8-d9ed-419a-baf3-57aaaf0c56fe" containerName="object-server" containerID="cri-o://e4501a3ca18be2fca539c740e8acc3816b850c674385c2c3d353d186ec5bbec5" gracePeriod=30
Nov 28 07:19:22 crc kubenswrapper[4922]: I1128 07:19:22.065000 4922 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/swift-storage-0" podUID="46c3d0a8-d9ed-419a-baf3-57aaaf0c56fe" containerName="account-reaper" containerID="cri-o://ee22fa27f603d2ddf49e9b07c51a65a98b3a7d08d855a70a1f3939851f7f60c5" gracePeriod=30
Nov 28 07:19:22 crc kubenswrapper[4922]: I1128 07:19:22.065046 4922 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/swift-storage-0" podUID="46c3d0a8-d9ed-419a-baf3-57aaaf0c56fe" containerName="account-auditor" containerID="cri-o://26dd3a6e06dd6158dbd36b9a5fc4871c38c4a4f2e97a3e98df0085998f6374ae" gracePeriod=30
Nov 28 07:19:22 crc kubenswrapper[4922]: I1128 07:19:22.065064 4922 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/swift-storage-0" podUID="46c3d0a8-d9ed-419a-baf3-57aaaf0c56fe" containerName="account-replicator" containerID="cri-o://67652f0b906ad1c1976e1f6ddc6a979b5ca575b0328d4c60af513fc48df7cb8c" gracePeriod=30
Nov 28 07:19:22 crc kubenswrapper[4922]: I1128 07:19:22.083453 4922 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-cell-mapping-m8jjn"]
Nov 28 07:19:22 crc kubenswrapper[4922]: I1128 07:19:22.110645 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-c2gk9\" (UniqueName: \"kubernetes.io/projected/5e4e7296-ad39-41c1-9399-b3c9072c9158-kube-api-access-c2gk9\") pod \"novacell032a6-account-delete-tcwgc\" (UID: \"5e4e7296-ad39-41c1-9399-b3c9072c9158\") " pod="openstack/novacell032a6-account-delete-tcwgc"
Nov 28 07:19:22 crc kubenswrapper[4922]: I1128 07:19:22.134023 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican5229-account-delete-qq87v"
Nov 28 07:19:22 crc kubenswrapper[4922]: I1128 07:19:22.139611 4922 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-cell-mapping-m8jjn"]
Nov 28 07:19:22 crc kubenswrapper[4922]: I1128 07:19:22.166384 4922 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-cell-mapping-jrqbz"]
Nov 28 07:19:22 crc kubenswrapper[4922]: I1128 07:19:22.172281 4922 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-cell-mapping-jrqbz"]
Nov 28 07:19:22 crc kubenswrapper[4922]: I1128 07:19:22.186646 4922 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/rabbitmq-server-0" podUID="4cf25acc-0d60-4b0a-a9c9-adc7ddce7458" containerName="rabbitmq" containerID="cri-o://0b65d7751c631796afddb9d5cb6be8b33791f093200379b257e1458a02ef94be" gracePeriod=604800
Nov 28 07:19:22 crc kubenswrapper[4922]: I1128 07:19:22.187087 4922 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-85487d674f-dfq9s"]
Nov 28 07:19:22 crc kubenswrapper[4922]: I1128 07:19:22.187425 4922 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/neutron-85487d674f-dfq9s" podUID="cf191164-20d6-4d60-b111-6373616d9622" containerName="neutron-api" containerID="cri-o://e8df143a346e32c48d17729c45b8767a2ed24995c7a3feb7c92a535d6b69654e" gracePeriod=30
Nov 28 07:19:22 crc kubenswrapper[4922]: I1128 07:19:22.187810 4922 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/neutron-85487d674f-dfq9s" podUID="cf191164-20d6-4d60-b111-6373616d9622" containerName="neutron-httpd" containerID="cri-o://f58fb8e38c1acd1bb650c86ad30a27f2c7340fa1dc53bc9b6f0f13802001dd49" gracePeriod=30
Nov 28 07:19:22 crc kubenswrapper[4922]: I1128 07:19:22.191756 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/novaapi78f1-account-delete-mwv4p"
Nov 28 07:19:22 crc kubenswrapper[4922]: I1128 07:19:22.199870 4922 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/rabbitmq-cell1-server-0"]
Nov 28 07:19:22 crc kubenswrapper[4922]: I1128 07:19:22.206483 4922 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/swift-proxy-cc5b55cb5-8tgkn"]
Nov 28 07:19:22 crc kubenswrapper[4922]: I1128 07:19:22.209384 4922 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/swift-proxy-cc5b55cb5-8tgkn" podUID="c10f3b66-a7e0-4690-939a-5938de689b3a" containerName="proxy-httpd" containerID="cri-o://2cd63c61c70658881bcd06ab76b9b69600a15655926ae05749a525c4458f85a1" gracePeriod=30
Nov 28 07:19:22 crc kubenswrapper[4922]: I1128 07:19:22.209699 4922 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/swift-proxy-cc5b55cb5-8tgkn" podUID="c10f3b66-a7e0-4690-939a-5938de689b3a" containerName="proxy-server" containerID="cri-o://0c46d19aa54b7ae1c585537a2f9d7f0d49af908f52150fea5f7f93185d9dc261" gracePeriod=30
Nov 28 07:19:22 crc kubenswrapper[4922]: I1128 07:19:22.225036 4922 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/openstack-cell1-galera-0"]
Nov 28 07:19:22 crc kubenswrapper[4922]: I1128 07:19:22.283842 4922 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-keystone-listener-84495d76c8-mkvcb"]
Nov 28 07:19:22 crc kubenswrapper[4922]: I1128 07:19:22.284590 4922 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/barbican-keystone-listener-84495d76c8-mkvcb" podUID="5ece1333-c457-4099-bf00-1daa969a14dc" containerName="barbican-keystone-listener-log" containerID="cri-o://3004dce57f11a2ee90d32c564f6d0b320053fb2f43c4069e423362623aae9bfa" gracePeriod=30
Nov 28 07:19:22 crc kubenswrapper[4922]: I1128 07:19:22.285543 4922 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/barbican-keystone-listener-84495d76c8-mkvcb" podUID="5ece1333-c457-4099-bf00-1daa969a14dc" containerName="barbican-keystone-listener" containerID="cri-o://6c09aa779d052923b62c13eee209268276067fcf2d60f0ab88d8d7db5fd25ca4" gracePeriod=30
Nov 28 07:19:22 crc kubenswrapper[4922]: I1128 07:19:22.307976 4922 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-keystone-listener-7d9b9667cd-5cmld"]
Nov 28 07:19:22 crc kubenswrapper[4922]: I1128 07:19:22.313752 4922 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/rabbitmq-cell1-server-0" podUID="99708a5d-57d5-4479-8e09-94428bb13fa3" containerName="rabbitmq" containerID="cri-o://58bc4c962a13bd392e1f3d48869c89613df686531a50bf08290a8747044899ed" gracePeriod=604800
Nov 28 07:19:22 crc kubenswrapper[4922]: I1128 07:19:22.375482 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-worker-b597c9f45-g422b"]
Nov 28 07:19:22 crc kubenswrapper[4922]: E1128 07:19:22.379955 4922 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 240daa62f7498e10be12162ab84970d0b62d583627e61eb191c0b48920ae86ed is running failed: container process not found" containerID="240daa62f7498e10be12162ab84970d0b62d583627e61eb191c0b48920ae86ed" cmd=["/usr/bin/pidof","ovsdb-server"]
Nov 28 07:19:22 crc kubenswrapper[4922]: E1128 07:19:22.395099 4922 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 240daa62f7498e10be12162ab84970d0b62d583627e61eb191c0b48920ae86ed is running failed: container process not found" containerID="240daa62f7498e10be12162ab84970d0b62d583627e61eb191c0b48920ae86ed" cmd=["/usr/bin/pidof","ovsdb-server"]
Nov 28 07:19:22 crc kubenswrapper[4922]: E1128 07:19:22.406170 4922 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 240daa62f7498e10be12162ab84970d0b62d583627e61eb191c0b48920ae86ed is running failed: container process not found" containerID="240daa62f7498e10be12162ab84970d0b62d583627e61eb191c0b48920ae86ed" cmd=["/usr/bin/pidof","ovsdb-server"]
Nov 28 07:19:22 crc kubenswrapper[4922]: E1128 07:19:22.406824 4922 prober.go:104] "Probe errored" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 240daa62f7498e10be12162ab84970d0b62d583627e61eb191c0b48920ae86ed is running failed: container process not found" probeType="Readiness" pod="openstack/ovsdbserver-nb-0" podUID="d27299ac-7d8d-4485-86fb-6ac7f34ea1ae" containerName="ovsdbserver-nb"
Nov 28 07:19:22 crc kubenswrapper[4922]: I1128 07:19:22.423274 4922 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"]
Nov 28 07:19:22 crc kubenswrapper[4922]: I1128 07:19:22.423540 4922 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="7e1382f2-6597-4c09-a171-8709e4b9f5f7" containerName="nova-metadata-log" containerID="cri-o://15a728fdcdf56d07c7207624ae8d8276363300730509050cda619bc595b50b6d" gracePeriod=30
Nov 28 07:19:22 crc kubenswrapper[4922]: I1128 07:19:22.424010 4922 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="7e1382f2-6597-4c09-a171-8709e4b9f5f7" containerName="nova-metadata-metadata" containerID="cri-o://a81a1de4776394ff567145b64398d5b889f141b8c8414e1def8da4f9987ce3f1" gracePeriod=30
Nov 28 07:19:22 crc kubenswrapper[4922]: I1128 07:19:22.484286 4922 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-worker-5b7cbd995c-rwlzz"]
Nov 28 07:19:22 crc kubenswrapper[4922]: I1128 07:19:22.484505 4922 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/barbican-worker-5b7cbd995c-rwlzz" podUID="f8685dc8-7577-4076-8a5a-beba52e9bae7" containerName="barbican-worker-log" containerID="cri-o://b4963147ea9e2c2244fba11f3323e999f07d35d36b28648b34f61f69856ae968" gracePeriod=30
Nov 28 07:19:22 crc kubenswrapper[4922]: I1128 07:19:22.484917 4922 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/barbican-worker-5b7cbd995c-rwlzz" podUID="f8685dc8-7577-4076-8a5a-beba52e9bae7" containerName="barbican-worker" containerID="cri-o://1013a435a5db00a3fabb1bd3f992a2b6780c248c282088a565511d2d85e0aa49" gracePeriod=30
Nov 28 07:19:22 crc kubenswrapper[4922]: E1128 07:19:22.497504 4922 handlers.go:78] "Exec lifecycle hook for Container in Pod failed" err=<
Nov 28 07:19:22 crc kubenswrapper[4922]: command '/usr/local/bin/container-scripts/stop-ovsdb-server.sh' exited with 137: ++ dirname /usr/local/bin/container-scripts/stop-ovsdb-server.sh
Nov 28 07:19:22 crc kubenswrapper[4922]: + source /usr/local/bin/container-scripts/functions
Nov 28 07:19:22 crc kubenswrapper[4922]: ++ OVNBridge=br-int
Nov 28 07:19:22 crc kubenswrapper[4922]: ++ OVNRemote=tcp:localhost:6642
Nov 28 07:19:22 crc kubenswrapper[4922]: ++ OVNEncapType=geneve
Nov 28 07:19:22 crc kubenswrapper[4922]: ++ OVNAvailabilityZones=
Nov 28 07:19:22 crc kubenswrapper[4922]: ++ EnableChassisAsGateway=true
Nov 28 07:19:22 crc kubenswrapper[4922]: ++ PhysicalNetworks=
Nov 28 07:19:22 crc kubenswrapper[4922]: ++ OVNHostName=
Nov 28 07:19:22 crc kubenswrapper[4922]: ++ DB_FILE=/etc/openvswitch/conf.db
Nov 28 07:19:22 crc kubenswrapper[4922]: ++ ovs_dir=/var/lib/openvswitch
Nov 28 07:19:22 crc kubenswrapper[4922]: ++ FLOWS_RESTORE_SCRIPT=/var/lib/openvswitch/flows-script
Nov 28 07:19:22 crc kubenswrapper[4922]: ++ FLOWS_RESTORE_DIR=/var/lib/openvswitch/saved-flows
Nov 28 07:19:22 crc kubenswrapper[4922]: ++ SAFE_TO_STOP_OVSDB_SERVER_SEMAPHORE=/var/lib/openvswitch/is_safe_to_stop_ovsdb_server
Nov 28 07:19:22 crc kubenswrapper[4922]: + '[' '!' -f /var/lib/openvswitch/is_safe_to_stop_ovsdb_server ']'
Nov 28 07:19:22 crc kubenswrapper[4922]: + sleep 0.5
Nov 28 07:19:22 crc kubenswrapper[4922]: + '[' '!' -f /var/lib/openvswitch/is_safe_to_stop_ovsdb_server ']'
Nov 28 07:19:22 crc kubenswrapper[4922]: + sleep 0.5
Nov 28 07:19:22 crc kubenswrapper[4922]: + '[' '!' -f /var/lib/openvswitch/is_safe_to_stop_ovsdb_server ']'
Nov 28 07:19:22 crc kubenswrapper[4922]: + cleanup_ovsdb_server_semaphore
Nov 28 07:19:22 crc kubenswrapper[4922]: + rm -f /var/lib/openvswitch/is_safe_to_stop_ovsdb_server
Nov 28 07:19:22 crc kubenswrapper[4922]: + /usr/share/openvswitch/scripts/ovs-ctl stop --no-ovs-vswitchd
Nov 28 07:19:22 crc kubenswrapper[4922]: > execCommand=["/usr/local/bin/container-scripts/stop-ovsdb-server.sh"] containerName="ovsdb-server" pod="openstack/ovn-controller-ovs-m9xpz" message=<
Nov 28 07:19:22 crc kubenswrapper[4922]: Exiting ovsdb-server (5) [ OK ]
Nov 28 07:19:22 crc kubenswrapper[4922]: ++ dirname /usr/local/bin/container-scripts/stop-ovsdb-server.sh
Nov 28 07:19:22 crc kubenswrapper[4922]: + source /usr/local/bin/container-scripts/functions
Nov 28 07:19:22 crc kubenswrapper[4922]: ++ OVNBridge=br-int
Nov 28 07:19:22 crc kubenswrapper[4922]: ++ OVNRemote=tcp:localhost:6642
Nov 28 07:19:22 crc kubenswrapper[4922]: ++ OVNEncapType=geneve
Nov 28 07:19:22 crc kubenswrapper[4922]: ++ OVNAvailabilityZones=
Nov 28 07:19:22 crc kubenswrapper[4922]: ++ EnableChassisAsGateway=true
Nov 28 07:19:22 crc kubenswrapper[4922]: ++ PhysicalNetworks=
Nov 28 07:19:22 crc kubenswrapper[4922]: ++ OVNHostName=
Nov 28 07:19:22 crc kubenswrapper[4922]: ++ DB_FILE=/etc/openvswitch/conf.db
Nov 28 07:19:22 crc kubenswrapper[4922]: ++ ovs_dir=/var/lib/openvswitch
Nov 28 07:19:22 crc kubenswrapper[4922]: ++ FLOWS_RESTORE_SCRIPT=/var/lib/openvswitch/flows-script
Nov 28 07:19:22 crc kubenswrapper[4922]: ++ FLOWS_RESTORE_DIR=/var/lib/openvswitch/saved-flows
Nov 28 07:19:22 crc kubenswrapper[4922]: ++ SAFE_TO_STOP_OVSDB_SERVER_SEMAPHORE=/var/lib/openvswitch/is_safe_to_stop_ovsdb_server
Nov 28 07:19:22 crc kubenswrapper[4922]: + '[' '!' -f /var/lib/openvswitch/is_safe_to_stop_ovsdb_server ']'
Nov 28 07:19:22 crc kubenswrapper[4922]: + sleep 0.5
Nov 28 07:19:22 crc kubenswrapper[4922]: + '[' '!' -f /var/lib/openvswitch/is_safe_to_stop_ovsdb_server ']'
Nov 28 07:19:22 crc kubenswrapper[4922]: + sleep 0.5
Nov 28 07:19:22 crc kubenswrapper[4922]: + '[' '!' -f /var/lib/openvswitch/is_safe_to_stop_ovsdb_server ']'
Nov 28 07:19:22 crc kubenswrapper[4922]: + cleanup_ovsdb_server_semaphore
Nov 28 07:19:22 crc kubenswrapper[4922]: + rm -f /var/lib/openvswitch/is_safe_to_stop_ovsdb_server
Nov 28 07:19:22 crc kubenswrapper[4922]: + /usr/share/openvswitch/scripts/ovs-ctl stop --no-ovs-vswitchd
Nov 28 07:19:22 crc kubenswrapper[4922]: >
Nov 28 07:19:22 crc kubenswrapper[4922]: E1128 07:19:22.497544 4922 kuberuntime_container.go:691] "PreStop hook failed" err=<
Nov 28 07:19:22 crc kubenswrapper[4922]: command '/usr/local/bin/container-scripts/stop-ovsdb-server.sh' exited with 137: ++ dirname /usr/local/bin/container-scripts/stop-ovsdb-server.sh
Nov 28 07:19:22 crc kubenswrapper[4922]: + source /usr/local/bin/container-scripts/functions
Nov 28 07:19:22 crc kubenswrapper[4922]: ++ OVNBridge=br-int
Nov 28 07:19:22 crc kubenswrapper[4922]: ++ OVNRemote=tcp:localhost:6642
Nov 28 07:19:22 crc kubenswrapper[4922]: ++ OVNEncapType=geneve
Nov 28 07:19:22 crc kubenswrapper[4922]: ++ OVNAvailabilityZones=
Nov 28 07:19:22 crc kubenswrapper[4922]: ++ EnableChassisAsGateway=true
Nov 28 07:19:22 crc kubenswrapper[4922]: ++ PhysicalNetworks=
Nov 28 07:19:22 crc kubenswrapper[4922]: ++ OVNHostName=
Nov 28 07:19:22 crc kubenswrapper[4922]: ++ DB_FILE=/etc/openvswitch/conf.db
Nov 28 07:19:22 crc kubenswrapper[4922]: ++ ovs_dir=/var/lib/openvswitch
Nov 28 07:19:22 crc kubenswrapper[4922]: ++ FLOWS_RESTORE_SCRIPT=/var/lib/openvswitch/flows-script
Nov 28 07:19:22 crc kubenswrapper[4922]: ++ FLOWS_RESTORE_DIR=/var/lib/openvswitch/saved-flows
Nov 28 07:19:22 crc kubenswrapper[4922]: ++ SAFE_TO_STOP_OVSDB_SERVER_SEMAPHORE=/var/lib/openvswitch/is_safe_to_stop_ovsdb_server
Nov 28 07:19:22 crc kubenswrapper[4922]: + '[' '!' -f /var/lib/openvswitch/is_safe_to_stop_ovsdb_server ']'
Nov 28 07:19:22 crc kubenswrapper[4922]: + sleep 0.5
Nov 28 07:19:22 crc kubenswrapper[4922]: + '[' '!' -f /var/lib/openvswitch/is_safe_to_stop_ovsdb_server ']'
Nov 28 07:19:22 crc kubenswrapper[4922]: + sleep 0.5
Nov 28 07:19:22 crc kubenswrapper[4922]: + '[' '!' -f /var/lib/openvswitch/is_safe_to_stop_ovsdb_server ']'
Nov 28 07:19:22 crc kubenswrapper[4922]: + cleanup_ovsdb_server_semaphore
Nov 28 07:19:22 crc kubenswrapper[4922]: + rm -f /var/lib/openvswitch/is_safe_to_stop_ovsdb_server
Nov 28 07:19:22 crc kubenswrapper[4922]: + /usr/share/openvswitch/scripts/ovs-ctl stop --no-ovs-vswitchd
Nov 28 07:19:22 crc kubenswrapper[4922]: > pod="openstack/ovn-controller-ovs-m9xpz" podUID="ec882eb7-01fb-4f7f-bad8-812346e5880e" containerName="ovsdb-server" containerID="cri-o://7588b9fec893bcd865ba591436037cd645135e83e041b11ecddc7dce8fa1ace5"
Nov 28 07:19:22 crc kubenswrapper[4922]: I1128 07:19:22.497576 4922 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ovn-controller-ovs-m9xpz" podUID="ec882eb7-01fb-4f7f-bad8-812346e5880e" containerName="ovsdb-server" containerID="cri-o://7588b9fec893bcd865ba591436037cd645135e83e041b11ecddc7dce8fa1ace5" gracePeriod=29
Nov 28 07:19:22 crc kubenswrapper[4922]: I1128 07:19:22.517636 4922 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-db-create-qp85z"]
Nov 28 07:19:22 crc kubenswrapper[4922]: I1128 07:19:22.549685 4922 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-db-create-qp85z"]
Nov 28 07:19:22 crc kubenswrapper[4922]: E1128 07:19:22.565786 4922 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod2a50cebf_c40b_425a_86a1_7813277f1b5a.slice/crio-4f746f52686852078d4054015bbe108873cac8ffaf0e003c3bb348ac22540b3b.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podd27299ac_7d8d_4485_86fb_6ac7f34ea1ae.slice/crio-ed684f6b629f4e8652e956f602ceebf88d808cf52630b1ec4ad72baf8b709140.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod182970fb_401f_404c_81c1_db0294b02167.slice/crio-f4286284a20c6edd0e0aa4a65c4e2c67716484e715938e31801e5192f3451cb4.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod264e478f_8337_4f40_b005_84a7cd802eaa.slice/crio-6d78c8d0fed333630cf5081b1caacc151cab7db6d74c7fe5e06ec18ea0087bce.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod46c3d0a8_d9ed_419a_baf3_57aaaf0c56fe.slice/crio-cdce219795ca597bf90a3ef5a37914a0508699345238c0f783326c7603848cb7.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod02d26a97_d447_4f76_90ed_9357e343cd91.slice/crio-2007239de0a02e5d422915e862a8620464730ddc29632ad08e99aaf25724d88b.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod46c3d0a8_d9ed_419a_baf3_57aaaf0c56fe.slice/crio-b375cb1d189915e8086ce1f9c17697360db2e2a53cd535ab4f5f9cf1df90a46c.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod46c3d0a8_d9ed_419a_baf3_57aaaf0c56fe.slice/crio-a015be7baa9e4f6ad1e91446da7a6a6130d283d562293232f646fbaf0306cb39.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod39a4d24f_6b5b_48fc_ab66_1ad33462c477.slice/crio-b461584211ac1833270ce57012470931433410147f21b7bbc92e13735e9d4731.scope\":
RecentStats: unable to find data in memory cache]"
Nov 28 07:19:22 crc kubenswrapper[4922]: I1128 07:19:22.588351    4922 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-worker-b597c9f45-g422b"]
Nov 28 07:19:22 crc kubenswrapper[4922]: I1128 07:19:22.627773    4922 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"]
Nov 28 07:19:22 crc kubenswrapper[4922]: I1128 07:19:22.628231    4922 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="ed30601b-1e7c-4aa6-8469-8ff61cd93253" containerName="nova-api-log" containerID="cri-o://bf1cba7c9ab10696b41c6419bb24db199153cd327768de2455c89daa7f33c569" gracePeriod=30
Nov 28 07:19:22 crc kubenswrapper[4922]: I1128 07:19:22.628477    4922 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="ed30601b-1e7c-4aa6-8469-8ff61cd93253" containerName="nova-api-api" containerID="cri-o://585e8abe2762a709ae6ce887d0ef00f835169d14caac14b38ddb07298e1c071f" gracePeriod=30
Nov 28 07:19:22 crc kubenswrapper[4922]: I1128 07:19:22.647852    4922 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-api-7947bcd956-482dv"]
Nov 28 07:19:22 crc kubenswrapper[4922]: I1128 07:19:22.648071    4922 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/barbican-api-7947bcd956-482dv" podUID="1d97532b-e9ff-4031-a82c-3db5e943bfd9" containerName="barbican-api-log" containerID="cri-o://565068b14a54fb0888fb374689df2d8105a821181b3cf92257d558b607b5df90" gracePeriod=30
Nov 28 07:19:22 crc kubenswrapper[4922]: I1128 07:19:22.648190    4922 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/barbican-api-7947bcd956-482dv" podUID="1d97532b-e9ff-4031-a82c-3db5e943bfd9" containerName="barbican-api" containerID="cri-o://8d636352f04a5d6b017ec6d07127ee84924adb2187df00d394fe1e1c6dd31678" gracePeriod=30
Nov 28 07:19:22 crc kubenswrapper[4922]: I1128 07:19:22.652070    4922 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-api-757bbb5fbd-lx4kn"]
Nov 28 07:19:22 crc kubenswrapper[4922]: I1128 07:19:22.661818    4922 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-18f0-account-create-update-4rjkq"]
Nov 28 07:19:22 crc kubenswrapper[4922]: I1128 07:19:22.672151    4922 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ovn-controller-ovs-m9xpz" podUID="ec882eb7-01fb-4f7f-bad8-812346e5880e" containerName="ovs-vswitchd" containerID="cri-o://35985199ebc15c8748b4f22bfafda9ad3d4b2a2643cfa7d6df842376383f82a2" gracePeriod=28
Nov 28 07:19:22 crc kubenswrapper[4922]: I1128 07:19:22.677170    4922 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/openstack-cell1-galera-0" podUID="349fc74f-b0ac-437d-89ab-7106192b8e9e" containerName="galera" containerID="cri-o://e866e4c558d425e16d02cae7a1249331e5e9aee144a65a24341d72360112484f" gracePeriod=30
Nov 28 07:19:22 crc kubenswrapper[4922]: I1128 07:19:22.685338    4922 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-18f0-account-create-update-4rjkq"]
Nov 28 07:19:22 crc kubenswrapper[4922]: I1128 07:19:22.694979    4922 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-novncproxy-0"]
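The two xtrace dumps above come from the ovsdb-server container's PreStop hook (stop-ovsdb-server.sh). Its exit status 137 is 128+SIGKILL, i.e. the hook process was killed rather than finishing cleanly, and the hook's runtime appears to be deducted from the pod's 30s termination grace period, which would explain the kills above running with gracePeriod=29 and then 28. A minimal sketch of the hook's wait-and-stop logic, reconstructed only from the '+' trace lines (not the shipped script; 'functions' is assumed to define SAFE_TO_STOP_OVSDB_SERVER_SEMAPHORE and cleanup_ovsdb_server_semaphore as the trace shows):

    #!/bin/bash
    # Sketch of stop-ovsdb-server.sh as reconstructed from the xtrace above.
    source "$(dirname "$0")/functions"

    # Poll until the semaphore file appears, i.e. until the peer signals
    # that ovsdb-server can be stopped safely.
    while [ ! -f "$SAFE_TO_STOP_OVSDB_SERVER_SEMAPHORE" ]; do
        sleep 0.5
    done

    cleanup_ovsdb_server_semaphore   # per the trace: rm -f the semaphore file
    # Stop only the database server; ovs-vswitchd runs (and is killed) as a
    # separate container, matching the gracePeriod=28 entry above.
    /usr/share/openvswitch/scripts/ovs-ctl stop --no-ovs-vswitchd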
containerName="nova-cell1-novncproxy-novncproxy" containerID="cri-o://4e586197e5573dfc5e1c9ed3428653b242cd1c4575f148e1bde1a32ab14f8f71" gracePeriod=30 Nov 28 07:19:22 crc kubenswrapper[4922]: I1128 07:19:22.734371 4922 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"] Nov 28 07:19:22 crc kubenswrapper[4922]: I1128 07:19:22.734595 4922 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-scheduler-0" podUID="7c0f0857-2ca9-49c8-90ac-1351b2ee2f11" containerName="nova-scheduler-scheduler" containerID="cri-o://6595b56740c828b4071cf0e6ba075ef2934efd23c448c0814e7743491287af11" gracePeriod=30 Nov 28 07:19:22 crc kubenswrapper[4922]: I1128 07:19:22.750001 4922 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-conductor-db-sync-ncg57"] Nov 28 07:19:22 crc kubenswrapper[4922]: I1128 07:19:22.758533 4922 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-conductor-0"] Nov 28 07:19:22 crc kubenswrapper[4922]: I1128 07:19:22.758755 4922 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-cell1-conductor-0" podUID="b2566655-e076-471c-af4c-1e218f70ebe1" containerName="nova-cell1-conductor-conductor" containerID="cri-o://94b76863ba343d4c01e9d0ccc2f68e6d6a4daef999bf262b9a27c34ee538c8c3" gracePeriod=30 Nov 28 07:19:22 crc kubenswrapper[4922]: I1128 07:19:22.774312 4922 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-conductor-db-sync-ncg57"] Nov 28 07:19:22 crc kubenswrapper[4922]: I1128 07:19:22.794858 4922 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-keystone-listener-7d9b9667cd-5cmld"] Nov 28 07:19:22 crc kubenswrapper[4922]: I1128 07:19:22.800988 4922 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-conductor-0"] Nov 28 07:19:22 crc kubenswrapper[4922]: I1128 07:19:22.801252 4922 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-cell0-conductor-0" podUID="7dfc2e52-b959-4718-8f85-5bcec1a8ad10" containerName="nova-cell0-conductor-conductor" containerID="cri-o://e49fd2c5ff7e7c88e4e82dd56690d77b902a8ad478e8bdadcc87b5a3e2af26de" gracePeriod=30 Nov 28 07:19:22 crc kubenswrapper[4922]: I1128 07:19:22.813305 4922 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-conductor-db-sync-fr9rg"] Nov 28 07:19:22 crc kubenswrapper[4922]: I1128 07:19:22.819446 4922 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-conductor-db-sync-fr9rg"] Nov 28 07:19:22 crc kubenswrapper[4922]: W1128 07:19:22.837644 4922 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod454a2683_850f_4ce0_8ebe_7758105dd255.slice/crio-82d9e543274e4be8f4826c69d7ecc1131e9e082d0a1ba62067cf8480c2e27a01 WatchSource:0}: Error finding container 82d9e543274e4be8f4826c69d7ecc1131e9e082d0a1ba62067cf8480c2e27a01: Status 404 returned error can't find the container with id 82d9e543274e4be8f4826c69d7ecc1131e9e082d0a1ba62067cf8480c2e27a01 Nov 28 07:19:22 crc kubenswrapper[4922]: I1128 07:19:22.880866 4922 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/novacell032a6-account-delete-tcwgc" Nov 28 07:19:22 crc kubenswrapper[4922]: I1128 07:19:22.893389 4922 generic.go:334] "Generic (PLEG): container finished" podID="cf191164-20d6-4d60-b111-6373616d9622" containerID="f58fb8e38c1acd1bb650c86ad30a27f2c7340fa1dc53bc9b6f0f13802001dd49" exitCode=0 Nov 28 07:19:22 crc kubenswrapper[4922]: I1128 07:19:22.893443 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-85487d674f-dfq9s" event={"ID":"cf191164-20d6-4d60-b111-6373616d9622","Type":"ContainerDied","Data":"f58fb8e38c1acd1bb650c86ad30a27f2c7340fa1dc53bc9b6f0f13802001dd49"} Nov 28 07:19:22 crc kubenswrapper[4922]: I1128 07:19:22.902908 4922 generic.go:334] "Generic (PLEG): container finished" podID="7e1382f2-6597-4c09-a171-8709e4b9f5f7" containerID="15a728fdcdf56d07c7207624ae8d8276363300730509050cda619bc595b50b6d" exitCode=143 Nov 28 07:19:22 crc kubenswrapper[4922]: I1128 07:19:22.903044 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"7e1382f2-6597-4c09-a171-8709e4b9f5f7","Type":"ContainerDied","Data":"15a728fdcdf56d07c7207624ae8d8276363300730509050cda619bc595b50b6d"} Nov 28 07:19:22 crc kubenswrapper[4922]: I1128 07:19:22.915403 4922 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-metrics-4jk5t_2070fbd8-e847-4b99-ba55-4579804bbc57/openstack-network-exporter/0.log" Nov 28 07:19:22 crc kubenswrapper[4922]: I1128 07:19:22.919434 4922 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-metrics-4jk5t" Nov 28 07:19:22 crc kubenswrapper[4922]: E1128 07:19:22.981803 4922 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="e49fd2c5ff7e7c88e4e82dd56690d77b902a8ad478e8bdadcc87b5a3e2af26de" cmd=["/usr/bin/pgrep","-r","DRST","nova-conductor"] Nov 28 07:19:22 crc kubenswrapper[4922]: E1128 07:19:22.983175 4922 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="e49fd2c5ff7e7c88e4e82dd56690d77b902a8ad478e8bdadcc87b5a3e2af26de" cmd=["/usr/bin/pgrep","-r","DRST","nova-conductor"] Nov 28 07:19:22 crc kubenswrapper[4922]: I1128 07:19:22.984488 4922 generic.go:334] "Generic (PLEG): container finished" podID="46c3d0a8-d9ed-419a-baf3-57aaaf0c56fe" containerID="be7c02ef3f8f4787f48ed58720143cde50c0717059e0ee9cfb0efd2e23816536" exitCode=0 Nov 28 07:19:22 crc kubenswrapper[4922]: I1128 07:19:22.984512 4922 generic.go:334] "Generic (PLEG): container finished" podID="46c3d0a8-d9ed-419a-baf3-57aaaf0c56fe" containerID="cdce219795ca597bf90a3ef5a37914a0508699345238c0f783326c7603848cb7" exitCode=0 Nov 28 07:19:22 crc kubenswrapper[4922]: I1128 07:19:22.984521 4922 generic.go:334] "Generic (PLEG): container finished" podID="46c3d0a8-d9ed-419a-baf3-57aaaf0c56fe" containerID="f0b2a682fe557d959bf583e5ecb3012d332cc749a2c216c33f7c5ba7ffe503cb" exitCode=0 Nov 28 07:19:22 crc kubenswrapper[4922]: I1128 07:19:22.984529 4922 generic.go:334] "Generic (PLEG): container finished" podID="46c3d0a8-d9ed-419a-baf3-57aaaf0c56fe" containerID="a015be7baa9e4f6ad1e91446da7a6a6130d283d562293232f646fbaf0306cb39" exitCode=0 Nov 28 07:19:22 crc kubenswrapper[4922]: I1128 07:19:22.984535 4922 generic.go:334] "Generic (PLEG): container finished" 
podID="46c3d0a8-d9ed-419a-baf3-57aaaf0c56fe" containerID="b375cb1d189915e8086ce1f9c17697360db2e2a53cd535ab4f5f9cf1df90a46c" exitCode=0 Nov 28 07:19:22 crc kubenswrapper[4922]: I1128 07:19:22.984544 4922 generic.go:334] "Generic (PLEG): container finished" podID="46c3d0a8-d9ed-419a-baf3-57aaaf0c56fe" containerID="e4501a3ca18be2fca539c740e8acc3816b850c674385c2c3d353d186ec5bbec5" exitCode=0 Nov 28 07:19:22 crc kubenswrapper[4922]: I1128 07:19:22.984571 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"46c3d0a8-d9ed-419a-baf3-57aaaf0c56fe","Type":"ContainerDied","Data":"be7c02ef3f8f4787f48ed58720143cde50c0717059e0ee9cfb0efd2e23816536"} Nov 28 07:19:22 crc kubenswrapper[4922]: I1128 07:19:22.984623 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"46c3d0a8-d9ed-419a-baf3-57aaaf0c56fe","Type":"ContainerDied","Data":"cdce219795ca597bf90a3ef5a37914a0508699345238c0f783326c7603848cb7"} Nov 28 07:19:22 crc kubenswrapper[4922]: I1128 07:19:22.984625 4922 generic.go:334] "Generic (PLEG): container finished" podID="46c3d0a8-d9ed-419a-baf3-57aaaf0c56fe" containerID="12b16430a89a5b1e56a5f6468f519f0cc78f9332249e9e70b835663f8c8cf7f6" exitCode=0 Nov 28 07:19:22 crc kubenswrapper[4922]: I1128 07:19:22.984634 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"46c3d0a8-d9ed-419a-baf3-57aaaf0c56fe","Type":"ContainerDied","Data":"f0b2a682fe557d959bf583e5ecb3012d332cc749a2c216c33f7c5ba7ffe503cb"} Nov 28 07:19:22 crc kubenswrapper[4922]: I1128 07:19:22.984651 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"46c3d0a8-d9ed-419a-baf3-57aaaf0c56fe","Type":"ContainerDied","Data":"a015be7baa9e4f6ad1e91446da7a6a6130d283d562293232f646fbaf0306cb39"} Nov 28 07:19:22 crc kubenswrapper[4922]: I1128 07:19:22.984662 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"46c3d0a8-d9ed-419a-baf3-57aaaf0c56fe","Type":"ContainerDied","Data":"b375cb1d189915e8086ce1f9c17697360db2e2a53cd535ab4f5f9cf1df90a46c"} Nov 28 07:19:22 crc kubenswrapper[4922]: I1128 07:19:22.984671 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"46c3d0a8-d9ed-419a-baf3-57aaaf0c56fe","Type":"ContainerDied","Data":"e4501a3ca18be2fca539c740e8acc3816b850c674385c2c3d353d186ec5bbec5"} Nov 28 07:19:22 crc kubenswrapper[4922]: I1128 07:19:22.984680 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"46c3d0a8-d9ed-419a-baf3-57aaaf0c56fe","Type":"ContainerDied","Data":"12b16430a89a5b1e56a5f6468f519f0cc78f9332249e9e70b835663f8c8cf7f6"} Nov 28 07:19:22 crc kubenswrapper[4922]: I1128 07:19:22.984697 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"46c3d0a8-d9ed-419a-baf3-57aaaf0c56fe","Type":"ContainerDied","Data":"853f98691c2c9eea9fa65f3f60694178106dd650a20abb2f93f5e169033283dc"} Nov 28 07:19:22 crc kubenswrapper[4922]: I1128 07:19:22.984635 4922 generic.go:334] "Generic (PLEG): container finished" podID="46c3d0a8-d9ed-419a-baf3-57aaaf0c56fe" containerID="853f98691c2c9eea9fa65f3f60694178106dd650a20abb2f93f5e169033283dc" exitCode=0 Nov 28 07:19:22 crc kubenswrapper[4922]: I1128 07:19:22.984717 4922 generic.go:334] "Generic (PLEG): container finished" podID="46c3d0a8-d9ed-419a-baf3-57aaaf0c56fe" containerID="23b4034aad6d0adc0dd28dbcdf65c0e6bd65ce23fd1a27f18dd635918d96a2c8" exitCode=0 Nov 
28 07:19:22 crc kubenswrapper[4922]: I1128 07:19:22.984731    4922 generic.go:334] "Generic (PLEG): container finished" podID="46c3d0a8-d9ed-419a-baf3-57aaaf0c56fe" containerID="7c24d63778c887d25e502fb8e85199d831af25e203c2a011764b487e1c2c78a1" exitCode=0
Nov 28 07:19:22 crc kubenswrapper[4922]: I1128 07:19:22.984737    4922 generic.go:334] "Generic (PLEG): container finished" podID="46c3d0a8-d9ed-419a-baf3-57aaaf0c56fe" containerID="ee22fa27f603d2ddf49e9b07c51a65a98b3a7d08d855a70a1f3939851f7f60c5" exitCode=0
Nov 28 07:19:22 crc kubenswrapper[4922]: I1128 07:19:22.984744    4922 generic.go:334] "Generic (PLEG): container finished" podID="46c3d0a8-d9ed-419a-baf3-57aaaf0c56fe" containerID="26dd3a6e06dd6158dbd36b9a5fc4871c38c4a4f2e97a3e98df0085998f6374ae" exitCode=0
Nov 28 07:19:22 crc kubenswrapper[4922]: I1128 07:19:22.984750    4922 generic.go:334] "Generic (PLEG): container finished" podID="46c3d0a8-d9ed-419a-baf3-57aaaf0c56fe" containerID="67652f0b906ad1c1976e1f6ddc6a979b5ca575b0328d4c60af513fc48df7cb8c" exitCode=0
Nov 28 07:19:22 crc kubenswrapper[4922]: I1128 07:19:22.984791    4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"46c3d0a8-d9ed-419a-baf3-57aaaf0c56fe","Type":"ContainerDied","Data":"23b4034aad6d0adc0dd28dbcdf65c0e6bd65ce23fd1a27f18dd635918d96a2c8"}
Nov 28 07:19:22 crc kubenswrapper[4922]: I1128 07:19:22.984820    4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"46c3d0a8-d9ed-419a-baf3-57aaaf0c56fe","Type":"ContainerDied","Data":"7c24d63778c887d25e502fb8e85199d831af25e203c2a011764b487e1c2c78a1"}
Nov 28 07:19:22 crc kubenswrapper[4922]: I1128 07:19:22.984829    4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"46c3d0a8-d9ed-419a-baf3-57aaaf0c56fe","Type":"ContainerDied","Data":"ee22fa27f603d2ddf49e9b07c51a65a98b3a7d08d855a70a1f3939851f7f60c5"}
Nov 28 07:19:22 crc kubenswrapper[4922]: I1128 07:19:22.984839    4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"46c3d0a8-d9ed-419a-baf3-57aaaf0c56fe","Type":"ContainerDied","Data":"26dd3a6e06dd6158dbd36b9a5fc4871c38c4a4f2e97a3e98df0085998f6374ae"}
Nov 28 07:19:22 crc kubenswrapper[4922]: I1128 07:19:22.984848    4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"46c3d0a8-d9ed-419a-baf3-57aaaf0c56fe","Type":"ContainerDied","Data":"67652f0b906ad1c1976e1f6ddc6a979b5ca575b0328d4c60af513fc48df7cb8c"}
Nov 28 07:19:22 crc kubenswrapper[4922]: E1128 07:19:22.984725    4922 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="e49fd2c5ff7e7c88e4e82dd56690d77b902a8ad478e8bdadcc87b5a3e2af26de" cmd=["/usr/bin/pgrep","-r","DRST","nova-conductor"]
Nov 28 07:19:22 crc kubenswrapper[4922]: E1128 07:19:22.984872    4922 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/nova-cell0-conductor-0" podUID="7dfc2e52-b959-4718-8f85-5bcec1a8ad10" containerName="nova-cell0-conductor-conductor"
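The ExecSync failures here are the kubelet's exec readiness probe racing container teardown: once CRI-O begins stopping nova-cell0-conductor-conductor it refuses to register new exec sessions ("container is stopping"), so these probe errors are expected noise during a DELETE rather than a conductor fault. The probe command is logged verbatim and can be reproduced by hand against a healthy pod (a sketch, assuming the oc CLI and the namespace/pod names from the log; per procps-ng pgrep, -r/--runstates matches processes by run state, here D, R, S or T):

    # Run the same check the readiness probe runs inside the container;
    # exit 0 means a nova-conductor process exists in state D, R, S or T,
    # while a stopping container makes the CRI reject the exec outright.
    oc exec -n openstack nova-cell0-conductor-0 -- \
        /usr/bin/pgrep -r DRST nova-conductor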
Nov 28 07:19:22 crc kubenswrapper[4922]: I1128 07:19:22.987175    4922 generic.go:334] "Generic (PLEG): container finished" podID="5ece1333-c457-4099-bf00-1daa969a14dc" containerID="3004dce57f11a2ee90d32c564f6d0b320053fb2f43c4069e423362623aae9bfa" exitCode=143
Nov 28 07:19:22 crc kubenswrapper[4922]: I1128 07:19:22.987269    4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-keystone-listener-84495d76c8-mkvcb" event={"ID":"5ece1333-c457-4099-bf00-1daa969a14dc","Type":"ContainerDied","Data":"3004dce57f11a2ee90d32c564f6d0b320053fb2f43c4069e423362623aae9bfa"}
Nov 28 07:19:22 crc kubenswrapper[4922]: I1128 07:19:22.989250    4922 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-nb-0_d27299ac-7d8d-4485-86fb-6ac7f34ea1ae/ovsdbserver-nb/0.log"
Nov 28 07:19:22 crc kubenswrapper[4922]: I1128 07:19:22.989283    4922 generic.go:334] "Generic (PLEG): container finished" podID="d27299ac-7d8d-4485-86fb-6ac7f34ea1ae" containerID="ed684f6b629f4e8652e956f602ceebf88d808cf52630b1ec4ad72baf8b709140" exitCode=2
Nov 28 07:19:22 crc kubenswrapper[4922]: I1128 07:19:22.989293    4922 generic.go:334] "Generic (PLEG): container finished" podID="d27299ac-7d8d-4485-86fb-6ac7f34ea1ae" containerID="240daa62f7498e10be12162ab84970d0b62d583627e61eb191c0b48920ae86ed" exitCode=143
Nov 28 07:19:22 crc kubenswrapper[4922]: I1128 07:19:22.989332    4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-0" event={"ID":"d27299ac-7d8d-4485-86fb-6ac7f34ea1ae","Type":"ContainerDied","Data":"ed684f6b629f4e8652e956f602ceebf88d808cf52630b1ec4ad72baf8b709140"}
Nov 28 07:19:22 crc kubenswrapper[4922]: I1128 07:19:22.989347    4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-0" event={"ID":"d27299ac-7d8d-4485-86fb-6ac7f34ea1ae","Type":"ContainerDied","Data":"240daa62f7498e10be12162ab84970d0b62d583627e61eb191c0b48920ae86ed"}
Nov 28 07:19:22 crc kubenswrapper[4922]: I1128 07:19:22.993116    4922 generic.go:334] "Generic (PLEG): container finished" podID="ec882eb7-01fb-4f7f-bad8-812346e5880e" containerID="7588b9fec893bcd865ba591436037cd645135e83e041b11ecddc7dce8fa1ace5" exitCode=0
Nov 28 07:19:22 crc kubenswrapper[4922]: I1128 07:19:22.993180    4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-m9xpz" event={"ID":"ec882eb7-01fb-4f7f-bad8-812346e5880e","Type":"ContainerDied","Data":"7588b9fec893bcd865ba591436037cd645135e83e041b11ecddc7dce8fa1ace5"}
Nov 28 07:19:22 crc kubenswrapper[4922]: I1128 07:19:22.995673    4922 generic.go:334] "Generic (PLEG): container finished" podID="182970fb-401f-404c-81c1-db0294b02167" containerID="f4286284a20c6edd0e0aa4a65c4e2c67716484e715938e31801e5192f3451cb4" exitCode=143
Nov 28 07:19:22 crc kubenswrapper[4922]: I1128 07:19:22.995747    4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"182970fb-401f-404c-81c1-db0294b02167","Type":"ContainerDied","Data":"f4286284a20c6edd0e0aa4a65c4e2c67716484e715938e31801e5192f3451cb4"}
Nov 28 07:19:23 crc kubenswrapper[4922]: I1128 07:19:23.002295    4922 generic.go:334] "Generic (PLEG): container finished" podID="ed30601b-1e7c-4aa6-8469-8ff61cd93253" containerID="bf1cba7c9ab10696b41c6419bb24db199153cd327768de2455c89daa7f33c569" exitCode=143
Nov 28 07:19:23 crc kubenswrapper[4922]: I1128 07:19:23.002404    4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"ed30601b-1e7c-4aa6-8469-8ff61cd93253","Type":"ContainerDied","Data":"bf1cba7c9ab10696b41c6419bb24db199153cd327768de2455c89daa7f33c569"}
Nov 28 07:19:23 crc kubenswrapper[4922]: I1128 07:19:23.004753    4922 generic.go:334] "Generic (PLEG): container finished" podID="0cd494ee-6c17-4d94-96c2-9e2fcf02b2bc"
containerID="c0b119f326caea92368ebddce52cea98327f9c2b47b9496a3050507d1378df2b" exitCode=143 Nov 28 07:19:23 crc kubenswrapper[4922]: I1128 07:19:23.004797 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"0cd494ee-6c17-4d94-96c2-9e2fcf02b2bc","Type":"ContainerDied","Data":"c0b119f326caea92368ebddce52cea98327f9c2b47b9496a3050507d1378df2b"} Nov 28 07:19:23 crc kubenswrapper[4922]: I1128 07:19:23.005705 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-worker-b597c9f45-g422b" event={"ID":"6ef73d39-2ed2-4168-8598-e0749aa0a26b","Type":"ContainerStarted","Data":"31b1b75d807a8cc6d66f615108f0120c087831e1386533fe825bfcb303db49b4"} Nov 28 07:19:23 crc kubenswrapper[4922]: I1128 07:19:23.007767 4922 generic.go:334] "Generic (PLEG): container finished" podID="678d1f5b-5ebc-4b9e-b5ab-316ec7dfda05" containerID="070987feaf271fc91c9916a76b022635a4c92b91a9f70a5a74eccab2983e3ede" exitCode=0 Nov 28 07:19:23 crc kubenswrapper[4922]: I1128 07:19:23.007817 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"678d1f5b-5ebc-4b9e-b5ab-316ec7dfda05","Type":"ContainerDied","Data":"070987feaf271fc91c9916a76b022635a4c92b91a9f70a5a74eccab2983e3ede"} Nov 28 07:19:23 crc kubenswrapper[4922]: I1128 07:19:23.010313 4922 generic.go:334] "Generic (PLEG): container finished" podID="c10f3b66-a7e0-4690-939a-5938de689b3a" containerID="2cd63c61c70658881bcd06ab76b9b69600a15655926ae05749a525c4458f85a1" exitCode=0 Nov 28 07:19:23 crc kubenswrapper[4922]: I1128 07:19:23.010357 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-proxy-cc5b55cb5-8tgkn" event={"ID":"c10f3b66-a7e0-4690-939a-5938de689b3a","Type":"ContainerDied","Data":"2cd63c61c70658881bcd06ab76b9b69600a15655926ae05749a525c4458f85a1"} Nov 28 07:19:23 crc kubenswrapper[4922]: I1128 07:19:23.040830 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2070fbd8-e847-4b99-ba55-4579804bbc57-config\") pod \"2070fbd8-e847-4b99-ba55-4579804bbc57\" (UID: \"2070fbd8-e847-4b99-ba55-4579804bbc57\") " Nov 28 07:19:23 crc kubenswrapper[4922]: I1128 07:19:23.040945 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovs-rundir\" (UniqueName: \"kubernetes.io/host-path/2070fbd8-e847-4b99-ba55-4579804bbc57-ovs-rundir\") pod \"2070fbd8-e847-4b99-ba55-4579804bbc57\" (UID: \"2070fbd8-e847-4b99-ba55-4579804bbc57\") " Nov 28 07:19:23 crc kubenswrapper[4922]: I1128 07:19:23.041001 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/host-path/2070fbd8-e847-4b99-ba55-4579804bbc57-ovn-rundir\") pod \"2070fbd8-e847-4b99-ba55-4579804bbc57\" (UID: \"2070fbd8-e847-4b99-ba55-4579804bbc57\") " Nov 28 07:19:23 crc kubenswrapper[4922]: I1128 07:19:23.041040 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hwdv9\" (UniqueName: \"kubernetes.io/projected/2070fbd8-e847-4b99-ba55-4579804bbc57-kube-api-access-hwdv9\") pod \"2070fbd8-e847-4b99-ba55-4579804bbc57\" (UID: \"2070fbd8-e847-4b99-ba55-4579804bbc57\") " Nov 28 07:19:23 crc kubenswrapper[4922]: I1128 07:19:23.041111 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/2070fbd8-e847-4b99-ba55-4579804bbc57-metrics-certs-tls-certs\") pod 
\"2070fbd8-e847-4b99-ba55-4579804bbc57\" (UID: \"2070fbd8-e847-4b99-ba55-4579804bbc57\") " Nov 28 07:19:23 crc kubenswrapper[4922]: I1128 07:19:23.041140 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2070fbd8-e847-4b99-ba55-4579804bbc57-combined-ca-bundle\") pod \"2070fbd8-e847-4b99-ba55-4579804bbc57\" (UID: \"2070fbd8-e847-4b99-ba55-4579804bbc57\") " Nov 28 07:19:23 crc kubenswrapper[4922]: I1128 07:19:23.041257 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/2070fbd8-e847-4b99-ba55-4579804bbc57-ovn-rundir" (OuterVolumeSpecName: "ovn-rundir") pod "2070fbd8-e847-4b99-ba55-4579804bbc57" (UID: "2070fbd8-e847-4b99-ba55-4579804bbc57"). InnerVolumeSpecName "ovn-rundir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 28 07:19:23 crc kubenswrapper[4922]: I1128 07:19:23.041304 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/2070fbd8-e847-4b99-ba55-4579804bbc57-ovs-rundir" (OuterVolumeSpecName: "ovs-rundir") pod "2070fbd8-e847-4b99-ba55-4579804bbc57" (UID: "2070fbd8-e847-4b99-ba55-4579804bbc57"). InnerVolumeSpecName "ovs-rundir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 28 07:19:23 crc kubenswrapper[4922]: I1128 07:19:23.041657 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2070fbd8-e847-4b99-ba55-4579804bbc57-config" (OuterVolumeSpecName: "config") pod "2070fbd8-e847-4b99-ba55-4579804bbc57" (UID: "2070fbd8-e847-4b99-ba55-4579804bbc57"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 07:19:23 crc kubenswrapper[4922]: I1128 07:19:23.042653 4922 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2070fbd8-e847-4b99-ba55-4579804bbc57-config\") on node \"crc\" DevicePath \"\"" Nov 28 07:19:23 crc kubenswrapper[4922]: I1128 07:19:23.042674 4922 reconciler_common.go:293] "Volume detached for volume \"ovs-rundir\" (UniqueName: \"kubernetes.io/host-path/2070fbd8-e847-4b99-ba55-4579804bbc57-ovs-rundir\") on node \"crc\" DevicePath \"\"" Nov 28 07:19:23 crc kubenswrapper[4922]: I1128 07:19:23.043053 4922 reconciler_common.go:293] "Volume detached for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/host-path/2070fbd8-e847-4b99-ba55-4579804bbc57-ovn-rundir\") on node \"crc\" DevicePath \"\"" Nov 28 07:19:23 crc kubenswrapper[4922]: I1128 07:19:23.061209 4922 generic.go:334] "Generic (PLEG): container finished" podID="02d26a97-d447-4f76-90ed-9357e343cd91" containerID="2007239de0a02e5d422915e862a8620464730ddc29632ad08e99aaf25724d88b" exitCode=143 Nov 28 07:19:23 crc kubenswrapper[4922]: I1128 07:19:23.061503 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-5f78c88b9d-zp4nm" event={"ID":"02d26a97-d447-4f76-90ed-9357e343cd91","Type":"ContainerDied","Data":"2007239de0a02e5d422915e862a8620464730ddc29632ad08e99aaf25724d88b"} Nov 28 07:19:23 crc kubenswrapper[4922]: I1128 07:19:23.062977 4922 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/openstackclient" Nov 28 07:19:23 crc kubenswrapper[4922]: I1128 07:19:23.065129 4922 generic.go:334] "Generic (PLEG): container finished" podID="1d97532b-e9ff-4031-a82c-3db5e943bfd9" containerID="565068b14a54fb0888fb374689df2d8105a821181b3cf92257d558b607b5df90" exitCode=143 Nov 28 07:19:23 crc kubenswrapper[4922]: I1128 07:19:23.065623 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-7947bcd956-482dv" event={"ID":"1d97532b-e9ff-4031-a82c-3db5e943bfd9","Type":"ContainerDied","Data":"565068b14a54fb0888fb374689df2d8105a821181b3cf92257d558b607b5df90"} Nov 28 07:19:23 crc kubenswrapper[4922]: I1128 07:19:23.073260 4922 generic.go:334] "Generic (PLEG): container finished" podID="f8685dc8-7577-4076-8a5a-beba52e9bae7" containerID="b4963147ea9e2c2244fba11f3323e999f07d35d36b28648b34f61f69856ae968" exitCode=143 Nov 28 07:19:23 crc kubenswrapper[4922]: I1128 07:19:23.073387 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-worker-5b7cbd995c-rwlzz" event={"ID":"f8685dc8-7577-4076-8a5a-beba52e9bae7","Type":"ContainerDied","Data":"b4963147ea9e2c2244fba11f3323e999f07d35d36b28648b34f61f69856ae968"} Nov 28 07:19:23 crc kubenswrapper[4922]: I1128 07:19:23.083025 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2070fbd8-e847-4b99-ba55-4579804bbc57-kube-api-access-hwdv9" (OuterVolumeSpecName: "kube-api-access-hwdv9") pod "2070fbd8-e847-4b99-ba55-4579804bbc57" (UID: "2070fbd8-e847-4b99-ba55-4579804bbc57"). InnerVolumeSpecName "kube-api-access-hwdv9". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 07:19:23 crc kubenswrapper[4922]: I1128 07:19:23.089495 4922 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-sb-0_39a4d24f-6b5b-48fc-ab66-1ad33462c477/ovsdbserver-sb/0.log" Nov 28 07:19:23 crc kubenswrapper[4922]: I1128 07:19:23.089543 4922 generic.go:334] "Generic (PLEG): container finished" podID="39a4d24f-6b5b-48fc-ab66-1ad33462c477" containerID="b461584211ac1833270ce57012470931433410147f21b7bbc92e13735e9d4731" exitCode=2 Nov 28 07:19:23 crc kubenswrapper[4922]: I1128 07:19:23.089560 4922 generic.go:334] "Generic (PLEG): container finished" podID="39a4d24f-6b5b-48fc-ab66-1ad33462c477" containerID="38e3e9f9a924e02c5e76f4a6a76132d9a4596633913f1d0199cbf2b940a82fc6" exitCode=143 Nov 28 07:19:23 crc kubenswrapper[4922]: I1128 07:19:23.089630 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-0" event={"ID":"39a4d24f-6b5b-48fc-ab66-1ad33462c477","Type":"ContainerDied","Data":"b461584211ac1833270ce57012470931433410147f21b7bbc92e13735e9d4731"} Nov 28 07:19:23 crc kubenswrapper[4922]: I1128 07:19:23.089657 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-0" event={"ID":"39a4d24f-6b5b-48fc-ab66-1ad33462c477","Type":"ContainerDied","Data":"38e3e9f9a924e02c5e76f4a6a76132d9a4596633913f1d0199cbf2b940a82fc6"} Nov 28 07:19:23 crc kubenswrapper[4922]: I1128 07:19:23.091889 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-keystone-listener-7d9b9667cd-5cmld" event={"ID":"454a2683-850f-4ce0-8ebe-7758105dd255","Type":"ContainerStarted","Data":"82d9e543274e4be8f4826c69d7ecc1131e9e082d0a1ba62067cf8480c2e27a01"} Nov 28 07:19:23 crc kubenswrapper[4922]: I1128 07:19:23.094000 4922 generic.go:334] "Generic (PLEG): container finished" podID="264e478f-8337-4f40-b005-84a7cd802eaa" 
containerID="6d78c8d0fed333630cf5081b1caacc151cab7db6d74c7fe5e06ec18ea0087bce" exitCode=137 Nov 28 07:19:23 crc kubenswrapper[4922]: I1128 07:19:23.094074 4922 scope.go:117] "RemoveContainer" containerID="6d78c8d0fed333630cf5081b1caacc151cab7db6d74c7fe5e06ec18ea0087bce" Nov 28 07:19:23 crc kubenswrapper[4922]: I1128 07:19:23.094260 4922 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/openstackclient" Nov 28 07:19:23 crc kubenswrapper[4922]: I1128 07:19:23.097437 4922 generic.go:334] "Generic (PLEG): container finished" podID="2a50cebf-c40b-425a-86a1-7813277f1b5a" containerID="4f746f52686852078d4054015bbe108873cac8ffaf0e003c3bb348ac22540b3b" exitCode=0 Nov 28 07:19:23 crc kubenswrapper[4922]: I1128 07:19:23.097463 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6bc646c8f9-nfgpb" event={"ID":"2a50cebf-c40b-425a-86a1-7813277f1b5a","Type":"ContainerDied","Data":"4f746f52686852078d4054015bbe108873cac8ffaf0e003c3bb348ac22540b3b"} Nov 28 07:19:23 crc kubenswrapper[4922]: I1128 07:19:23.130241 4922 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6bc646c8f9-nfgpb" Nov 28 07:19:23 crc kubenswrapper[4922]: I1128 07:19:23.137190 4922 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-api-757bbb5fbd-lx4kn"] Nov 28 07:19:23 crc kubenswrapper[4922]: I1128 07:19:23.145087 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bbzbw\" (UniqueName: \"kubernetes.io/projected/264e478f-8337-4f40-b005-84a7cd802eaa-kube-api-access-bbzbw\") pod \"264e478f-8337-4f40-b005-84a7cd802eaa\" (UID: \"264e478f-8337-4f40-b005-84a7cd802eaa\") " Nov 28 07:19:23 crc kubenswrapper[4922]: I1128 07:19:23.149613 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/264e478f-8337-4f40-b005-84a7cd802eaa-openstack-config-secret\") pod \"264e478f-8337-4f40-b005-84a7cd802eaa\" (UID: \"264e478f-8337-4f40-b005-84a7cd802eaa\") " Nov 28 07:19:23 crc kubenswrapper[4922]: I1128 07:19:23.149642 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/264e478f-8337-4f40-b005-84a7cd802eaa-combined-ca-bundle\") pod \"264e478f-8337-4f40-b005-84a7cd802eaa\" (UID: \"264e478f-8337-4f40-b005-84a7cd802eaa\") " Nov 28 07:19:23 crc kubenswrapper[4922]: I1128 07:19:23.149676 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/264e478f-8337-4f40-b005-84a7cd802eaa-openstack-config\") pod \"264e478f-8337-4f40-b005-84a7cd802eaa\" (UID: \"264e478f-8337-4f40-b005-84a7cd802eaa\") " Nov 28 07:19:23 crc kubenswrapper[4922]: I1128 07:19:23.150422 4922 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hwdv9\" (UniqueName: \"kubernetes.io/projected/2070fbd8-e847-4b99-ba55-4579804bbc57-kube-api-access-hwdv9\") on node \"crc\" DevicePath \"\"" Nov 28 07:19:23 crc kubenswrapper[4922]: I1128 07:19:23.218172 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/264e478f-8337-4f40-b005-84a7cd802eaa-kube-api-access-bbzbw" (OuterVolumeSpecName: "kube-api-access-bbzbw") pod "264e478f-8337-4f40-b005-84a7cd802eaa" (UID: "264e478f-8337-4f40-b005-84a7cd802eaa"). InnerVolumeSpecName "kube-api-access-bbzbw". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 07:19:23 crc kubenswrapper[4922]: I1128 07:19:23.218856 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/264e478f-8337-4f40-b005-84a7cd802eaa-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "264e478f-8337-4f40-b005-84a7cd802eaa" (UID: "264e478f-8337-4f40-b005-84a7cd802eaa"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 07:19:23 crc kubenswrapper[4922]: I1128 07:19:23.231344 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2070fbd8-e847-4b99-ba55-4579804bbc57-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "2070fbd8-e847-4b99-ba55-4579804bbc57" (UID: "2070fbd8-e847-4b99-ba55-4579804bbc57"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 07:19:23 crc kubenswrapper[4922]: I1128 07:19:23.239828 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/264e478f-8337-4f40-b005-84a7cd802eaa-openstack-config" (OuterVolumeSpecName: "openstack-config") pod "264e478f-8337-4f40-b005-84a7cd802eaa" (UID: "264e478f-8337-4f40-b005-84a7cd802eaa"). InnerVolumeSpecName "openstack-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 07:19:23 crc kubenswrapper[4922]: I1128 07:19:23.254756 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/2a50cebf-c40b-425a-86a1-7813277f1b5a-dns-swift-storage-0\") pod \"2a50cebf-c40b-425a-86a1-7813277f1b5a\" (UID: \"2a50cebf-c40b-425a-86a1-7813277f1b5a\") " Nov 28 07:19:23 crc kubenswrapper[4922]: I1128 07:19:23.255137 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bw9zw\" (UniqueName: \"kubernetes.io/projected/2a50cebf-c40b-425a-86a1-7813277f1b5a-kube-api-access-bw9zw\") pod \"2a50cebf-c40b-425a-86a1-7813277f1b5a\" (UID: \"2a50cebf-c40b-425a-86a1-7813277f1b5a\") " Nov 28 07:19:23 crc kubenswrapper[4922]: I1128 07:19:23.255203 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/2a50cebf-c40b-425a-86a1-7813277f1b5a-ovsdbserver-sb\") pod \"2a50cebf-c40b-425a-86a1-7813277f1b5a\" (UID: \"2a50cebf-c40b-425a-86a1-7813277f1b5a\") " Nov 28 07:19:23 crc kubenswrapper[4922]: I1128 07:19:23.255280 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/2a50cebf-c40b-425a-86a1-7813277f1b5a-ovsdbserver-nb\") pod \"2a50cebf-c40b-425a-86a1-7813277f1b5a\" (UID: \"2a50cebf-c40b-425a-86a1-7813277f1b5a\") " Nov 28 07:19:23 crc kubenswrapper[4922]: I1128 07:19:23.255364 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2a50cebf-c40b-425a-86a1-7813277f1b5a-config\") pod \"2a50cebf-c40b-425a-86a1-7813277f1b5a\" (UID: \"2a50cebf-c40b-425a-86a1-7813277f1b5a\") " Nov 28 07:19:23 crc kubenswrapper[4922]: I1128 07:19:23.255447 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/2a50cebf-c40b-425a-86a1-7813277f1b5a-dns-svc\") pod \"2a50cebf-c40b-425a-86a1-7813277f1b5a\" (UID: \"2a50cebf-c40b-425a-86a1-7813277f1b5a\") " Nov 28 07:19:23 crc 
kubenswrapper[4922]: I1128 07:19:23.256382 4922 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/264e478f-8337-4f40-b005-84a7cd802eaa-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 07:19:23 crc kubenswrapper[4922]: I1128 07:19:23.256429 4922 reconciler_common.go:293] "Volume detached for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/264e478f-8337-4f40-b005-84a7cd802eaa-openstack-config\") on node \"crc\" DevicePath \"\"" Nov 28 07:19:23 crc kubenswrapper[4922]: I1128 07:19:23.256441 4922 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2070fbd8-e847-4b99-ba55-4579804bbc57-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 07:19:23 crc kubenswrapper[4922]: I1128 07:19:23.256451 4922 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bbzbw\" (UniqueName: \"kubernetes.io/projected/264e478f-8337-4f40-b005-84a7cd802eaa-kube-api-access-bbzbw\") on node \"crc\" DevicePath \"\"" Nov 28 07:19:23 crc kubenswrapper[4922]: I1128 07:19:23.289923 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2a50cebf-c40b-425a-86a1-7813277f1b5a-kube-api-access-bw9zw" (OuterVolumeSpecName: "kube-api-access-bw9zw") pod "2a50cebf-c40b-425a-86a1-7813277f1b5a" (UID: "2a50cebf-c40b-425a-86a1-7813277f1b5a"). InnerVolumeSpecName "kube-api-access-bw9zw". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 07:19:23 crc kubenswrapper[4922]: I1128 07:19:23.357974 4922 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bw9zw\" (UniqueName: \"kubernetes.io/projected/2a50cebf-c40b-425a-86a1-7813277f1b5a-kube-api-access-bw9zw\") on node \"crc\" DevicePath \"\"" Nov 28 07:19:23 crc kubenswrapper[4922]: I1128 07:19:23.417129 4922 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="067b3033-51f7-4b75-a86b-f3e666119c7f" path="/var/lib/kubelet/pods/067b3033-51f7-4b75-a86b-f3e666119c7f/volumes" Nov 28 07:19:23 crc kubenswrapper[4922]: I1128 07:19:23.417883 4922 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0a538d8e-6665-46a0-bf05-2957a37bc9a2" path="/var/lib/kubelet/pods/0a538d8e-6665-46a0-bf05-2957a37bc9a2/volumes" Nov 28 07:19:23 crc kubenswrapper[4922]: I1128 07:19:23.423727 4922 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2844ae42-cefd-4863-b8f4-95d253b1a5b4" path="/var/lib/kubelet/pods/2844ae42-cefd-4863-b8f4-95d253b1a5b4/volumes" Nov 28 07:19:23 crc kubenswrapper[4922]: I1128 07:19:23.427526 4922 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2bf381d5-4211-44a0-8fd6-4b1c05fb690d" path="/var/lib/kubelet/pods/2bf381d5-4211-44a0-8fd6-4b1c05fb690d/volumes" Nov 28 07:19:23 crc kubenswrapper[4922]: I1128 07:19:23.428387 4922 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4c7a8e1f-5f3c-451e-b407-6afb6d5443e3" path="/var/lib/kubelet/pods/4c7a8e1f-5f3c-451e-b407-6afb6d5443e3/volumes" Nov 28 07:19:23 crc kubenswrapper[4922]: I1128 07:19:23.435836 4922 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7d1b0372-7d56-456f-bcb5-2bad202c6f8f" path="/var/lib/kubelet/pods/7d1b0372-7d56-456f-bcb5-2bad202c6f8f/volumes" Nov 28 07:19:23 crc kubenswrapper[4922]: I1128 07:19:23.436450 4922 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="97450035-755a-422c-ade7-b3bd97b917cd" 
path="/var/lib/kubelet/pods/97450035-755a-422c-ade7-b3bd97b917cd/volumes" Nov 28 07:19:23 crc kubenswrapper[4922]: I1128 07:19:23.437047 4922 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="aa053525-2f00-4415-b9b5-35948c8e5038" path="/var/lib/kubelet/pods/aa053525-2f00-4415-b9b5-35948c8e5038/volumes" Nov 28 07:19:23 crc kubenswrapper[4922]: I1128 07:19:23.445593 4922 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="daea5abd-a0b5-4504-a7e5-53ec88446745" path="/var/lib/kubelet/pods/daea5abd-a0b5-4504-a7e5-53ec88446745/volumes" Nov 28 07:19:23 crc kubenswrapper[4922]: E1128 07:19:23.568376 4922 configmap.go:193] Couldn't get configMap openstack/rabbitmq-config-data: configmap "rabbitmq-config-data" not found Nov 28 07:19:23 crc kubenswrapper[4922]: E1128 07:19:23.568703 4922 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/4cf25acc-0d60-4b0a-a9c9-adc7ddce7458-config-data podName:4cf25acc-0d60-4b0a-a9c9-adc7ddce7458 nodeName:}" failed. No retries permitted until 2025-11-28 07:19:27.568686721 +0000 UTC m=+1612.489082303 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "config-data" (UniqueName: "kubernetes.io/configmap/4cf25acc-0d60-4b0a-a9c9-adc7ddce7458-config-data") pod "rabbitmq-server-0" (UID: "4cf25acc-0d60-4b0a-a9c9-adc7ddce7458") : configmap "rabbitmq-config-data" not found Nov 28 07:19:23 crc kubenswrapper[4922]: I1128 07:19:23.583590 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/264e478f-8337-4f40-b005-84a7cd802eaa-openstack-config-secret" (OuterVolumeSpecName: "openstack-config-secret") pod "264e478f-8337-4f40-b005-84a7cd802eaa" (UID: "264e478f-8337-4f40-b005-84a7cd802eaa"). InnerVolumeSpecName "openstack-config-secret". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 07:19:23 crc kubenswrapper[4922]: I1128 07:19:23.646187 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2a50cebf-c40b-425a-86a1-7813277f1b5a-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "2a50cebf-c40b-425a-86a1-7813277f1b5a" (UID: "2a50cebf-c40b-425a-86a1-7813277f1b5a"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 07:19:23 crc kubenswrapper[4922]: I1128 07:19:23.652689 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2a50cebf-c40b-425a-86a1-7813277f1b5a-config" (OuterVolumeSpecName: "config") pod "2a50cebf-c40b-425a-86a1-7813277f1b5a" (UID: "2a50cebf-c40b-425a-86a1-7813277f1b5a"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 07:19:23 crc kubenswrapper[4922]: I1128 07:19:23.666875 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2a50cebf-c40b-425a-86a1-7813277f1b5a-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "2a50cebf-c40b-425a-86a1-7813277f1b5a" (UID: "2a50cebf-c40b-425a-86a1-7813277f1b5a"). InnerVolumeSpecName "dns-svc". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 07:19:23 crc kubenswrapper[4922]: I1128 07:19:23.674476 4922 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/2a50cebf-c40b-425a-86a1-7813277f1b5a-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 28 07:19:23 crc kubenswrapper[4922]: I1128 07:19:23.674500 4922 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2a50cebf-c40b-425a-86a1-7813277f1b5a-config\") on node \"crc\" DevicePath \"\"" Nov 28 07:19:23 crc kubenswrapper[4922]: I1128 07:19:23.674508 4922 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/2a50cebf-c40b-425a-86a1-7813277f1b5a-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 28 07:19:23 crc kubenswrapper[4922]: I1128 07:19:23.674518 4922 reconciler_common.go:293] "Volume detached for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/264e478f-8337-4f40-b005-84a7cd802eaa-openstack-config-secret\") on node \"crc\" DevicePath \"\"" Nov 28 07:19:23 crc kubenswrapper[4922]: I1128 07:19:23.683432 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2a50cebf-c40b-425a-86a1-7813277f1b5a-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "2a50cebf-c40b-425a-86a1-7813277f1b5a" (UID: "2a50cebf-c40b-425a-86a1-7813277f1b5a"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 07:19:23 crc kubenswrapper[4922]: I1128 07:19:23.687753 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2a50cebf-c40b-425a-86a1-7813277f1b5a-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "2a50cebf-c40b-425a-86a1-7813277f1b5a" (UID: "2a50cebf-c40b-425a-86a1-7813277f1b5a"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 07:19:23 crc kubenswrapper[4922]: I1128 07:19:23.707488 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2070fbd8-e847-4b99-ba55-4579804bbc57-metrics-certs-tls-certs" (OuterVolumeSpecName: "metrics-certs-tls-certs") pod "2070fbd8-e847-4b99-ba55-4579804bbc57" (UID: "2070fbd8-e847-4b99-ba55-4579804bbc57"). InnerVolumeSpecName "metrics-certs-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 07:19:23 crc kubenswrapper[4922]: I1128 07:19:23.776170 4922 reconciler_common.go:293] "Volume detached for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/2070fbd8-e847-4b99-ba55-4579804bbc57-metrics-certs-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 28 07:19:23 crc kubenswrapper[4922]: I1128 07:19:23.776461 4922 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/2a50cebf-c40b-425a-86a1-7813277f1b5a-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Nov 28 07:19:23 crc kubenswrapper[4922]: I1128 07:19:23.776471 4922 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/2a50cebf-c40b-425a-86a1-7813277f1b5a-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 28 07:19:23 crc kubenswrapper[4922]: I1128 07:19:23.882031 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cindere247-account-delete-5kjlk"] Nov 28 07:19:23 crc kubenswrapper[4922]: I1128 07:19:23.918917 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron5206-account-delete-64tk8"] Nov 28 07:19:23 crc kubenswrapper[4922]: I1128 07:19:23.968278 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance75df-account-delete-wdw2x"] Nov 28 07:19:24 crc kubenswrapper[4922]: I1128 07:19:24.006813 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placementdbc9-account-delete-68tbl"] Nov 28 07:19:24 crc kubenswrapper[4922]: I1128 07:19:24.058351 4922 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-scheduler-0" Nov 28 07:19:24 crc kubenswrapper[4922]: W1128 07:19:24.069916 4922 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podca6d488f_6085_4e22_a325_1b749d8c154c.slice/crio-3c1ae0c005e1f6a82024e6b130e9743b9a7886d2c5ab43b54b49958beb409f77 WatchSource:0}: Error finding container 3c1ae0c005e1f6a82024e6b130e9743b9a7886d2c5ab43b54b49958beb409f77: Status 404 returned error can't find the container with id 3c1ae0c005e1f6a82024e6b130e9743b9a7886d2c5ab43b54b49958beb409f77 Nov 28 07:19:24 crc kubenswrapper[4922]: I1128 07:19:24.092714 4922 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-nb-0_d27299ac-7d8d-4485-86fb-6ac7f34ea1ae/ovsdbserver-nb/0.log" Nov 28 07:19:24 crc kubenswrapper[4922]: I1128 07:19:24.092782 4922 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovsdbserver-nb-0" Nov 28 07:19:24 crc kubenswrapper[4922]: E1128 07:19:24.095456 4922 configmap.go:193] Couldn't get configMap openstack/rabbitmq-cell1-config-data: configmap "rabbitmq-cell1-config-data" not found Nov 28 07:19:24 crc kubenswrapper[4922]: E1128 07:19:24.095521 4922 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/99708a5d-57d5-4479-8e09-94428bb13fa3-config-data podName:99708a5d-57d5-4479-8e09-94428bb13fa3 nodeName:}" failed. No retries permitted until 2025-11-28 07:19:28.095503657 +0000 UTC m=+1613.015899239 (durationBeforeRetry 4s). 
Error: MountVolume.SetUp failed for volume "config-data" (UniqueName: "kubernetes.io/configmap/99708a5d-57d5-4479-8e09-94428bb13fa3-config-data") pod "rabbitmq-cell1-server-0" (UID: "99708a5d-57d5-4479-8e09-94428bb13fa3") : configmap "rabbitmq-cell1-config-data" not found
Nov 28 07:19:24 crc kubenswrapper[4922]: I1128 07:19:24.109286    4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-757bbb5fbd-lx4kn" event={"ID":"f1e0e318-5b90-4c18-ba95-fc261ffb519d","Type":"ContainerStarted","Data":"e63699a485817ccaccdf5b0db749f1a06973bbcadeed876b228797dbc9d18497"}
Nov 28 07:19:24 crc kubenswrapper[4922]: I1128 07:19:24.112577    4922 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-metrics-4jk5t_2070fbd8-e847-4b99-ba55-4579804bbc57/openstack-network-exporter/0.log"
Nov 28 07:19:24 crc kubenswrapper[4922]: I1128 07:19:24.112637    4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-metrics-4jk5t" event={"ID":"2070fbd8-e847-4b99-ba55-4579804bbc57","Type":"ContainerDied","Data":"fd772f1a0597fd171709b6d336192b3f6450b377f74b33a65abea622abc9a97f"}
Nov 28 07:19:24 crc kubenswrapper[4922]: I1128 07:19:24.112669    4922 scope.go:117] "RemoveContainer" containerID="7c57b6ea918e23a8d2aa6a00589247875694d1068bfad7021c5c76b0cae05bf4"
Nov 28 07:19:24 crc kubenswrapper[4922]: I1128 07:19:24.112780    4922 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-metrics-4jk5t"
Nov 28 07:19:24 crc kubenswrapper[4922]: I1128 07:19:24.139300    4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron5206-account-delete-64tk8" event={"ID":"92eb4ce5-fb24-4b33-8e79-6f4e7ba96372","Type":"ContainerStarted","Data":"9a6103901834cc16af5d88e6469564dcb347e6903d65d1a0a0aa50a2d30dd4a9"}
Nov 28 07:19:24 crc kubenswrapper[4922]: I1128 07:19:24.168368    4922 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-sb-0_39a4d24f-6b5b-48fc-ab66-1ad33462c477/ovsdbserver-sb/0.log"
Nov 28 07:19:24 crc kubenswrapper[4922]: I1128 07:19:24.168432    4922 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovsdbserver-sb-0"
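MountVolume.SetUp for the rabbitmq-server-0 and rabbitmq-cell1-server-0 pods keeps failing because their config-data configmaps no longer exist, and nestedpendingoperations backs the retry off (durationBeforeRetry 4s here) instead of hot-looping. While the openstack namespace is being torn down this is expected; a quick lookup distinguishes teardown noise from a genuinely missing configmap (a sketch, assuming the oc CLI; kubectl works identically):

    # Both names are taken verbatim from the errors above; a NotFound
    # response confirms the configmaps were deleted, consistent with the
    # teardown in progress rather than a misconfigured pod.
    oc -n openstack get configmap rabbitmq-config-data rabbitmq-cell1-config-data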
Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Nov 28 07:19:24 crc kubenswrapper[4922]: I1128 07:19:24.176954 4922 generic.go:334] "Generic (PLEG): container finished" podID="98e654e6-cf7b-469f-aa60-118fee0e3764" containerID="4e586197e5573dfc5e1c9ed3428653b242cd1c4575f148e1bde1a32ab14f8f71" exitCode=0 Nov 28 07:19:24 crc kubenswrapper[4922]: I1128 07:19:24.177060 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"98e654e6-cf7b-469f-aa60-118fee0e3764","Type":"ContainerDied","Data":"4e586197e5573dfc5e1c9ed3428653b242cd1c4575f148e1bde1a32ab14f8f71"} Nov 28 07:19:24 crc kubenswrapper[4922]: I1128 07:19:24.177091 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"98e654e6-cf7b-469f-aa60-118fee0e3764","Type":"ContainerDied","Data":"1a3a5d7523eb65abd59b6761275aa5706b3953f2d4e19f25dbfa1c0ddbd7328a"} Nov 28 07:19:24 crc kubenswrapper[4922]: I1128 07:19:24.179256 4922 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovn-controller-metrics-4jk5t"] Nov 28 07:19:24 crc kubenswrapper[4922]: I1128 07:19:24.193101 4922 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ovn-controller-metrics-4jk5t"] Nov 28 07:19:24 crc kubenswrapper[4922]: I1128 07:19:24.193431 4922 generic.go:334] "Generic (PLEG): container finished" podID="678d1f5b-5ebc-4b9e-b5ab-316ec7dfda05" containerID="e6868b48a7fdc4bd6127081507fdf9eb8d100fe7676d439af911bfdac8246114" exitCode=0 Nov 28 07:19:24 crc kubenswrapper[4922]: I1128 07:19:24.193569 4922 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-scheduler-0" Nov 28 07:19:24 crc kubenswrapper[4922]: I1128 07:19:24.193971 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"678d1f5b-5ebc-4b9e-b5ab-316ec7dfda05","Type":"ContainerDied","Data":"e6868b48a7fdc4bd6127081507fdf9eb8d100fe7676d439af911bfdac8246114"} Nov 28 07:19:24 crc kubenswrapper[4922]: I1128 07:19:24.193995 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"678d1f5b-5ebc-4b9e-b5ab-316ec7dfda05","Type":"ContainerDied","Data":"73eda3046837957e632c01f6c732ea7f1263aaaf13c41e448b812853ee42f156"} Nov 28 07:19:24 crc kubenswrapper[4922]: I1128 07:19:24.196105 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/678d1f5b-5ebc-4b9e-b5ab-316ec7dfda05-config-data-custom\") pod \"678d1f5b-5ebc-4b9e-b5ab-316ec7dfda05\" (UID: \"678d1f5b-5ebc-4b9e-b5ab-316ec7dfda05\") " Nov 28 07:19:24 crc kubenswrapper[4922]: I1128 07:19:24.196161 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nbcvt\" (UniqueName: \"kubernetes.io/projected/678d1f5b-5ebc-4b9e-b5ab-316ec7dfda05-kube-api-access-nbcvt\") pod \"678d1f5b-5ebc-4b9e-b5ab-316ec7dfda05\" (UID: \"678d1f5b-5ebc-4b9e-b5ab-316ec7dfda05\") " Nov 28 07:19:24 crc kubenswrapper[4922]: I1128 07:19:24.196192 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/d27299ac-7d8d-4485-86fb-6ac7f34ea1ae-scripts\") pod \"d27299ac-7d8d-4485-86fb-6ac7f34ea1ae\" (UID: \"d27299ac-7d8d-4485-86fb-6ac7f34ea1ae\") " Nov 28 07:19:24 crc kubenswrapper[4922]: I1128 07:19:24.196298 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: 
\"kubernetes.io/secret/678d1f5b-5ebc-4b9e-b5ab-316ec7dfda05-scripts\") pod \"678d1f5b-5ebc-4b9e-b5ab-316ec7dfda05\" (UID: \"678d1f5b-5ebc-4b9e-b5ab-316ec7dfda05\") " Nov 28 07:19:24 crc kubenswrapper[4922]: I1128 07:19:24.196331 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/d27299ac-7d8d-4485-86fb-6ac7f34ea1ae-ovsdb-rundir\") pod \"d27299ac-7d8d-4485-86fb-6ac7f34ea1ae\" (UID: \"d27299ac-7d8d-4485-86fb-6ac7f34ea1ae\") " Nov 28 07:19:24 crc kubenswrapper[4922]: I1128 07:19:24.196412 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d27299ac-7d8d-4485-86fb-6ac7f34ea1ae-config\") pod \"d27299ac-7d8d-4485-86fb-6ac7f34ea1ae\" (UID: \"d27299ac-7d8d-4485-86fb-6ac7f34ea1ae\") " Nov 28 07:19:24 crc kubenswrapper[4922]: I1128 07:19:24.196438 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/678d1f5b-5ebc-4b9e-b5ab-316ec7dfda05-etc-machine-id\") pod \"678d1f5b-5ebc-4b9e-b5ab-316ec7dfda05\" (UID: \"678d1f5b-5ebc-4b9e-b5ab-316ec7dfda05\") " Nov 28 07:19:24 crc kubenswrapper[4922]: I1128 07:19:24.196749 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/678d1f5b-5ebc-4b9e-b5ab-316ec7dfda05-config-data\") pod \"678d1f5b-5ebc-4b9e-b5ab-316ec7dfda05\" (UID: \"678d1f5b-5ebc-4b9e-b5ab-316ec7dfda05\") " Nov 28 07:19:24 crc kubenswrapper[4922]: I1128 07:19:24.196785 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d27299ac-7d8d-4485-86fb-6ac7f34ea1ae-combined-ca-bundle\") pod \"d27299ac-7d8d-4485-86fb-6ac7f34ea1ae\" (UID: \"d27299ac-7d8d-4485-86fb-6ac7f34ea1ae\") " Nov 28 07:19:24 crc kubenswrapper[4922]: I1128 07:19:24.199687 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d27299ac-7d8d-4485-86fb-6ac7f34ea1ae-scripts" (OuterVolumeSpecName: "scripts") pod "d27299ac-7d8d-4485-86fb-6ac7f34ea1ae" (UID: "d27299ac-7d8d-4485-86fb-6ac7f34ea1ae"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 07:19:24 crc kubenswrapper[4922]: I1128 07:19:24.199742 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d27299ac-7d8d-4485-86fb-6ac7f34ea1ae-config" (OuterVolumeSpecName: "config") pod "d27299ac-7d8d-4485-86fb-6ac7f34ea1ae" (UID: "d27299ac-7d8d-4485-86fb-6ac7f34ea1ae"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 07:19:24 crc kubenswrapper[4922]: I1128 07:19:24.199798 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/678d1f5b-5ebc-4b9e-b5ab-316ec7dfda05-etc-machine-id" (OuterVolumeSpecName: "etc-machine-id") pod "678d1f5b-5ebc-4b9e-b5ab-316ec7dfda05" (UID: "678d1f5b-5ebc-4b9e-b5ab-316ec7dfda05"). InnerVolumeSpecName "etc-machine-id". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 28 07:19:24 crc kubenswrapper[4922]: I1128 07:19:24.199854 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d27299ac-7d8d-4485-86fb-6ac7f34ea1ae-ovsdb-rundir" (OuterVolumeSpecName: "ovsdb-rundir") pod "d27299ac-7d8d-4485-86fb-6ac7f34ea1ae" (UID: "d27299ac-7d8d-4485-86fb-6ac7f34ea1ae"). 
InnerVolumeSpecName "ovsdb-rundir". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 07:19:24 crc kubenswrapper[4922]: I1128 07:19:24.206252 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb-tls-certs\" (UniqueName: \"kubernetes.io/secret/d27299ac-7d8d-4485-86fb-6ac7f34ea1ae-ovsdbserver-nb-tls-certs\") pod \"d27299ac-7d8d-4485-86fb-6ac7f34ea1ae\" (UID: \"d27299ac-7d8d-4485-86fb-6ac7f34ea1ae\") " Nov 28 07:19:24 crc kubenswrapper[4922]: I1128 07:19:24.206622 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-65xxr\" (UniqueName: \"kubernetes.io/projected/d27299ac-7d8d-4485-86fb-6ac7f34ea1ae-kube-api-access-65xxr\") pod \"d27299ac-7d8d-4485-86fb-6ac7f34ea1ae\" (UID: \"d27299ac-7d8d-4485-86fb-6ac7f34ea1ae\") " Nov 28 07:19:24 crc kubenswrapper[4922]: I1128 07:19:24.206655 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/678d1f5b-5ebc-4b9e-b5ab-316ec7dfda05-combined-ca-bundle\") pod \"678d1f5b-5ebc-4b9e-b5ab-316ec7dfda05\" (UID: \"678d1f5b-5ebc-4b9e-b5ab-316ec7dfda05\") " Nov 28 07:19:24 crc kubenswrapper[4922]: I1128 07:19:24.206679 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/d27299ac-7d8d-4485-86fb-6ac7f34ea1ae-metrics-certs-tls-certs\") pod \"d27299ac-7d8d-4485-86fb-6ac7f34ea1ae\" (UID: \"d27299ac-7d8d-4485-86fb-6ac7f34ea1ae\") " Nov 28 07:19:24 crc kubenswrapper[4922]: I1128 07:19:24.206715 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovndbcluster-nb-etc-ovn\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"d27299ac-7d8d-4485-86fb-6ac7f34ea1ae\" (UID: \"d27299ac-7d8d-4485-86fb-6ac7f34ea1ae\") " Nov 28 07:19:24 crc kubenswrapper[4922]: I1128 07:19:24.216187 4922 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/d27299ac-7d8d-4485-86fb-6ac7f34ea1ae-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 07:19:24 crc kubenswrapper[4922]: I1128 07:19:24.216212 4922 reconciler_common.go:293] "Volume detached for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/d27299ac-7d8d-4485-86fb-6ac7f34ea1ae-ovsdb-rundir\") on node \"crc\" DevicePath \"\"" Nov 28 07:19:24 crc kubenswrapper[4922]: I1128 07:19:24.216252 4922 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d27299ac-7d8d-4485-86fb-6ac7f34ea1ae-config\") on node \"crc\" DevicePath \"\"" Nov 28 07:19:24 crc kubenswrapper[4922]: I1128 07:19:24.216260 4922 reconciler_common.go:293] "Volume detached for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/678d1f5b-5ebc-4b9e-b5ab-316ec7dfda05-etc-machine-id\") on node \"crc\" DevicePath \"\"" Nov 28 07:19:24 crc kubenswrapper[4922]: I1128 07:19:24.217001 4922 scope.go:117] "RemoveContainer" containerID="4e586197e5573dfc5e1c9ed3428653b242cd1c4575f148e1bde1a32ab14f8f71" Nov 28 07:19:24 crc kubenswrapper[4922]: I1128 07:19:24.218205 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage05-crc" (OuterVolumeSpecName: "ovndbcluster-nb-etc-ovn") pod "d27299ac-7d8d-4485-86fb-6ac7f34ea1ae" (UID: "d27299ac-7d8d-4485-86fb-6ac7f34ea1ae"). InnerVolumeSpecName "local-storage05-crc". 
PluginName "kubernetes.io/local-volume", VolumeGidValue "" Nov 28 07:19:24 crc kubenswrapper[4922]: I1128 07:19:24.226004 4922 generic.go:334] "Generic (PLEG): container finished" podID="46c3d0a8-d9ed-419a-baf3-57aaaf0c56fe" containerID="d3b90768a305d76f6bbe78d7fd4d3b39f50f3a28bba75d44bc70674b4aca8f70" exitCode=0 Nov 28 07:19:24 crc kubenswrapper[4922]: I1128 07:19:24.226063 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"46c3d0a8-d9ed-419a-baf3-57aaaf0c56fe","Type":"ContainerDied","Data":"d3b90768a305d76f6bbe78d7fd4d3b39f50f3a28bba75d44bc70674b4aca8f70"} Nov 28 07:19:24 crc kubenswrapper[4922]: I1128 07:19:24.227914 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d27299ac-7d8d-4485-86fb-6ac7f34ea1ae-kube-api-access-65xxr" (OuterVolumeSpecName: "kube-api-access-65xxr") pod "d27299ac-7d8d-4485-86fb-6ac7f34ea1ae" (UID: "d27299ac-7d8d-4485-86fb-6ac7f34ea1ae"). InnerVolumeSpecName "kube-api-access-65xxr". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 07:19:24 crc kubenswrapper[4922]: I1128 07:19:24.229464 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placementdbc9-account-delete-68tbl" event={"ID":"ca6d488f-6085-4e22-a325-1b749d8c154c","Type":"ContainerStarted","Data":"3c1ae0c005e1f6a82024e6b130e9743b9a7886d2c5ab43b54b49958beb409f77"} Nov 28 07:19:24 crc kubenswrapper[4922]: I1128 07:19:24.232451 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/678d1f5b-5ebc-4b9e-b5ab-316ec7dfda05-kube-api-access-nbcvt" (OuterVolumeSpecName: "kube-api-access-nbcvt") pod "678d1f5b-5ebc-4b9e-b5ab-316ec7dfda05" (UID: "678d1f5b-5ebc-4b9e-b5ab-316ec7dfda05"). InnerVolumeSpecName "kube-api-access-nbcvt". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 07:19:24 crc kubenswrapper[4922]: I1128 07:19:24.233336 4922 generic.go:334] "Generic (PLEG): container finished" podID="349fc74f-b0ac-437d-89ab-7106192b8e9e" containerID="e866e4c558d425e16d02cae7a1249331e5e9aee144a65a24341d72360112484f" exitCode=0 Nov 28 07:19:24 crc kubenswrapper[4922]: I1128 07:19:24.233384 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"349fc74f-b0ac-437d-89ab-7106192b8e9e","Type":"ContainerDied","Data":"e866e4c558d425e16d02cae7a1249331e5e9aee144a65a24341d72360112484f"} Nov 28 07:19:24 crc kubenswrapper[4922]: I1128 07:19:24.233552 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/678d1f5b-5ebc-4b9e-b5ab-316ec7dfda05-scripts" (OuterVolumeSpecName: "scripts") pod "678d1f5b-5ebc-4b9e-b5ab-316ec7dfda05" (UID: "678d1f5b-5ebc-4b9e-b5ab-316ec7dfda05"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 07:19:24 crc kubenswrapper[4922]: I1128 07:19:24.244504 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/678d1f5b-5ebc-4b9e-b5ab-316ec7dfda05-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "678d1f5b-5ebc-4b9e-b5ab-316ec7dfda05" (UID: "678d1f5b-5ebc-4b9e-b5ab-316ec7dfda05"). InnerVolumeSpecName "config-data-custom". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 07:19:24 crc kubenswrapper[4922]: I1128 07:19:24.249504 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/novaapi78f1-account-delete-mwv4p"] Nov 28 07:19:24 crc kubenswrapper[4922]: I1128 07:19:24.254030 4922 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-sb-0_39a4d24f-6b5b-48fc-ab66-1ad33462c477/ovsdbserver-sb/0.log" Nov 28 07:19:24 crc kubenswrapper[4922]: I1128 07:19:24.254160 4922 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovsdbserver-sb-0" Nov 28 07:19:24 crc kubenswrapper[4922]: I1128 07:19:24.254312 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-0" event={"ID":"39a4d24f-6b5b-48fc-ab66-1ad33462c477","Type":"ContainerDied","Data":"b5071c1943aa02a0d763ccb09244c3588d19f030bea2c6ea0dd5385ade9fc987"} Nov 28 07:19:24 crc kubenswrapper[4922]: W1128 07:19:24.254821 4922 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod2425a44a_c3c8_4533_9aa6_deb657556efb.slice/crio-7bfa8c1248500fef224bcac683aacd82b8b175f254693177bb479a8cb36f835f WatchSource:0}: Error finding container 7bfa8c1248500fef224bcac683aacd82b8b175f254693177bb479a8cb36f835f: Status 404 returned error can't find the container with id 7bfa8c1248500fef224bcac683aacd82b8b175f254693177bb479a8cb36f835f Nov 28 07:19:24 crc kubenswrapper[4922]: I1128 07:19:24.257732 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican5229-account-delete-qq87v"] Nov 28 07:19:24 crc kubenswrapper[4922]: I1128 07:19:24.258851 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-keystone-listener-7d9b9667cd-5cmld" event={"ID":"454a2683-850f-4ce0-8ebe-7758105dd255","Type":"ContainerStarted","Data":"44df4a4a257621f216f61d08c9cd358b5d8198f41a5d916577451fe0af7bb74c"} Nov 28 07:19:24 crc kubenswrapper[4922]: I1128 07:19:24.262467 4922 generic.go:334] "Generic (PLEG): container finished" podID="c10f3b66-a7e0-4690-939a-5938de689b3a" containerID="0c46d19aa54b7ae1c585537a2f9d7f0d49af908f52150fea5f7f93185d9dc261" exitCode=0 Nov 28 07:19:24 crc kubenswrapper[4922]: I1128 07:19:24.262523 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-proxy-cc5b55cb5-8tgkn" event={"ID":"c10f3b66-a7e0-4690-939a-5938de689b3a","Type":"ContainerDied","Data":"0c46d19aa54b7ae1c585537a2f9d7f0d49af908f52150fea5f7f93185d9dc261"} Nov 28 07:19:24 crc kubenswrapper[4922]: I1128 07:19:24.264206 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance75df-account-delete-wdw2x" event={"ID":"1f339784-df58-44f7-947d-9d80559c1c0c","Type":"ContainerStarted","Data":"82e2aea8a8118d592fb635465fa1cabb0ff91e106bd61f291e2a196c1a5611e4"} Nov 28 07:19:24 crc kubenswrapper[4922]: I1128 07:19:24.265854 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-worker-b597c9f45-g422b" event={"ID":"6ef73d39-2ed2-4168-8598-e0749aa0a26b","Type":"ContainerStarted","Data":"5fac949f1f96664181265a651447466463b94e2a2793695fa4197d9252bd2acd"} Nov 28 07:19:24 crc kubenswrapper[4922]: I1128 07:19:24.267328 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6bc646c8f9-nfgpb" event={"ID":"2a50cebf-c40b-425a-86a1-7813277f1b5a","Type":"ContainerDied","Data":"dc1012a60da575ba3700b918a6462fac59c76357be0de437ceb9253da366d18e"} Nov 28 07:19:24 crc kubenswrapper[4922]: I1128 07:19:24.267414 4922 util.go:48] "No 
ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6bc646c8f9-nfgpb" Nov 28 07:19:24 crc kubenswrapper[4922]: I1128 07:19:24.281279 4922 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-nb-0_d27299ac-7d8d-4485-86fb-6ac7f34ea1ae/ovsdbserver-nb/0.log" Nov 28 07:19:24 crc kubenswrapper[4922]: I1128 07:19:24.281364 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-0" event={"ID":"d27299ac-7d8d-4485-86fb-6ac7f34ea1ae","Type":"ContainerDied","Data":"b8fcb175ab0edf5fb2b69492452725d0f9af96c20603ff5db168462c5286327e"} Nov 28 07:19:24 crc kubenswrapper[4922]: I1128 07:19:24.281463 4922 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovsdbserver-nb-0" Nov 28 07:19:24 crc kubenswrapper[4922]: I1128 07:19:24.291799 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cindere247-account-delete-5kjlk" event={"ID":"1018f07c-38b9-440f-b126-26e59293e757","Type":"ContainerStarted","Data":"0da564880ff2e86c3526a1c95f92a4bf1c199ab670d8f52be60cbd4ad3bef35a"} Nov 28 07:19:24 crc kubenswrapper[4922]: I1128 07:19:24.320578 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/39a4d24f-6b5b-48fc-ab66-1ad33462c477-metrics-certs-tls-certs\") pod \"39a4d24f-6b5b-48fc-ab66-1ad33462c477\" (UID: \"39a4d24f-6b5b-48fc-ab66-1ad33462c477\") " Nov 28 07:19:24 crc kubenswrapper[4922]: I1128 07:19:24.320701 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"vencrypt-tls-certs\" (UniqueName: \"kubernetes.io/secret/98e654e6-cf7b-469f-aa60-118fee0e3764-vencrypt-tls-certs\") pod \"98e654e6-cf7b-469f-aa60-118fee0e3764\" (UID: \"98e654e6-cf7b-469f-aa60-118fee0e3764\") " Nov 28 07:19:24 crc kubenswrapper[4922]: I1128 07:19:24.320924 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/98e654e6-cf7b-469f-aa60-118fee0e3764-config-data\") pod \"98e654e6-cf7b-469f-aa60-118fee0e3764\" (UID: \"98e654e6-cf7b-469f-aa60-118fee0e3764\") " Nov 28 07:19:24 crc kubenswrapper[4922]: I1128 07:19:24.321013 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/39a4d24f-6b5b-48fc-ab66-1ad33462c477-ovsdb-rundir\") pod \"39a4d24f-6b5b-48fc-ab66-1ad33462c477\" (UID: \"39a4d24f-6b5b-48fc-ab66-1ad33462c477\") " Nov 28 07:19:24 crc kubenswrapper[4922]: I1128 07:19:24.321755 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/39a4d24f-6b5b-48fc-ab66-1ad33462c477-ovsdb-rundir" (OuterVolumeSpecName: "ovsdb-rundir") pod "39a4d24f-6b5b-48fc-ab66-1ad33462c477" (UID: "39a4d24f-6b5b-48fc-ab66-1ad33462c477"). InnerVolumeSpecName "ovsdb-rundir". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 07:19:24 crc kubenswrapper[4922]: I1128 07:19:24.321406 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb-tls-certs\" (UniqueName: \"kubernetes.io/secret/39a4d24f-6b5b-48fc-ab66-1ad33462c477-ovsdbserver-sb-tls-certs\") pod \"39a4d24f-6b5b-48fc-ab66-1ad33462c477\" (UID: \"39a4d24f-6b5b-48fc-ab66-1ad33462c477\") " Nov 28 07:19:24 crc kubenswrapper[4922]: I1128 07:19:24.321882 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nblg2\" (UniqueName: \"kubernetes.io/projected/39a4d24f-6b5b-48fc-ab66-1ad33462c477-kube-api-access-nblg2\") pod \"39a4d24f-6b5b-48fc-ab66-1ad33462c477\" (UID: \"39a4d24f-6b5b-48fc-ab66-1ad33462c477\") " Nov 28 07:19:24 crc kubenswrapper[4922]: I1128 07:19:24.321934 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/39a4d24f-6b5b-48fc-ab66-1ad33462c477-config\") pod \"39a4d24f-6b5b-48fc-ab66-1ad33462c477\" (UID: \"39a4d24f-6b5b-48fc-ab66-1ad33462c477\") " Nov 28 07:19:24 crc kubenswrapper[4922]: I1128 07:19:24.321958 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9zspr\" (UniqueName: \"kubernetes.io/projected/98e654e6-cf7b-469f-aa60-118fee0e3764-kube-api-access-9zspr\") pod \"98e654e6-cf7b-469f-aa60-118fee0e3764\" (UID: \"98e654e6-cf7b-469f-aa60-118fee0e3764\") " Nov 28 07:19:24 crc kubenswrapper[4922]: I1128 07:19:24.321982 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovndbcluster-sb-etc-ovn\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"39a4d24f-6b5b-48fc-ab66-1ad33462c477\" (UID: \"39a4d24f-6b5b-48fc-ab66-1ad33462c477\") " Nov 28 07:19:24 crc kubenswrapper[4922]: I1128 07:19:24.322009 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/39a4d24f-6b5b-48fc-ab66-1ad33462c477-scripts\") pod \"39a4d24f-6b5b-48fc-ab66-1ad33462c477\" (UID: \"39a4d24f-6b5b-48fc-ab66-1ad33462c477\") " Nov 28 07:19:24 crc kubenswrapper[4922]: I1128 07:19:24.322034 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/98e654e6-cf7b-469f-aa60-118fee0e3764-combined-ca-bundle\") pod \"98e654e6-cf7b-469f-aa60-118fee0e3764\" (UID: \"98e654e6-cf7b-469f-aa60-118fee0e3764\") " Nov 28 07:19:24 crc kubenswrapper[4922]: I1128 07:19:24.322071 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/39a4d24f-6b5b-48fc-ab66-1ad33462c477-combined-ca-bundle\") pod \"39a4d24f-6b5b-48fc-ab66-1ad33462c477\" (UID: \"39a4d24f-6b5b-48fc-ab66-1ad33462c477\") " Nov 28 07:19:24 crc kubenswrapper[4922]: I1128 07:19:24.322104 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-novncproxy-tls-certs\" (UniqueName: \"kubernetes.io/secret/98e654e6-cf7b-469f-aa60-118fee0e3764-nova-novncproxy-tls-certs\") pod \"98e654e6-cf7b-469f-aa60-118fee0e3764\" (UID: \"98e654e6-cf7b-469f-aa60-118fee0e3764\") " Nov 28 07:19:24 crc kubenswrapper[4922]: I1128 07:19:24.323481 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/39a4d24f-6b5b-48fc-ab66-1ad33462c477-config" (OuterVolumeSpecName: "config") pod "39a4d24f-6b5b-48fc-ab66-1ad33462c477" 
(UID: "39a4d24f-6b5b-48fc-ab66-1ad33462c477"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 07:19:24 crc kubenswrapper[4922]: I1128 07:19:24.325335 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/39a4d24f-6b5b-48fc-ab66-1ad33462c477-scripts" (OuterVolumeSpecName: "scripts") pod "39a4d24f-6b5b-48fc-ab66-1ad33462c477" (UID: "39a4d24f-6b5b-48fc-ab66-1ad33462c477"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 07:19:24 crc kubenswrapper[4922]: I1128 07:19:24.325864 4922 reconciler_common.go:293] "Volume detached for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/39a4d24f-6b5b-48fc-ab66-1ad33462c477-ovsdb-rundir\") on node \"crc\" DevicePath \"\"" Nov 28 07:19:24 crc kubenswrapper[4922]: I1128 07:19:24.326210 4922 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-65xxr\" (UniqueName: \"kubernetes.io/projected/d27299ac-7d8d-4485-86fb-6ac7f34ea1ae-kube-api-access-65xxr\") on node \"crc\" DevicePath \"\"" Nov 28 07:19:24 crc kubenswrapper[4922]: I1128 07:19:24.326296 4922 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") on node \"crc\" " Nov 28 07:19:24 crc kubenswrapper[4922]: I1128 07:19:24.326314 4922 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/678d1f5b-5ebc-4b9e-b5ab-316ec7dfda05-config-data-custom\") on node \"crc\" DevicePath \"\"" Nov 28 07:19:24 crc kubenswrapper[4922]: I1128 07:19:24.326366 4922 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/39a4d24f-6b5b-48fc-ab66-1ad33462c477-config\") on node \"crc\" DevicePath \"\"" Nov 28 07:19:24 crc kubenswrapper[4922]: I1128 07:19:24.326380 4922 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/39a4d24f-6b5b-48fc-ab66-1ad33462c477-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 07:19:24 crc kubenswrapper[4922]: I1128 07:19:24.326392 4922 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nbcvt\" (UniqueName: \"kubernetes.io/projected/678d1f5b-5ebc-4b9e-b5ab-316ec7dfda05-kube-api-access-nbcvt\") on node \"crc\" DevicePath \"\"" Nov 28 07:19:24 crc kubenswrapper[4922]: I1128 07:19:24.326425 4922 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/678d1f5b-5ebc-4b9e-b5ab-316ec7dfda05-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 07:19:25 crc kubenswrapper[4922]: I1128 07:19:24.361359 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/39a4d24f-6b5b-48fc-ab66-1ad33462c477-kube-api-access-nblg2" (OuterVolumeSpecName: "kube-api-access-nblg2") pod "39a4d24f-6b5b-48fc-ab66-1ad33462c477" (UID: "39a4d24f-6b5b-48fc-ab66-1ad33462c477"). InnerVolumeSpecName "kube-api-access-nblg2". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 07:19:25 crc kubenswrapper[4922]: I1128 07:19:24.368506 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/98e654e6-cf7b-469f-aa60-118fee0e3764-kube-api-access-9zspr" (OuterVolumeSpecName: "kube-api-access-9zspr") pod "98e654e6-cf7b-469f-aa60-118fee0e3764" (UID: "98e654e6-cf7b-469f-aa60-118fee0e3764"). InnerVolumeSpecName "kube-api-access-9zspr". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 07:19:25 crc kubenswrapper[4922]: I1128 07:19:24.369830 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage04-crc" (OuterVolumeSpecName: "ovndbcluster-sb-etc-ovn") pod "39a4d24f-6b5b-48fc-ab66-1ad33462c477" (UID: "39a4d24f-6b5b-48fc-ab66-1ad33462c477"). InnerVolumeSpecName "local-storage04-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Nov 28 07:19:25 crc kubenswrapper[4922]: I1128 07:19:24.428635 4922 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nblg2\" (UniqueName: \"kubernetes.io/projected/39a4d24f-6b5b-48fc-ab66-1ad33462c477-kube-api-access-nblg2\") on node \"crc\" DevicePath \"\"" Nov 28 07:19:25 crc kubenswrapper[4922]: I1128 07:19:24.428661 4922 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9zspr\" (UniqueName: \"kubernetes.io/projected/98e654e6-cf7b-469f-aa60-118fee0e3764-kube-api-access-9zspr\") on node \"crc\" DevicePath \"\"" Nov 28 07:19:25 crc kubenswrapper[4922]: I1128 07:19:24.428681 4922 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") on node \"crc\" " Nov 28 07:19:25 crc kubenswrapper[4922]: I1128 07:19:24.464103 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/novacell032a6-account-delete-tcwgc"] Nov 28 07:19:25 crc kubenswrapper[4922]: I1128 07:19:24.751583 4922 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage04-crc" (UniqueName: "kubernetes.io/local-volume/local-storage04-crc") on node "crc" Nov 28 07:19:25 crc kubenswrapper[4922]: I1128 07:19:24.790339 4922 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage05-crc" (UniqueName: "kubernetes.io/local-volume/local-storage05-crc") on node "crc" Nov 28 07:19:25 crc kubenswrapper[4922]: I1128 07:19:24.841470 4922 reconciler_common.go:293] "Volume detached for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") on node \"crc\" DevicePath \"\"" Nov 28 07:19:25 crc kubenswrapper[4922]: I1128 07:19:24.841499 4922 reconciler_common.go:293] "Volume detached for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") on node \"crc\" DevicePath \"\"" Nov 28 07:19:25 crc kubenswrapper[4922]: I1128 07:19:24.890793 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/39a4d24f-6b5b-48fc-ab66-1ad33462c477-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "39a4d24f-6b5b-48fc-ab66-1ad33462c477" (UID: "39a4d24f-6b5b-48fc-ab66-1ad33462c477"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 07:19:25 crc kubenswrapper[4922]: I1128 07:19:24.954222 4922 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/39a4d24f-6b5b-48fc-ab66-1ad33462c477-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 07:19:25 crc kubenswrapper[4922]: I1128 07:19:25.165391 4922 scope.go:117] "RemoveContainer" containerID="4e586197e5573dfc5e1c9ed3428653b242cd1c4575f148e1bde1a32ab14f8f71" Nov 28 07:19:25 crc kubenswrapper[4922]: E1128 07:19:25.174774 4922 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4e586197e5573dfc5e1c9ed3428653b242cd1c4575f148e1bde1a32ab14f8f71\": container with ID starting with 4e586197e5573dfc5e1c9ed3428653b242cd1c4575f148e1bde1a32ab14f8f71 not found: ID does not exist" containerID="4e586197e5573dfc5e1c9ed3428653b242cd1c4575f148e1bde1a32ab14f8f71" Nov 28 07:19:25 crc kubenswrapper[4922]: I1128 07:19:25.174817 4922 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4e586197e5573dfc5e1c9ed3428653b242cd1c4575f148e1bde1a32ab14f8f71"} err="failed to get container status \"4e586197e5573dfc5e1c9ed3428653b242cd1c4575f148e1bde1a32ab14f8f71\": rpc error: code = NotFound desc = could not find container \"4e586197e5573dfc5e1c9ed3428653b242cd1c4575f148e1bde1a32ab14f8f71\": container with ID starting with 4e586197e5573dfc5e1c9ed3428653b242cd1c4575f148e1bde1a32ab14f8f71 not found: ID does not exist" Nov 28 07:19:25 crc kubenswrapper[4922]: I1128 07:19:25.174840 4922 scope.go:117] "RemoveContainer" containerID="070987feaf271fc91c9916a76b022635a4c92b91a9f70a5a74eccab2983e3ede" Nov 28 07:19:25 crc kubenswrapper[4922]: I1128 07:19:25.204384 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/98e654e6-cf7b-469f-aa60-118fee0e3764-config-data" (OuterVolumeSpecName: "config-data") pod "98e654e6-cf7b-469f-aa60-118fee0e3764" (UID: "98e654e6-cf7b-469f-aa60-118fee0e3764"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 07:19:25 crc kubenswrapper[4922]: I1128 07:19:25.204486 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d27299ac-7d8d-4485-86fb-6ac7f34ea1ae-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "d27299ac-7d8d-4485-86fb-6ac7f34ea1ae" (UID: "d27299ac-7d8d-4485-86fb-6ac7f34ea1ae"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 07:19:25 crc kubenswrapper[4922]: I1128 07:19:25.227619 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/678d1f5b-5ebc-4b9e-b5ab-316ec7dfda05-config-data" (OuterVolumeSpecName: "config-data") pod "678d1f5b-5ebc-4b9e-b5ab-316ec7dfda05" (UID: "678d1f5b-5ebc-4b9e-b5ab-316ec7dfda05"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 07:19:25 crc kubenswrapper[4922]: I1128 07:19:25.263581 4922 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/cinder-api-0" podUID="c038b865-4b32-4be3-9e0a-8c40dc140a68" containerName="cinder-api" probeResult="failure" output="Get \"https://10.217.0.164:8776/healthcheck\": read tcp 10.217.0.2:45160->10.217.0.164:8776: read: connection reset by peer" Nov 28 07:19:25 crc kubenswrapper[4922]: I1128 07:19:25.264977 4922 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/678d1f5b-5ebc-4b9e-b5ab-316ec7dfda05-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 07:19:25 crc kubenswrapper[4922]: I1128 07:19:25.264995 4922 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d27299ac-7d8d-4485-86fb-6ac7f34ea1ae-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 07:19:25 crc kubenswrapper[4922]: I1128 07:19:25.265007 4922 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/98e654e6-cf7b-469f-aa60-118fee0e3764-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 07:19:25 crc kubenswrapper[4922]: I1128 07:19:25.301452 4922 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/openstack-cell1-galera-0" Nov 28 07:19:25 crc kubenswrapper[4922]: I1128 07:19:25.309867 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-proxy-cc5b55cb5-8tgkn" event={"ID":"c10f3b66-a7e0-4690-939a-5938de689b3a","Type":"ContainerDied","Data":"8066483262cfa9fff7a1529c49ca1ea9c235d6c9dadc545550e5346dee8a9c67"} Nov 28 07:19:25 crc kubenswrapper[4922]: I1128 07:19:25.309897 4922 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="8066483262cfa9fff7a1529c49ca1ea9c235d6c9dadc545550e5346dee8a9c67" Nov 28 07:19:25 crc kubenswrapper[4922]: I1128 07:19:25.311736 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-lpw65" event={"ID":"21fdc98b-667f-44b1-9fae-87f96ba4b514","Type":"ContainerStarted","Data":"cb3eeaf50ddcbe2019ae022ab8a865cfca1a5222a14f1846ee522f8f346f3390"} Nov 28 07:19:25 crc kubenswrapper[4922]: I1128 07:19:25.312402 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/98e654e6-cf7b-469f-aa60-118fee0e3764-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "98e654e6-cf7b-469f-aa60-118fee0e3764" (UID: "98e654e6-cf7b-469f-aa60-118fee0e3764"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 07:19:25 crc kubenswrapper[4922]: I1128 07:19:25.328511 4922 generic.go:334] "Generic (PLEG): container finished" podID="92eb4ce5-fb24-4b33-8e79-6f4e7ba96372" containerID="53e8546901e2e79532d9bcc222c2dcdbe253ab144bd12f21ece8f360f849d3e9" exitCode=0 Nov 28 07:19:25 crc kubenswrapper[4922]: I1128 07:19:25.328591 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron5206-account-delete-64tk8" event={"ID":"92eb4ce5-fb24-4b33-8e79-6f4e7ba96372","Type":"ContainerDied","Data":"53e8546901e2e79532d9bcc222c2dcdbe253ab144bd12f21ece8f360f849d3e9"} Nov 28 07:19:25 crc kubenswrapper[4922]: I1128 07:19:25.349968 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/novacell032a6-account-delete-tcwgc" event={"ID":"5e4e7296-ad39-41c1-9399-b3c9072c9158","Type":"ContainerStarted","Data":"d4dab28ad08f4dcc5cb1f357a3e1dd6d6ac36c62db7101a62cbba27885bdda92"} Nov 28 07:19:25 crc kubenswrapper[4922]: I1128 07:19:25.364416 4922 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-lpw65" podStartSLOduration=4.926029201 podStartE2EDuration="9.364400588s" podCreationTimestamp="2025-11-28 07:19:16 +0000 UTC" firstStartedPulling="2025-11-28 07:19:18.513320448 +0000 UTC m=+1603.433716060" lastFinishedPulling="2025-11-28 07:19:22.951691865 +0000 UTC m=+1607.872087447" observedRunningTime="2025-11-28 07:19:25.364037588 +0000 UTC m=+1610.284433170" watchObservedRunningTime="2025-11-28 07:19:25.364400588 +0000 UTC m=+1610.284796170" Nov 28 07:19:25 crc kubenswrapper[4922]: I1128 07:19:25.383459 4922 generic.go:334] "Generic (PLEG): container finished" podID="1018f07c-38b9-440f-b126-26e59293e757" containerID="0e1a10156d7aa395ea45da2312dbadfb3411b35bdd8119560d9c3758fe29d917" exitCode=0 Nov 28 07:19:25 crc kubenswrapper[4922]: I1128 07:19:25.383550 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cindere247-account-delete-5kjlk" event={"ID":"1018f07c-38b9-440f-b126-26e59293e757","Type":"ContainerDied","Data":"0e1a10156d7aa395ea45da2312dbadfb3411b35bdd8119560d9c3758fe29d917"} Nov 28 07:19:25 crc kubenswrapper[4922]: I1128 07:19:25.390579 4922 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/98e654e6-cf7b-469f-aa60-118fee0e3764-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 07:19:25 crc kubenswrapper[4922]: I1128 07:19:25.408822 4922 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/barbican-keystone-listener-7d9b9667cd-5cmld" podUID="454a2683-850f-4ce0-8ebe-7758105dd255" containerName="barbican-keystone-listener-log" containerID="cri-o://44df4a4a257621f216f61d08c9cd358b5d8198f41a5d916577451fe0af7bb74c" gracePeriod=30 Nov 28 07:19:25 crc kubenswrapper[4922]: I1128 07:19:25.409309 4922 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/barbican-keystone-listener-7d9b9667cd-5cmld" podUID="454a2683-850f-4ce0-8ebe-7758105dd255" containerName="barbican-keystone-listener" containerID="cri-o://5a82b37419ba4b4e6e4d1a23689e9c832d484d7b2a65dc11f8dd0cb4e950f0bb" gracePeriod=30 Nov 28 07:19:25 crc kubenswrapper[4922]: I1128 07:19:25.443477 4922 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2070fbd8-e847-4b99-ba55-4579804bbc57" path="/var/lib/kubelet/pods/2070fbd8-e847-4b99-ba55-4579804bbc57/volumes" Nov 28 07:19:25 crc kubenswrapper[4922]: I1128 07:19:25.444582 4922 kubelet_volumes.go:163] 
"Cleaned up orphaned pod volumes dir" podUID="264e478f-8337-4f40-b005-84a7cd802eaa" path="/var/lib/kubelet/pods/264e478f-8337-4f40-b005-84a7cd802eaa/volumes" Nov 28 07:19:25 crc kubenswrapper[4922]: I1128 07:19:25.447526 4922 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Nov 28 07:19:25 crc kubenswrapper[4922]: I1128 07:19:25.450602 4922 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/openstack-cell1-galera-0" Nov 28 07:19:25 crc kubenswrapper[4922]: I1128 07:19:25.452826 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/39a4d24f-6b5b-48fc-ab66-1ad33462c477-ovsdbserver-sb-tls-certs" (OuterVolumeSpecName: "ovsdbserver-sb-tls-certs") pod "39a4d24f-6b5b-48fc-ab66-1ad33462c477" (UID: "39a4d24f-6b5b-48fc-ab66-1ad33462c477"). InnerVolumeSpecName "ovsdbserver-sb-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 07:19:25 crc kubenswrapper[4922]: I1128 07:19:25.455319 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/98e654e6-cf7b-469f-aa60-118fee0e3764-nova-novncproxy-tls-certs" (OuterVolumeSpecName: "nova-novncproxy-tls-certs") pod "98e654e6-cf7b-469f-aa60-118fee0e3764" (UID: "98e654e6-cf7b-469f-aa60-118fee0e3764"). InnerVolumeSpecName "nova-novncproxy-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 07:19:25 crc kubenswrapper[4922]: I1128 07:19:25.481120 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/39a4d24f-6b5b-48fc-ab66-1ad33462c477-metrics-certs-tls-certs" (OuterVolumeSpecName: "metrics-certs-tls-certs") pod "39a4d24f-6b5b-48fc-ab66-1ad33462c477" (UID: "39a4d24f-6b5b-48fc-ab66-1ad33462c477"). InnerVolumeSpecName "metrics-certs-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 07:19:25 crc kubenswrapper[4922]: I1128 07:19:25.492824 4922 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-keystone-listener-7d9b9667cd-5cmld" podStartSLOduration=6.492807734 podStartE2EDuration="6.492807734s" podCreationTimestamp="2025-11-28 07:19:19 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 07:19:25.491612763 +0000 UTC m=+1610.412008355" watchObservedRunningTime="2025-11-28 07:19:25.492807734 +0000 UTC m=+1610.413203316" Nov 28 07:19:25 crc kubenswrapper[4922]: I1128 07:19:25.495993 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/349fc74f-b0ac-437d-89ab-7106192b8e9e-config-data-generated\") pod \"349fc74f-b0ac-437d-89ab-7106192b8e9e\" (UID: \"349fc74f-b0ac-437d-89ab-7106192b8e9e\") " Nov 28 07:19:25 crc kubenswrapper[4922]: I1128 07:19:25.496051 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/349fc74f-b0ac-437d-89ab-7106192b8e9e-combined-ca-bundle\") pod \"349fc74f-b0ac-437d-89ab-7106192b8e9e\" (UID: \"349fc74f-b0ac-437d-89ab-7106192b8e9e\") " Nov 28 07:19:25 crc kubenswrapper[4922]: I1128 07:19:25.496089 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"mysql-db\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"349fc74f-b0ac-437d-89ab-7106192b8e9e\" (UID: \"349fc74f-b0ac-437d-89ab-7106192b8e9e\") " Nov 28 07:19:25 crc kubenswrapper[4922]: I1128 07:19:25.496162 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/349fc74f-b0ac-437d-89ab-7106192b8e9e-galera-tls-certs\") pod \"349fc74f-b0ac-437d-89ab-7106192b8e9e\" (UID: \"349fc74f-b0ac-437d-89ab-7106192b8e9e\") " Nov 28 07:19:25 crc kubenswrapper[4922]: I1128 07:19:25.496187 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/349fc74f-b0ac-437d-89ab-7106192b8e9e-kolla-config\") pod \"349fc74f-b0ac-437d-89ab-7106192b8e9e\" (UID: \"349fc74f-b0ac-437d-89ab-7106192b8e9e\") " Nov 28 07:19:25 crc kubenswrapper[4922]: I1128 07:19:25.496237 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qp7dg\" (UniqueName: \"kubernetes.io/projected/349fc74f-b0ac-437d-89ab-7106192b8e9e-kube-api-access-qp7dg\") pod \"349fc74f-b0ac-437d-89ab-7106192b8e9e\" (UID: \"349fc74f-b0ac-437d-89ab-7106192b8e9e\") " Nov 28 07:19:25 crc kubenswrapper[4922]: I1128 07:19:25.496277 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/349fc74f-b0ac-437d-89ab-7106192b8e9e-config-data-default\") pod \"349fc74f-b0ac-437d-89ab-7106192b8e9e\" (UID: \"349fc74f-b0ac-437d-89ab-7106192b8e9e\") " Nov 28 07:19:25 crc kubenswrapper[4922]: I1128 07:19:25.496313 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/349fc74f-b0ac-437d-89ab-7106192b8e9e-operator-scripts\") pod \"349fc74f-b0ac-437d-89ab-7106192b8e9e\" (UID: \"349fc74f-b0ac-437d-89ab-7106192b8e9e\") " Nov 28 07:19:25 crc kubenswrapper[4922]: I1128 07:19:25.496556 4922 
reconciler_common.go:293] "Volume detached for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/39a4d24f-6b5b-48fc-ab66-1ad33462c477-metrics-certs-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 28 07:19:25 crc kubenswrapper[4922]: I1128 07:19:25.496575 4922 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb-tls-certs\" (UniqueName: \"kubernetes.io/secret/39a4d24f-6b5b-48fc-ab66-1ad33462c477-ovsdbserver-sb-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 28 07:19:25 crc kubenswrapper[4922]: I1128 07:19:25.496585 4922 reconciler_common.go:293] "Volume detached for volume \"nova-novncproxy-tls-certs\" (UniqueName: \"kubernetes.io/secret/98e654e6-cf7b-469f-aa60-118fee0e3764-nova-novncproxy-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 28 07:19:25 crc kubenswrapper[4922]: I1128 07:19:25.503342 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/349fc74f-b0ac-437d-89ab-7106192b8e9e-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "349fc74f-b0ac-437d-89ab-7106192b8e9e" (UID: "349fc74f-b0ac-437d-89ab-7106192b8e9e"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 07:19:25 crc kubenswrapper[4922]: I1128 07:19:25.505286 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/349fc74f-b0ac-437d-89ab-7106192b8e9e-config-data-generated" (OuterVolumeSpecName: "config-data-generated") pod "349fc74f-b0ac-437d-89ab-7106192b8e9e" (UID: "349fc74f-b0ac-437d-89ab-7106192b8e9e"). InnerVolumeSpecName "config-data-generated". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 07:19:25 crc kubenswrapper[4922]: I1128 07:19:25.509445 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/349fc74f-b0ac-437d-89ab-7106192b8e9e-kolla-config" (OuterVolumeSpecName: "kolla-config") pod "349fc74f-b0ac-437d-89ab-7106192b8e9e" (UID: "349fc74f-b0ac-437d-89ab-7106192b8e9e"). InnerVolumeSpecName "kolla-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 07:19:25 crc kubenswrapper[4922]: I1128 07:19:25.512382 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/349fc74f-b0ac-437d-89ab-7106192b8e9e-kube-api-access-qp7dg" (OuterVolumeSpecName: "kube-api-access-qp7dg") pod "349fc74f-b0ac-437d-89ab-7106192b8e9e" (UID: "349fc74f-b0ac-437d-89ab-7106192b8e9e"). InnerVolumeSpecName "kube-api-access-qp7dg". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 07:19:25 crc kubenswrapper[4922]: I1128 07:19:25.513026 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/349fc74f-b0ac-437d-89ab-7106192b8e9e-config-data-default" (OuterVolumeSpecName: "config-data-default") pod "349fc74f-b0ac-437d-89ab-7106192b8e9e" (UID: "349fc74f-b0ac-437d-89ab-7106192b8e9e"). InnerVolumeSpecName "config-data-default". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 07:19:25 crc kubenswrapper[4922]: I1128 07:19:25.553353 4922 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/barbican-worker-b597c9f45-g422b" podUID="6ef73d39-2ed2-4168-8598-e0749aa0a26b" containerName="barbican-worker-log" containerID="cri-o://5fac949f1f96664181265a651447466463b94e2a2793695fa4197d9252bd2acd" gracePeriod=30 Nov 28 07:19:25 crc kubenswrapper[4922]: I1128 07:19:25.553501 4922 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/barbican-worker-b597c9f45-g422b" podUID="6ef73d39-2ed2-4168-8598-e0749aa0a26b" containerName="barbican-worker" containerID="cri-o://10a36e42981000e949b616fd12cf1f58388e831215f4f330d4cdebd8733d1f6a" gracePeriod=30 Nov 28 07:19:25 crc kubenswrapper[4922]: I1128 07:19:25.566360 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/678d1f5b-5ebc-4b9e-b5ab-316ec7dfda05-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "678d1f5b-5ebc-4b9e-b5ab-316ec7dfda05" (UID: "678d1f5b-5ebc-4b9e-b5ab-316ec7dfda05"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 07:19:25 crc kubenswrapper[4922]: I1128 07:19:25.599899 4922 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/678d1f5b-5ebc-4b9e-b5ab-316ec7dfda05-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 07:19:25 crc kubenswrapper[4922]: I1128 07:19:25.599927 4922 reconciler_common.go:293] "Volume detached for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/349fc74f-b0ac-437d-89ab-7106192b8e9e-kolla-config\") on node \"crc\" DevicePath \"\"" Nov 28 07:19:25 crc kubenswrapper[4922]: I1128 07:19:25.599936 4922 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qp7dg\" (UniqueName: \"kubernetes.io/projected/349fc74f-b0ac-437d-89ab-7106192b8e9e-kube-api-access-qp7dg\") on node \"crc\" DevicePath \"\"" Nov 28 07:19:25 crc kubenswrapper[4922]: I1128 07:19:25.599945 4922 reconciler_common.go:293] "Volume detached for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/349fc74f-b0ac-437d-89ab-7106192b8e9e-config-data-default\") on node \"crc\" DevicePath \"\"" Nov 28 07:19:25 crc kubenswrapper[4922]: I1128 07:19:25.599953 4922 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/349fc74f-b0ac-437d-89ab-7106192b8e9e-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 07:19:25 crc kubenswrapper[4922]: I1128 07:19:25.599962 4922 reconciler_common.go:293] "Volume detached for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/349fc74f-b0ac-437d-89ab-7106192b8e9e-config-data-generated\") on node \"crc\" DevicePath \"\"" Nov 28 07:19:25 crc kubenswrapper[4922]: I1128 07:19:25.600064 4922 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/nova-metadata-0" podUID="7e1382f2-6597-4c09-a171-8709e4b9f5f7" containerName="nova-metadata-metadata" probeResult="failure" output="Get \"https://10.217.0.200:8775/\": read tcp 10.217.0.2:47672->10.217.0.200:8775: read: connection reset by peer" Nov 28 07:19:25 crc kubenswrapper[4922]: I1128 07:19:25.600314 4922 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/nova-metadata-0" podUID="7e1382f2-6597-4c09-a171-8709e4b9f5f7" containerName="nova-metadata-log" probeResult="failure" output="Get 
\"https://10.217.0.200:8775/\": read tcp 10.217.0.2:47670->10.217.0.200:8775: read: connection reset by peer" Nov 28 07:19:25 crc kubenswrapper[4922]: I1128 07:19:25.615091 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage08-crc" (OuterVolumeSpecName: "mysql-db") pod "349fc74f-b0ac-437d-89ab-7106192b8e9e" (UID: "349fc74f-b0ac-437d-89ab-7106192b8e9e"). InnerVolumeSpecName "local-storage08-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Nov 28 07:19:25 crc kubenswrapper[4922]: I1128 07:19:25.686507 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/349fc74f-b0ac-437d-89ab-7106192b8e9e-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "349fc74f-b0ac-437d-89ab-7106192b8e9e" (UID: "349fc74f-b0ac-437d-89ab-7106192b8e9e"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 07:19:25 crc kubenswrapper[4922]: I1128 07:19:25.702349 4922 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/349fc74f-b0ac-437d-89ab-7106192b8e9e-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 07:19:25 crc kubenswrapper[4922]: I1128 07:19:25.702396 4922 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") on node \"crc\" " Nov 28 07:19:25 crc kubenswrapper[4922]: I1128 07:19:25.713805 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d27299ac-7d8d-4485-86fb-6ac7f34ea1ae-ovsdbserver-nb-tls-certs" (OuterVolumeSpecName: "ovsdbserver-nb-tls-certs") pod "d27299ac-7d8d-4485-86fb-6ac7f34ea1ae" (UID: "d27299ac-7d8d-4485-86fb-6ac7f34ea1ae"). InnerVolumeSpecName "ovsdbserver-nb-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 07:19:25 crc kubenswrapper[4922]: I1128 07:19:25.781201 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d27299ac-7d8d-4485-86fb-6ac7f34ea1ae-metrics-certs-tls-certs" (OuterVolumeSpecName: "metrics-certs-tls-certs") pod "d27299ac-7d8d-4485-86fb-6ac7f34ea1ae" (UID: "d27299ac-7d8d-4485-86fb-6ac7f34ea1ae"). InnerVolumeSpecName "metrics-certs-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 07:19:25 crc kubenswrapper[4922]: I1128 07:19:25.789153 4922 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-worker-b597c9f45-g422b" podStartSLOduration=6.789137399 podStartE2EDuration="6.789137399s" podCreationTimestamp="2025-11-28 07:19:19 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 07:19:25.744981174 +0000 UTC m=+1610.665376756" watchObservedRunningTime="2025-11-28 07:19:25.789137399 +0000 UTC m=+1610.709532981" Nov 28 07:19:25 crc kubenswrapper[4922]: I1128 07:19:25.803059 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/98e654e6-cf7b-469f-aa60-118fee0e3764-vencrypt-tls-certs" (OuterVolumeSpecName: "vencrypt-tls-certs") pod "98e654e6-cf7b-469f-aa60-118fee0e3764" (UID: "98e654e6-cf7b-469f-aa60-118fee0e3764"). InnerVolumeSpecName "vencrypt-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 07:19:25 crc kubenswrapper[4922]: I1128 07:19:25.804727 4922 reconciler_common.go:293] "Volume detached for volume \"vencrypt-tls-certs\" (UniqueName: \"kubernetes.io/secret/98e654e6-cf7b-469f-aa60-118fee0e3764-vencrypt-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 28 07:19:25 crc kubenswrapper[4922]: I1128 07:19:25.804865 4922 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb-tls-certs\" (UniqueName: \"kubernetes.io/secret/d27299ac-7d8d-4485-86fb-6ac7f34ea1ae-ovsdbserver-nb-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 28 07:19:25 crc kubenswrapper[4922]: I1128 07:19:25.804965 4922 reconciler_common.go:293] "Volume detached for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/d27299ac-7d8d-4485-86fb-6ac7f34ea1ae-metrics-certs-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 28 07:19:25 crc kubenswrapper[4922]: I1128 07:19:25.818409 4922 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage08-crc" (UniqueName: "kubernetes.io/local-volume/local-storage08-crc") on node "crc" Nov 28 07:19:25 crc kubenswrapper[4922]: I1128 07:19:25.827917 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/349fc74f-b0ac-437d-89ab-7106192b8e9e-galera-tls-certs" (OuterVolumeSpecName: "galera-tls-certs") pod "349fc74f-b0ac-437d-89ab-7106192b8e9e" (UID: "349fc74f-b0ac-437d-89ab-7106192b8e9e"). InnerVolumeSpecName "galera-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 07:19:25 crc kubenswrapper[4922]: I1128 07:19:25.907485 4922 reconciler_common.go:293] "Volume detached for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") on node \"crc\" DevicePath \"\"" Nov 28 07:19:25 crc kubenswrapper[4922]: I1128 07:19:25.907522 4922 reconciler_common.go:293] "Volume detached for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/349fc74f-b0ac-437d-89ab-7106192b8e9e-galera-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 28 07:19:26 crc kubenswrapper[4922]: E1128 07:19:26.414643 4922 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 94b76863ba343d4c01e9d0ccc2f68e6d6a4daef999bf262b9a27c34ee538c8c3 is running failed: container process not found" containerID="94b76863ba343d4c01e9d0ccc2f68e6d6a4daef999bf262b9a27c34ee538c8c3" cmd=["/usr/bin/pgrep","-r","DRST","nova-conductor"] Nov 28 07:19:26 crc kubenswrapper[4922]: E1128 07:19:26.416638 4922 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 94b76863ba343d4c01e9d0ccc2f68e6d6a4daef999bf262b9a27c34ee538c8c3 is running failed: container process not found" containerID="94b76863ba343d4c01e9d0ccc2f68e6d6a4daef999bf262b9a27c34ee538c8c3" cmd=["/usr/bin/pgrep","-r","DRST","nova-conductor"] Nov 28 07:19:26 crc kubenswrapper[4922]: E1128 07:19:26.424914 4922 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 94b76863ba343d4c01e9d0ccc2f68e6d6a4daef999bf262b9a27c34ee538c8c3 is running failed: container process not found" containerID="94b76863ba343d4c01e9d0ccc2f68e6d6a4daef999bf262b9a27c34ee538c8c3" cmd=["/usr/bin/pgrep","-r","DRST","nova-conductor"] Nov 28 07:19:26 crc kubenswrapper[4922]: E1128 07:19:26.424992 4922 prober.go:104] "Probe 
errored" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 94b76863ba343d4c01e9d0ccc2f68e6d6a4daef999bf262b9a27c34ee538c8c3 is running failed: container process not found" probeType="Readiness" pod="openstack/nova-cell1-conductor-0" podUID="b2566655-e076-471c-af4c-1e218f70ebe1" containerName="nova-cell1-conductor-conductor" Nov 28 07:19:26 crc kubenswrapper[4922]: E1128 07:19:26.502997 4922 kubelet.go:2526] "Housekeeping took longer than expected" err="housekeeping took too long" expected="1s" actual="1.106s" Nov 28 07:19:26 crc kubenswrapper[4922]: I1128 07:19:26.503144 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-keystone-listener-7d9b9667cd-5cmld" event={"ID":"454a2683-850f-4ce0-8ebe-7758105dd255","Type":"ContainerStarted","Data":"5a82b37419ba4b4e6e4d1a23689e9c832d484d7b2a65dc11f8dd0cb4e950f0bb"} Nov 28 07:19:26 crc kubenswrapper[4922]: I1128 07:19:26.503170 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-757bbb5fbd-lx4kn" event={"ID":"f1e0e318-5b90-4c18-ba95-fc261ffb519d","Type":"ContainerStarted","Data":"e36de2182dd35fee67fc5ebf689f54d2adc2e37e33fa381890f944ad027f5c3a"} Nov 28 07:19:26 crc kubenswrapper[4922]: I1128 07:19:26.503190 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"349fc74f-b0ac-437d-89ab-7106192b8e9e","Type":"ContainerDied","Data":"0473de9d0028c45b30a6a715907f3797e6865f1ca57636aef4dea1cc1978c9a0"} Nov 28 07:19:26 crc kubenswrapper[4922]: I1128 07:19:26.503204 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/novaapi78f1-account-delete-mwv4p" event={"ID":"2425a44a-c3c8-4533-9aa6-deb657556efb","Type":"ContainerStarted","Data":"7bfa8c1248500fef224bcac683aacd82b8b175f254693177bb479a8cb36f835f"} Nov 28 07:19:26 crc kubenswrapper[4922]: I1128 07:19:26.503214 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-worker-b597c9f45-g422b" event={"ID":"6ef73d39-2ed2-4168-8598-e0749aa0a26b","Type":"ContainerStarted","Data":"10a36e42981000e949b616fd12cf1f58388e831215f4f330d4cdebd8733d1f6a"} Nov 28 07:19:26 crc kubenswrapper[4922]: I1128 07:19:26.503250 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican5229-account-delete-qq87v" event={"ID":"3e442fd0-cd46-4c04-afb3-96892d39c0f4","Type":"ContainerStarted","Data":"5341f6059602566c33672ba2224e2aeacf76ae8c7da5acca05962074ca8a9a0d"} Nov 28 07:19:26 crc kubenswrapper[4922]: I1128 07:19:26.503282 4922 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 28 07:19:26 crc kubenswrapper[4922]: I1128 07:19:26.504448 4922 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/kube-state-metrics-0"] Nov 28 07:19:26 crc kubenswrapper[4922]: I1128 07:19:26.504476 4922 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/memcached-0"] Nov 28 07:19:26 crc kubenswrapper[4922]: I1128 07:19:26.504668 4922 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/memcached-0" podUID="8d12d285-16c4-4e64-98d8-cff0f581aee4" containerName="memcached" containerID="cri-o://6327f387153de8972c997a6ac2a21401ab00a932a9990342cf61d1e499fcf45e" gracePeriod=30 Nov 28 07:19:26 crc kubenswrapper[4922]: I1128 07:19:26.510698 4922 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="c5b2a607-b6c1-4e95-b722-8b150c25f371" containerName="ceilometer-central-agent" 
containerID="cri-o://76e1e7d5729fdebb0173eee23985b2e33e4bffc2543f1da0f92592472e21c4ec" gracePeriod=30 Nov 28 07:19:26 crc kubenswrapper[4922]: I1128 07:19:26.512554 4922 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/kube-state-metrics-0" podUID="f672d6bb-97fc-4547-a14b-af27d631fe2a" containerName="kube-state-metrics" containerID="cri-o://97f477a9860f7b7b30cd717b142d2d8ffd74a63e7e7efeaa441ecae8870aecb0" gracePeriod=30 Nov 28 07:19:26 crc kubenswrapper[4922]: I1128 07:19:26.512709 4922 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="c5b2a607-b6c1-4e95-b722-8b150c25f371" containerName="proxy-httpd" containerID="cri-o://46994e6ac2bb018930a78ad0cd1fe4646ebb6039110f4ddf448db9a5fa5fd689" gracePeriod=30 Nov 28 07:19:26 crc kubenswrapper[4922]: I1128 07:19:26.512914 4922 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="c5b2a607-b6c1-4e95-b722-8b150c25f371" containerName="sg-core" containerID="cri-o://1e15529dfacc098964836d9b402c0c6f8dfa68f13f57273de3bff5e93b296db5" gracePeriod=30 Nov 28 07:19:26 crc kubenswrapper[4922]: I1128 07:19:26.513368 4922 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="c5b2a607-b6c1-4e95-b722-8b150c25f371" containerName="ceilometer-notification-agent" containerID="cri-o://3f6adb9e4600f443d95c7ef3d113f08146c9cb6aad10859be6aa10d59ce782f0" gracePeriod=30 Nov 28 07:19:26 crc kubenswrapper[4922]: I1128 07:19:26.556420 4922 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-bootstrap-fj8qk"] Nov 28 07:19:26 crc kubenswrapper[4922]: I1128 07:19:26.566252 4922 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-db-sync-7l6kx"] Nov 28 07:19:26 crc kubenswrapper[4922]: I1128 07:19:26.582588 4922 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-db-sync-7l6kx"] Nov 28 07:19:26 crc kubenswrapper[4922]: I1128 07:19:26.588512 4922 scope.go:117] "RemoveContainer" containerID="e6868b48a7fdc4bd6127081507fdf9eb8d100fe7676d439af911bfdac8246114" Nov 28 07:19:26 crc kubenswrapper[4922]: I1128 07:19:26.601381 4922 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-bootstrap-fj8qk"] Nov 28 07:19:26 crc kubenswrapper[4922]: I1128 07:19:26.610540 4922 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-c6c69b978-txpld"] Nov 28 07:19:26 crc kubenswrapper[4922]: I1128 07:19:26.610783 4922 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/keystone-c6c69b978-txpld" podUID="81469087-f8d4-4499-a1e3-9fe103758289" containerName="keystone-api" containerID="cri-o://e14162c5f538b87ed64ebb17f2d96cca074bc89ff83ab9d46b7b609216c66fb3" gracePeriod=30 Nov 28 07:19:26 crc kubenswrapper[4922]: I1128 07:19:26.620207 4922 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/openstack-galera-0"] Nov 28 07:19:26 crc kubenswrapper[4922]: I1128 07:19:26.645623 4922 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/swift-proxy-cc5b55cb5-8tgkn" Nov 28 07:19:26 crc kubenswrapper[4922]: I1128 07:19:26.660166 4922 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-db-create-c9zx7"] Nov 28 07:19:26 crc kubenswrapper[4922]: I1128 07:19:26.667849 4922 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-f279-account-create-update-jh2bs"] Nov 28 07:19:26 crc kubenswrapper[4922]: I1128 07:19:26.670230 4922 generic.go:334] "Generic (PLEG): container finished" podID="182970fb-401f-404c-81c1-db0294b02167" containerID="d454c9cde22b3235276b93c623bcd54e057f380c3dc05277d2f60b55df5ae160" exitCode=0 Nov 28 07:19:26 crc kubenswrapper[4922]: I1128 07:19:26.670392 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"182970fb-401f-404c-81c1-db0294b02167","Type":"ContainerDied","Data":"d454c9cde22b3235276b93c623bcd54e057f380c3dc05277d2f60b55df5ae160"} Nov 28 07:19:26 crc kubenswrapper[4922]: I1128 07:19:26.670419 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"182970fb-401f-404c-81c1-db0294b02167","Type":"ContainerDied","Data":"027019815c473c0d1a21096a0dde1ca6ef8c331aee9f75ce7caae024df518966"} Nov 28 07:19:26 crc kubenswrapper[4922]: I1128 07:19:26.670431 4922 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="027019815c473c0d1a21096a0dde1ca6ef8c331aee9f75ce7caae024df518966" Nov 28 07:19:26 crc kubenswrapper[4922]: I1128 07:19:26.685331 4922 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-db-create-c9zx7"] Nov 28 07:19:26 crc kubenswrapper[4922]: I1128 07:19:26.696112 4922 generic.go:334] "Generic (PLEG): container finished" podID="ed30601b-1e7c-4aa6-8469-8ff61cd93253" containerID="585e8abe2762a709ae6ce887d0ef00f835169d14caac14b38ddb07298e1c071f" exitCode=0 Nov 28 07:19:26 crc kubenswrapper[4922]: I1128 07:19:26.696171 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"ed30601b-1e7c-4aa6-8469-8ff61cd93253","Type":"ContainerDied","Data":"585e8abe2762a709ae6ce887d0ef00f835169d14caac14b38ddb07298e1c071f"} Nov 28 07:19:26 crc kubenswrapper[4922]: I1128 07:19:26.698099 4922 generic.go:334] "Generic (PLEG): container finished" podID="0cd494ee-6c17-4d94-96c2-9e2fcf02b2bc" containerID="b462e55e4b1d1673313c6dfd787beceb0503ca800228d804b043cfabe37c1295" exitCode=0 Nov 28 07:19:26 crc kubenswrapper[4922]: I1128 07:19:26.698136 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"0cd494ee-6c17-4d94-96c2-9e2fcf02b2bc","Type":"ContainerDied","Data":"b462e55e4b1d1673313c6dfd787beceb0503ca800228d804b043cfabe37c1295"} Nov 28 07:19:26 crc kubenswrapper[4922]: I1128 07:19:26.698150 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"0cd494ee-6c17-4d94-96c2-9e2fcf02b2bc","Type":"ContainerDied","Data":"5e99700beb17dd192eb2195ba9c3daa3841a73599e60487affa058d659a1cad7"} Nov 28 07:19:26 crc kubenswrapper[4922]: I1128 07:19:26.698162 4922 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="5e99700beb17dd192eb2195ba9c3daa3841a73599e60487affa058d659a1cad7" Nov 28 07:19:26 crc kubenswrapper[4922]: I1128 07:19:26.706431 4922 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-f279-account-create-update-jh2bs"] Nov 28 07:19:26 crc kubenswrapper[4922]: I1128 
07:19:26.719048 4922 generic.go:334] "Generic (PLEG): container finished" podID="02d26a97-d447-4f76-90ed-9357e343cd91" containerID="ca51681097b612d4dfe7b851461b5269b74ed8bfc135acb38f4eda642ba424ca" exitCode=0 Nov 28 07:19:26 crc kubenswrapper[4922]: I1128 07:19:26.719103 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-5f78c88b9d-zp4nm" event={"ID":"02d26a97-d447-4f76-90ed-9357e343cd91","Type":"ContainerDied","Data":"ca51681097b612d4dfe7b851461b5269b74ed8bfc135acb38f4eda642ba424ca"} Nov 28 07:19:26 crc kubenswrapper[4922]: I1128 07:19:26.719130 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-5f78c88b9d-zp4nm" event={"ID":"02d26a97-d447-4f76-90ed-9357e343cd91","Type":"ContainerDied","Data":"e011d58685304fd3c603589c2619a6c538d8044a6ab6cc02422272391c262951"} Nov 28 07:19:26 crc kubenswrapper[4922]: I1128 07:19:26.719141 4922 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="e011d58685304fd3c603589c2619a6c538d8044a6ab6cc02422272391c262951" Nov 28 07:19:26 crc kubenswrapper[4922]: E1128 07:19:26.731720 4922 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="ab03f8552f326c9c76a50463baa6a28a8cfaa27ea7ce5e6c3db040730b019068" cmd=["/usr/local/bin/container-scripts/status_check.sh"] Nov 28 07:19:26 crc kubenswrapper[4922]: E1128 07:19:26.734483 4922 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="ab03f8552f326c9c76a50463baa6a28a8cfaa27ea7ce5e6c3db040730b019068" cmd=["/usr/local/bin/container-scripts/status_check.sh"] Nov 28 07:19:26 crc kubenswrapper[4922]: I1128 07:19:26.734735 4922 generic.go:334] "Generic (PLEG): container finished" podID="c038b865-4b32-4be3-9e0a-8c40dc140a68" containerID="b1de83b3a6f902b036e25483c19523a902ba3f397b59a81a79e88727c51fa4bb" exitCode=0 Nov 28 07:19:26 crc kubenswrapper[4922]: I1128 07:19:26.734801 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"c038b865-4b32-4be3-9e0a-8c40dc140a68","Type":"ContainerDied","Data":"b1de83b3a6f902b036e25483c19523a902ba3f397b59a81a79e88727c51fa4bb"} Nov 28 07:19:26 crc kubenswrapper[4922]: I1128 07:19:26.734828 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"c038b865-4b32-4be3-9e0a-8c40dc140a68","Type":"ContainerDied","Data":"f447d48c61d66ccf6c46935127264a15891bc65cafff24e3f5f7f56ca42f5a25"} Nov 28 07:19:26 crc kubenswrapper[4922]: I1128 07:19:26.734840 4922 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="f447d48c61d66ccf6c46935127264a15891bc65cafff24e3f5f7f56ca42f5a25" Nov 28 07:19:26 crc kubenswrapper[4922]: E1128 07:19:26.739171 4922 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="ab03f8552f326c9c76a50463baa6a28a8cfaa27ea7ce5e6c3db040730b019068" cmd=["/usr/local/bin/container-scripts/status_check.sh"] Nov 28 07:19:26 crc kubenswrapper[4922]: E1128 07:19:26.739202 4922 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" 
probeType="Readiness" pod="openstack/ovn-northd-0" podUID="e070595b-ded5-4ba1-8e5d-10dee3f64439" containerName="ovn-northd" Nov 28 07:19:26 crc kubenswrapper[4922]: I1128 07:19:26.745628 4922 generic.go:334] "Generic (PLEG): container finished" podID="1d97532b-e9ff-4031-a82c-3db5e943bfd9" containerID="8d636352f04a5d6b017ec6d07127ee84924adb2187df00d394fe1e1c6dd31678" exitCode=0 Nov 28 07:19:26 crc kubenswrapper[4922]: I1128 07:19:26.745677 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-7947bcd956-482dv" event={"ID":"1d97532b-e9ff-4031-a82c-3db5e943bfd9","Type":"ContainerDied","Data":"8d636352f04a5d6b017ec6d07127ee84924adb2187df00d394fe1e1c6dd31678"} Nov 28 07:19:26 crc kubenswrapper[4922]: I1128 07:19:26.745700 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-7947bcd956-482dv" event={"ID":"1d97532b-e9ff-4031-a82c-3db5e943bfd9","Type":"ContainerDied","Data":"b3b3ec913b61a7608df44accca83fe6463e5e0433ec895ffe07bfa1b397cf3e0"} Nov 28 07:19:26 crc kubenswrapper[4922]: I1128 07:19:26.745709 4922 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="b3b3ec913b61a7608df44accca83fe6463e5e0433ec895ffe07bfa1b397cf3e0" Nov 28 07:19:26 crc kubenswrapper[4922]: I1128 07:19:26.748102 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tjjxw\" (UniqueName: \"kubernetes.io/projected/c10f3b66-a7e0-4690-939a-5938de689b3a-kube-api-access-tjjxw\") pod \"c10f3b66-a7e0-4690-939a-5938de689b3a\" (UID: \"c10f3b66-a7e0-4690-939a-5938de689b3a\") " Nov 28 07:19:26 crc kubenswrapper[4922]: I1128 07:19:26.748133 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c10f3b66-a7e0-4690-939a-5938de689b3a-config-data\") pod \"c10f3b66-a7e0-4690-939a-5938de689b3a\" (UID: \"c10f3b66-a7e0-4690-939a-5938de689b3a\") " Nov 28 07:19:26 crc kubenswrapper[4922]: I1128 07:19:26.748212 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/c10f3b66-a7e0-4690-939a-5938de689b3a-public-tls-certs\") pod \"c10f3b66-a7e0-4690-939a-5938de689b3a\" (UID: \"c10f3b66-a7e0-4690-939a-5938de689b3a\") " Nov 28 07:19:26 crc kubenswrapper[4922]: I1128 07:19:26.748263 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c10f3b66-a7e0-4690-939a-5938de689b3a-combined-ca-bundle\") pod \"c10f3b66-a7e0-4690-939a-5938de689b3a\" (UID: \"c10f3b66-a7e0-4690-939a-5938de689b3a\") " Nov 28 07:19:26 crc kubenswrapper[4922]: I1128 07:19:26.748315 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/c10f3b66-a7e0-4690-939a-5938de689b3a-run-httpd\") pod \"c10f3b66-a7e0-4690-939a-5938de689b3a\" (UID: \"c10f3b66-a7e0-4690-939a-5938de689b3a\") " Nov 28 07:19:26 crc kubenswrapper[4922]: I1128 07:19:26.748329 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/c10f3b66-a7e0-4690-939a-5938de689b3a-etc-swift\") pod \"c10f3b66-a7e0-4690-939a-5938de689b3a\" (UID: \"c10f3b66-a7e0-4690-939a-5938de689b3a\") " Nov 28 07:19:26 crc kubenswrapper[4922]: I1128 07:19:26.748403 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: 
\"kubernetes.io/secret/c10f3b66-a7e0-4690-939a-5938de689b3a-internal-tls-certs\") pod \"c10f3b66-a7e0-4690-939a-5938de689b3a\" (UID: \"c10f3b66-a7e0-4690-939a-5938de689b3a\") " Nov 28 07:19:26 crc kubenswrapper[4922]: I1128 07:19:26.748450 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/c10f3b66-a7e0-4690-939a-5938de689b3a-log-httpd\") pod \"c10f3b66-a7e0-4690-939a-5938de689b3a\" (UID: \"c10f3b66-a7e0-4690-939a-5938de689b3a\") " Nov 28 07:19:26 crc kubenswrapper[4922]: I1128 07:19:26.749172 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c10f3b66-a7e0-4690-939a-5938de689b3a-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "c10f3b66-a7e0-4690-939a-5938de689b3a" (UID: "c10f3b66-a7e0-4690-939a-5938de689b3a"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 07:19:26 crc kubenswrapper[4922]: I1128 07:19:26.753487 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c10f3b66-a7e0-4690-939a-5938de689b3a-kube-api-access-tjjxw" (OuterVolumeSpecName: "kube-api-access-tjjxw") pod "c10f3b66-a7e0-4690-939a-5938de689b3a" (UID: "c10f3b66-a7e0-4690-939a-5938de689b3a"). InnerVolumeSpecName "kube-api-access-tjjxw". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 07:19:26 crc kubenswrapper[4922]: I1128 07:19:26.753681 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c10f3b66-a7e0-4690-939a-5938de689b3a-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "c10f3b66-a7e0-4690-939a-5938de689b3a" (UID: "c10f3b66-a7e0-4690-939a-5938de689b3a"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 07:19:26 crc kubenswrapper[4922]: I1128 07:19:26.755754 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c10f3b66-a7e0-4690-939a-5938de689b3a-etc-swift" (OuterVolumeSpecName: "etc-swift") pod "c10f3b66-a7e0-4690-939a-5938de689b3a" (UID: "c10f3b66-a7e0-4690-939a-5938de689b3a"). InnerVolumeSpecName "etc-swift". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 07:19:26 crc kubenswrapper[4922]: I1128 07:19:26.775078 4922 generic.go:334] "Generic (PLEG): container finished" podID="b2566655-e076-471c-af4c-1e218f70ebe1" containerID="94b76863ba343d4c01e9d0ccc2f68e6d6a4daef999bf262b9a27c34ee538c8c3" exitCode=0 Nov 28 07:19:26 crc kubenswrapper[4922]: I1128 07:19:26.775138 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-0" event={"ID":"b2566655-e076-471c-af4c-1e218f70ebe1","Type":"ContainerDied","Data":"94b76863ba343d4c01e9d0ccc2f68e6d6a4daef999bf262b9a27c34ee538c8c3"} Nov 28 07:19:26 crc kubenswrapper[4922]: I1128 07:19:26.780875 4922 generic.go:334] "Generic (PLEG): container finished" podID="6ef73d39-2ed2-4168-8598-e0749aa0a26b" containerID="5fac949f1f96664181265a651447466463b94e2a2793695fa4197d9252bd2acd" exitCode=143 Nov 28 07:19:26 crc kubenswrapper[4922]: I1128 07:19:26.780926 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-worker-b597c9f45-g422b" event={"ID":"6ef73d39-2ed2-4168-8598-e0749aa0a26b","Type":"ContainerDied","Data":"5fac949f1f96664181265a651447466463b94e2a2793695fa4197d9252bd2acd"} Nov 28 07:19:26 crc kubenswrapper[4922]: I1128 07:19:26.788683 4922 generic.go:334] "Generic (PLEG): container finished" podID="7e1382f2-6597-4c09-a171-8709e4b9f5f7" containerID="a81a1de4776394ff567145b64398d5b889f141b8c8414e1def8da4f9987ce3f1" exitCode=0 Nov 28 07:19:26 crc kubenswrapper[4922]: I1128 07:19:26.788741 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"7e1382f2-6597-4c09-a171-8709e4b9f5f7","Type":"ContainerDied","Data":"a81a1de4776394ff567145b64398d5b889f141b8c8414e1def8da4f9987ce3f1"} Nov 28 07:19:26 crc kubenswrapper[4922]: I1128 07:19:26.788773 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"7e1382f2-6597-4c09-a171-8709e4b9f5f7","Type":"ContainerDied","Data":"2a47a2154388c1c5b146cd321711e72042e52118966dec26aeedd6cbdb3d38cf"} Nov 28 07:19:26 crc kubenswrapper[4922]: I1128 07:19:26.788786 4922 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="2a47a2154388c1c5b146cd321711e72042e52118966dec26aeedd6cbdb3d38cf" Nov 28 07:19:26 crc kubenswrapper[4922]: I1128 07:19:26.840808 4922 generic.go:334] "Generic (PLEG): container finished" podID="454a2683-850f-4ce0-8ebe-7758105dd255" containerID="44df4a4a257621f216f61d08c9cd358b5d8198f41a5d916577451fe0af7bb74c" exitCode=143 Nov 28 07:19:26 crc kubenswrapper[4922]: I1128 07:19:26.840894 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-keystone-listener-7d9b9667cd-5cmld" event={"ID":"454a2683-850f-4ce0-8ebe-7758105dd255","Type":"ContainerDied","Data":"44df4a4a257621f216f61d08c9cd358b5d8198f41a5d916577451fe0af7bb74c"} Nov 28 07:19:26 crc kubenswrapper[4922]: I1128 07:19:26.844575 4922 scope.go:117] "RemoveContainer" containerID="070987feaf271fc91c9916a76b022635a4c92b91a9f70a5a74eccab2983e3ede" Nov 28 07:19:26 crc kubenswrapper[4922]: E1128 07:19:26.846681 4922 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"070987feaf271fc91c9916a76b022635a4c92b91a9f70a5a74eccab2983e3ede\": container with ID starting with 070987feaf271fc91c9916a76b022635a4c92b91a9f70a5a74eccab2983e3ede not found: ID does not exist" containerID="070987feaf271fc91c9916a76b022635a4c92b91a9f70a5a74eccab2983e3ede" Nov 28 07:19:26 crc kubenswrapper[4922]: I1128 07:19:26.846710 
4922 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"070987feaf271fc91c9916a76b022635a4c92b91a9f70a5a74eccab2983e3ede"} err="failed to get container status \"070987feaf271fc91c9916a76b022635a4c92b91a9f70a5a74eccab2983e3ede\": rpc error: code = NotFound desc = could not find container \"070987feaf271fc91c9916a76b022635a4c92b91a9f70a5a74eccab2983e3ede\": container with ID starting with 070987feaf271fc91c9916a76b022635a4c92b91a9f70a5a74eccab2983e3ede not found: ID does not exist" Nov 28 07:19:26 crc kubenswrapper[4922]: I1128 07:19:26.846729 4922 scope.go:117] "RemoveContainer" containerID="e6868b48a7fdc4bd6127081507fdf9eb8d100fe7676d439af911bfdac8246114" Nov 28 07:19:26 crc kubenswrapper[4922]: I1128 07:19:26.849156 4922 generic.go:334] "Generic (PLEG): container finished" podID="7c0f0857-2ca9-49c8-90ac-1351b2ee2f11" containerID="6595b56740c828b4071cf0e6ba075ef2934efd23c448c0814e7743491287af11" exitCode=0 Nov 28 07:19:26 crc kubenswrapper[4922]: E1128 07:19:26.849358 4922 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e6868b48a7fdc4bd6127081507fdf9eb8d100fe7676d439af911bfdac8246114\": container with ID starting with e6868b48a7fdc4bd6127081507fdf9eb8d100fe7676d439af911bfdac8246114 not found: ID does not exist" containerID="e6868b48a7fdc4bd6127081507fdf9eb8d100fe7676d439af911bfdac8246114" Nov 28 07:19:26 crc kubenswrapper[4922]: I1128 07:19:26.849430 4922 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e6868b48a7fdc4bd6127081507fdf9eb8d100fe7676d439af911bfdac8246114"} err="failed to get container status \"e6868b48a7fdc4bd6127081507fdf9eb8d100fe7676d439af911bfdac8246114\": rpc error: code = NotFound desc = could not find container \"e6868b48a7fdc4bd6127081507fdf9eb8d100fe7676d439af911bfdac8246114\": container with ID starting with e6868b48a7fdc4bd6127081507fdf9eb8d100fe7676d439af911bfdac8246114 not found: ID does not exist" Nov 28 07:19:26 crc kubenswrapper[4922]: I1128 07:19:26.849463 4922 scope.go:117] "RemoveContainer" containerID="b461584211ac1833270ce57012470931433410147f21b7bbc92e13735e9d4731" Nov 28 07:19:26 crc kubenswrapper[4922]: I1128 07:19:26.849476 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"7c0f0857-2ca9-49c8-90ac-1351b2ee2f11","Type":"ContainerDied","Data":"6595b56740c828b4071cf0e6ba075ef2934efd23c448c0814e7743491287af11"} Nov 28 07:19:26 crc kubenswrapper[4922]: I1128 07:19:26.850945 4922 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tjjxw\" (UniqueName: \"kubernetes.io/projected/c10f3b66-a7e0-4690-939a-5938de689b3a-kube-api-access-tjjxw\") on node \"crc\" DevicePath \"\"" Nov 28 07:19:26 crc kubenswrapper[4922]: I1128 07:19:26.850963 4922 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/c10f3b66-a7e0-4690-939a-5938de689b3a-run-httpd\") on node \"crc\" DevicePath \"\"" Nov 28 07:19:26 crc kubenswrapper[4922]: I1128 07:19:26.850972 4922 reconciler_common.go:293] "Volume detached for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/c10f3b66-a7e0-4690-939a-5938de689b3a-etc-swift\") on node \"crc\" DevicePath \"\"" Nov 28 07:19:26 crc kubenswrapper[4922]: I1128 07:19:26.850981 4922 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/c10f3b66-a7e0-4690-939a-5938de689b3a-log-httpd\") on node \"crc\" DevicePath 
\"\"" Nov 28 07:19:26 crc kubenswrapper[4922]: I1128 07:19:26.886493 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c10f3b66-a7e0-4690-939a-5938de689b3a-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "c10f3b66-a7e0-4690-939a-5938de689b3a" (UID: "c10f3b66-a7e0-4690-939a-5938de689b3a"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 07:19:26 crc kubenswrapper[4922]: I1128 07:19:26.890936 4922 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-lpw65" Nov 28 07:19:26 crc kubenswrapper[4922]: I1128 07:19:26.890963 4922 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-lpw65" Nov 28 07:19:26 crc kubenswrapper[4922]: I1128 07:19:26.892319 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c10f3b66-a7e0-4690-939a-5938de689b3a-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "c10f3b66-a7e0-4690-939a-5938de689b3a" (UID: "c10f3b66-a7e0-4690-939a-5938de689b3a"). InnerVolumeSpecName "public-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 07:19:26 crc kubenswrapper[4922]: I1128 07:19:26.898668 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c10f3b66-a7e0-4690-939a-5938de689b3a-config-data" (OuterVolumeSpecName: "config-data") pod "c10f3b66-a7e0-4690-939a-5938de689b3a" (UID: "c10f3b66-a7e0-4690-939a-5938de689b3a"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 07:19:26 crc kubenswrapper[4922]: I1128 07:19:26.933391 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c10f3b66-a7e0-4690-939a-5938de689b3a-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "c10f3b66-a7e0-4690-939a-5938de689b3a" (UID: "c10f3b66-a7e0-4690-939a-5938de689b3a"). InnerVolumeSpecName "internal-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 07:19:26 crc kubenswrapper[4922]: I1128 07:19:26.954438 4922 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c10f3b66-a7e0-4690-939a-5938de689b3a-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 07:19:26 crc kubenswrapper[4922]: I1128 07:19:26.955425 4922 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/c10f3b66-a7e0-4690-939a-5938de689b3a-public-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 28 07:19:26 crc kubenswrapper[4922]: I1128 07:19:26.955440 4922 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c10f3b66-a7e0-4690-939a-5938de689b3a-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 07:19:26 crc kubenswrapper[4922]: I1128 07:19:26.955448 4922 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/c10f3b66-a7e0-4690-939a-5938de689b3a-internal-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 28 07:19:26 crc kubenswrapper[4922]: E1128 07:19:26.955974 4922 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="35985199ebc15c8748b4f22bfafda9ad3d4b2a2643cfa7d6df842376383f82a2" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"] Nov 28 07:19:26 crc kubenswrapper[4922]: E1128 07:19:26.956134 4922 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 7588b9fec893bcd865ba591436037cd645135e83e041b11ecddc7dce8fa1ace5 is running failed: container process not found" containerID="7588b9fec893bcd865ba591436037cd645135e83e041b11ecddc7dce8fa1ace5" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"] Nov 28 07:19:26 crc kubenswrapper[4922]: E1128 07:19:26.958411 4922 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="35985199ebc15c8748b4f22bfafda9ad3d4b2a2643cfa7d6df842376383f82a2" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"] Nov 28 07:19:26 crc kubenswrapper[4922]: E1128 07:19:26.958623 4922 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 7588b9fec893bcd865ba591436037cd645135e83e041b11ecddc7dce8fa1ace5 is running failed: container process not found" containerID="7588b9fec893bcd865ba591436037cd645135e83e041b11ecddc7dce8fa1ace5" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"] Nov 28 07:19:26 crc kubenswrapper[4922]: E1128 07:19:26.962609 4922 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="35985199ebc15c8748b4f22bfafda9ad3d4b2a2643cfa7d6df842376383f82a2" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"] Nov 28 07:19:26 crc kubenswrapper[4922]: E1128 07:19:26.962676 4922 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 
7588b9fec893bcd865ba591436037cd645135e83e041b11ecddc7dce8fa1ace5 is running failed: container process not found" containerID="7588b9fec893bcd865ba591436037cd645135e83e041b11ecddc7dce8fa1ace5" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"] Nov 28 07:19:26 crc kubenswrapper[4922]: E1128 07:19:26.962724 4922 prober.go:104] "Probe errored" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 7588b9fec893bcd865ba591436037cd645135e83e041b11ecddc7dce8fa1ace5 is running failed: container process not found" probeType="Readiness" pod="openstack/ovn-controller-ovs-m9xpz" podUID="ec882eb7-01fb-4f7f-bad8-812346e5880e" containerName="ovsdb-server" Nov 28 07:19:26 crc kubenswrapper[4922]: E1128 07:19:26.962719 4922 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/ovn-controller-ovs-m9xpz" podUID="ec882eb7-01fb-4f7f-bad8-812346e5880e" containerName="ovs-vswitchd" Nov 28 07:19:26 crc kubenswrapper[4922]: E1128 07:19:26.965375 4922 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 6595b56740c828b4071cf0e6ba075ef2934efd23c448c0814e7743491287af11 is running failed: container process not found" containerID="6595b56740c828b4071cf0e6ba075ef2934efd23c448c0814e7743491287af11" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Nov 28 07:19:26 crc kubenswrapper[4922]: E1128 07:19:26.969398 4922 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 6595b56740c828b4071cf0e6ba075ef2934efd23c448c0814e7743491287af11 is running failed: container process not found" containerID="6595b56740c828b4071cf0e6ba075ef2934efd23c448c0814e7743491287af11" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Nov 28 07:19:26 crc kubenswrapper[4922]: E1128 07:19:26.970263 4922 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 6595b56740c828b4071cf0e6ba075ef2934efd23c448c0814e7743491287af11 is running failed: container process not found" containerID="6595b56740c828b4071cf0e6ba075ef2934efd23c448c0814e7743491287af11" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Nov 28 07:19:26 crc kubenswrapper[4922]: E1128 07:19:26.970322 4922 prober.go:104] "Probe errored" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 6595b56740c828b4071cf0e6ba075ef2934efd23c448c0814e7743491287af11 is running failed: container process not found" probeType="Readiness" pod="openstack/nova-scheduler-0" podUID="7c0f0857-2ca9-49c8-90ac-1351b2ee2f11" containerName="nova-scheduler-scheduler" Nov 28 07:19:26 crc kubenswrapper[4922]: I1128 07:19:26.983115 4922 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/openstack-galera-0" podUID="c39356f2-8f5d-45d3-8188-7d9428c4d8bf" containerName="galera" containerID="cri-o://52ecbf9b71e70de5c3daba2523d0579eaa9f475b46a7fa4424c1f79302e0602e" gracePeriod=30 Nov 28 07:19:27 crc kubenswrapper[4922]: I1128 07:19:27.026886 4922 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-api-0" Nov 28 07:19:27 crc kubenswrapper[4922]: I1128 07:19:27.066498 4922 scope.go:117] "RemoveContainer" containerID="38e3e9f9a924e02c5e76f4a6a76132d9a4596633913f1d0199cbf2b940a82fc6" Nov 28 07:19:27 crc kubenswrapper[4922]: I1128 07:19:27.106865 4922 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Nov 28 07:19:27 crc kubenswrapper[4922]: I1128 07:19:27.108322 4922 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-5f78c88b9d-zp4nm" Nov 28 07:19:27 crc kubenswrapper[4922]: I1128 07:19:27.141609 4922 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-scheduler-0"] Nov 28 07:19:27 crc kubenswrapper[4922]: I1128 07:19:27.154265 4922 scope.go:117] "RemoveContainer" containerID="4f746f52686852078d4054015bbe108873cac8ffaf0e003c3bb348ac22540b3b" Nov 28 07:19:27 crc kubenswrapper[4922]: I1128 07:19:27.160799 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/c038b865-4b32-4be3-9e0a-8c40dc140a68-etc-machine-id\") pod \"c038b865-4b32-4be3-9e0a-8c40dc140a68\" (UID: \"c038b865-4b32-4be3-9e0a-8c40dc140a68\") " Nov 28 07:19:27 crc kubenswrapper[4922]: I1128 07:19:27.160852 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c038b865-4b32-4be3-9e0a-8c40dc140a68-config-data\") pod \"c038b865-4b32-4be3-9e0a-8c40dc140a68\" (UID: \"c038b865-4b32-4be3-9e0a-8c40dc140a68\") " Nov 28 07:19:27 crc kubenswrapper[4922]: I1128 07:19:27.160895 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/c038b865-4b32-4be3-9e0a-8c40dc140a68-internal-tls-certs\") pod \"c038b865-4b32-4be3-9e0a-8c40dc140a68\" (UID: \"c038b865-4b32-4be3-9e0a-8c40dc140a68\") " Nov 28 07:19:27 crc kubenswrapper[4922]: I1128 07:19:27.160909 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/c038b865-4b32-4be3-9e0a-8c40dc140a68-etc-machine-id" (OuterVolumeSpecName: "etc-machine-id") pod "c038b865-4b32-4be3-9e0a-8c40dc140a68" (UID: "c038b865-4b32-4be3-9e0a-8c40dc140a68"). InnerVolumeSpecName "etc-machine-id". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 28 07:19:27 crc kubenswrapper[4922]: I1128 07:19:27.160999 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c038b865-4b32-4be3-9e0a-8c40dc140a68-logs\") pod \"c038b865-4b32-4be3-9e0a-8c40dc140a68\" (UID: \"c038b865-4b32-4be3-9e0a-8c40dc140a68\") " Nov 28 07:19:27 crc kubenswrapper[4922]: I1128 07:19:27.161046 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c038b865-4b32-4be3-9e0a-8c40dc140a68-combined-ca-bundle\") pod \"c038b865-4b32-4be3-9e0a-8c40dc140a68\" (UID: \"c038b865-4b32-4be3-9e0a-8c40dc140a68\") " Nov 28 07:19:27 crc kubenswrapper[4922]: I1128 07:19:27.161109 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-c6kxw\" (UniqueName: \"kubernetes.io/projected/c038b865-4b32-4be3-9e0a-8c40dc140a68-kube-api-access-c6kxw\") pod \"c038b865-4b32-4be3-9e0a-8c40dc140a68\" (UID: \"c038b865-4b32-4be3-9e0a-8c40dc140a68\") " Nov 28 07:19:27 crc kubenswrapper[4922]: I1128 07:19:27.161135 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c038b865-4b32-4be3-9e0a-8c40dc140a68-scripts\") pod \"c038b865-4b32-4be3-9e0a-8c40dc140a68\" (UID: \"c038b865-4b32-4be3-9e0a-8c40dc140a68\") " Nov 28 07:19:27 crc kubenswrapper[4922]: I1128 07:19:27.161165 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/c038b865-4b32-4be3-9e0a-8c40dc140a68-public-tls-certs\") pod \"c038b865-4b32-4be3-9e0a-8c40dc140a68\" (UID: \"c038b865-4b32-4be3-9e0a-8c40dc140a68\") " Nov 28 07:19:27 crc kubenswrapper[4922]: I1128 07:19:27.161185 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/c038b865-4b32-4be3-9e0a-8c40dc140a68-config-data-custom\") pod \"c038b865-4b32-4be3-9e0a-8c40dc140a68\" (UID: \"c038b865-4b32-4be3-9e0a-8c40dc140a68\") " Nov 28 07:19:27 crc kubenswrapper[4922]: I1128 07:19:27.161561 4922 reconciler_common.go:293] "Volume detached for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/c038b865-4b32-4be3-9e0a-8c40dc140a68-etc-machine-id\") on node \"crc\" DevicePath \"\"" Nov 28 07:19:27 crc kubenswrapper[4922]: I1128 07:19:27.163046 4922 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 28 07:19:27 crc kubenswrapper[4922]: I1128 07:19:27.163177 4922 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Nov 28 07:19:27 crc kubenswrapper[4922]: I1128 07:19:27.168928 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c038b865-4b32-4be3-9e0a-8c40dc140a68-logs" (OuterVolumeSpecName: "logs") pod "c038b865-4b32-4be3-9e0a-8c40dc140a68" (UID: "c038b865-4b32-4be3-9e0a-8c40dc140a68"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 07:19:27 crc kubenswrapper[4922]: I1128 07:19:27.181099 4922 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-api-7947bcd956-482dv" Nov 28 07:19:27 crc kubenswrapper[4922]: I1128 07:19:27.191420 4922 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Nov 28 07:19:27 crc kubenswrapper[4922]: I1128 07:19:27.197458 4922 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-scheduler-0"] Nov 28 07:19:27 crc kubenswrapper[4922]: I1128 07:19:27.199131 4922 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Nov 28 07:19:27 crc kubenswrapper[4922]: I1128 07:19:27.205878 4922 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-6bc646c8f9-nfgpb"] Nov 28 07:19:27 crc kubenswrapper[4922]: I1128 07:19:27.211693 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c038b865-4b32-4be3-9e0a-8c40dc140a68-kube-api-access-c6kxw" (OuterVolumeSpecName: "kube-api-access-c6kxw") pod "c038b865-4b32-4be3-9e0a-8c40dc140a68" (UID: "c038b865-4b32-4be3-9e0a-8c40dc140a68"). InnerVolumeSpecName "kube-api-access-c6kxw". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 07:19:27 crc kubenswrapper[4922]: I1128 07:19:27.220538 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c038b865-4b32-4be3-9e0a-8c40dc140a68-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "c038b865-4b32-4be3-9e0a-8c40dc140a68" (UID: "c038b865-4b32-4be3-9e0a-8c40dc140a68"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 07:19:27 crc kubenswrapper[4922]: I1128 07:19:27.229921 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c038b865-4b32-4be3-9e0a-8c40dc140a68-scripts" (OuterVolumeSpecName: "scripts") pod "c038b865-4b32-4be3-9e0a-8c40dc140a68" (UID: "c038b865-4b32-4be3-9e0a-8c40dc140a68"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 07:19:27 crc kubenswrapper[4922]: I1128 07:19:27.230007 4922 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-6bc646c8f9-nfgpb"] Nov 28 07:19:27 crc kubenswrapper[4922]: I1128 07:19:27.235258 4922 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-0" Nov 28 07:19:27 crc kubenswrapper[4922]: I1128 07:19:27.235566 4922 scope.go:117] "RemoveContainer" containerID="fbdb24f0e3359c3008db7b03afd1ec4313fbb097c406925f05c1bc8c06c01499" Nov 28 07:19:27 crc kubenswrapper[4922]: I1128 07:19:27.237814 4922 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovsdbserver-nb-0"] Nov 28 07:19:27 crc kubenswrapper[4922]: I1128 07:19:27.246396 4922 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ovsdbserver-nb-0"] Nov 28 07:19:27 crc kubenswrapper[4922]: I1128 07:19:27.251642 4922 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/kube-state-metrics-0" Nov 28 07:19:27 crc kubenswrapper[4922]: I1128 07:19:27.262084 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/182970fb-401f-404c-81c1-db0294b02167-combined-ca-bundle\") pod \"182970fb-401f-404c-81c1-db0294b02167\" (UID: \"182970fb-401f-404c-81c1-db0294b02167\") " Nov 28 07:19:27 crc kubenswrapper[4922]: I1128 07:19:27.262115 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/1d97532b-e9ff-4031-a82c-3db5e943bfd9-internal-tls-certs\") pod \"1d97532b-e9ff-4031-a82c-3db5e943bfd9\" (UID: \"1d97532b-e9ff-4031-a82c-3db5e943bfd9\") " Nov 28 07:19:27 crc kubenswrapper[4922]: I1128 07:19:27.262142 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"0cd494ee-6c17-4d94-96c2-9e2fcf02b2bc\" (UID: \"0cd494ee-6c17-4d94-96c2-9e2fcf02b2bc\") " Nov 28 07:19:27 crc kubenswrapper[4922]: I1128 07:19:27.262170 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7e1382f2-6597-4c09-a171-8709e4b9f5f7-combined-ca-bundle\") pod \"7e1382f2-6597-4c09-a171-8709e4b9f5f7\" (UID: \"7e1382f2-6597-4c09-a171-8709e4b9f5f7\") " Nov 28 07:19:27 crc kubenswrapper[4922]: I1128 07:19:27.262190 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/1d97532b-e9ff-4031-a82c-3db5e943bfd9-public-tls-certs\") pod \"1d97532b-e9ff-4031-a82c-3db5e943bfd9\" (UID: \"1d97532b-e9ff-4031-a82c-3db5e943bfd9\") " Nov 28 07:19:27 crc kubenswrapper[4922]: I1128 07:19:27.262206 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1d97532b-e9ff-4031-a82c-3db5e943bfd9-config-data\") pod \"1d97532b-e9ff-4031-a82c-3db5e943bfd9\" (UID: \"1d97532b-e9ff-4031-a82c-3db5e943bfd9\") " Nov 28 07:19:27 crc kubenswrapper[4922]: I1128 07:19:27.262262 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5rdg5\" (UniqueName: \"kubernetes.io/projected/182970fb-401f-404c-81c1-db0294b02167-kube-api-access-5rdg5\") pod \"182970fb-401f-404c-81c1-db0294b02167\" (UID: \"182970fb-401f-404c-81c1-db0294b02167\") " Nov 28 07:19:27 crc kubenswrapper[4922]: I1128 07:19:27.262295 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dkc4c\" (UniqueName: \"kubernetes.io/projected/1d97532b-e9ff-4031-a82c-3db5e943bfd9-kube-api-access-dkc4c\") pod \"1d97532b-e9ff-4031-a82c-3db5e943bfd9\" (UID: \"1d97532b-e9ff-4031-a82c-3db5e943bfd9\") " Nov 28 07:19:27 crc kubenswrapper[4922]: I1128 07:19:27.262317 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1d97532b-e9ff-4031-a82c-3db5e943bfd9-combined-ca-bundle\") pod \"1d97532b-e9ff-4031-a82c-3db5e943bfd9\" (UID: \"1d97532b-e9ff-4031-a82c-3db5e943bfd9\") " Nov 28 07:19:27 crc kubenswrapper[4922]: I1128 07:19:27.262334 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/182970fb-401f-404c-81c1-db0294b02167-scripts\") pod \"182970fb-401f-404c-81c1-db0294b02167\" (UID: 
\"182970fb-401f-404c-81c1-db0294b02167\") " Nov 28 07:19:27 crc kubenswrapper[4922]: I1128 07:19:27.262355 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") pod \"182970fb-401f-404c-81c1-db0294b02167\" (UID: \"182970fb-401f-404c-81c1-db0294b02167\") " Nov 28 07:19:27 crc kubenswrapper[4922]: I1128 07:19:27.262368 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0cd494ee-6c17-4d94-96c2-9e2fcf02b2bc-scripts\") pod \"0cd494ee-6c17-4d94-96c2-9e2fcf02b2bc\" (UID: \"0cd494ee-6c17-4d94-96c2-9e2fcf02b2bc\") " Nov 28 07:19:27 crc kubenswrapper[4922]: I1128 07:19:27.262387 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/1d97532b-e9ff-4031-a82c-3db5e943bfd9-logs\") pod \"1d97532b-e9ff-4031-a82c-3db5e943bfd9\" (UID: \"1d97532b-e9ff-4031-a82c-3db5e943bfd9\") " Nov 28 07:19:27 crc kubenswrapper[4922]: I1128 07:19:27.262405 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0cd494ee-6c17-4d94-96c2-9e2fcf02b2bc-combined-ca-bundle\") pod \"0cd494ee-6c17-4d94-96c2-9e2fcf02b2bc\" (UID: \"0cd494ee-6c17-4d94-96c2-9e2fcf02b2bc\") " Nov 28 07:19:27 crc kubenswrapper[4922]: I1128 07:19:27.262425 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/182970fb-401f-404c-81c1-db0294b02167-httpd-run\") pod \"182970fb-401f-404c-81c1-db0294b02167\" (UID: \"182970fb-401f-404c-81c1-db0294b02167\") " Nov 28 07:19:27 crc kubenswrapper[4922]: I1128 07:19:27.262452 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/02d26a97-d447-4f76-90ed-9357e343cd91-combined-ca-bundle\") pod \"02d26a97-d447-4f76-90ed-9357e343cd91\" (UID: \"02d26a97-d447-4f76-90ed-9357e343cd91\") " Nov 28 07:19:27 crc kubenswrapper[4922]: I1128 07:19:27.262469 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/0cd494ee-6c17-4d94-96c2-9e2fcf02b2bc-internal-tls-certs\") pod \"0cd494ee-6c17-4d94-96c2-9e2fcf02b2bc\" (UID: \"0cd494ee-6c17-4d94-96c2-9e2fcf02b2bc\") " Nov 28 07:19:27 crc kubenswrapper[4922]: I1128 07:19:27.262485 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/1d97532b-e9ff-4031-a82c-3db5e943bfd9-config-data-custom\") pod \"1d97532b-e9ff-4031-a82c-3db5e943bfd9\" (UID: \"1d97532b-e9ff-4031-a82c-3db5e943bfd9\") " Nov 28 07:19:27 crc kubenswrapper[4922]: I1128 07:19:27.262501 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6snvh\" (UniqueName: \"kubernetes.io/projected/0cd494ee-6c17-4d94-96c2-9e2fcf02b2bc-kube-api-access-6snvh\") pod \"0cd494ee-6c17-4d94-96c2-9e2fcf02b2bc\" (UID: \"0cd494ee-6c17-4d94-96c2-9e2fcf02b2bc\") " Nov 28 07:19:27 crc kubenswrapper[4922]: I1128 07:19:27.262553 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/0cd494ee-6c17-4d94-96c2-9e2fcf02b2bc-logs\") pod \"0cd494ee-6c17-4d94-96c2-9e2fcf02b2bc\" (UID: \"0cd494ee-6c17-4d94-96c2-9e2fcf02b2bc\") " Nov 28 07:19:27 crc kubenswrapper[4922]: 
I1128 07:19:27.262584 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pcgtm\" (UniqueName: \"kubernetes.io/projected/02d26a97-d447-4f76-90ed-9357e343cd91-kube-api-access-pcgtm\") pod \"02d26a97-d447-4f76-90ed-9357e343cd91\" (UID: \"02d26a97-d447-4f76-90ed-9357e343cd91\") " Nov 28 07:19:27 crc kubenswrapper[4922]: I1128 07:19:27.262603 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/0cd494ee-6c17-4d94-96c2-9e2fcf02b2bc-httpd-run\") pod \"0cd494ee-6c17-4d94-96c2-9e2fcf02b2bc\" (UID: \"0cd494ee-6c17-4d94-96c2-9e2fcf02b2bc\") " Nov 28 07:19:27 crc kubenswrapper[4922]: I1128 07:19:27.262622 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/7e1382f2-6597-4c09-a171-8709e4b9f5f7-nova-metadata-tls-certs\") pod \"7e1382f2-6597-4c09-a171-8709e4b9f5f7\" (UID: \"7e1382f2-6597-4c09-a171-8709e4b9f5f7\") " Nov 28 07:19:27 crc kubenswrapper[4922]: I1128 07:19:27.262646 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/02d26a97-d447-4f76-90ed-9357e343cd91-config-data\") pod \"02d26a97-d447-4f76-90ed-9357e343cd91\" (UID: \"02d26a97-d447-4f76-90ed-9357e343cd91\") " Nov 28 07:19:27 crc kubenswrapper[4922]: I1128 07:19:27.262665 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/7e1382f2-6597-4c09-a171-8709e4b9f5f7-logs\") pod \"7e1382f2-6597-4c09-a171-8709e4b9f5f7\" (UID: \"7e1382f2-6597-4c09-a171-8709e4b9f5f7\") " Nov 28 07:19:27 crc kubenswrapper[4922]: I1128 07:19:27.262687 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/02d26a97-d447-4f76-90ed-9357e343cd91-logs\") pod \"02d26a97-d447-4f76-90ed-9357e343cd91\" (UID: \"02d26a97-d447-4f76-90ed-9357e343cd91\") " Nov 28 07:19:27 crc kubenswrapper[4922]: I1128 07:19:27.262721 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0cd494ee-6c17-4d94-96c2-9e2fcf02b2bc-config-data\") pod \"0cd494ee-6c17-4d94-96c2-9e2fcf02b2bc\" (UID: \"0cd494ee-6c17-4d94-96c2-9e2fcf02b2bc\") " Nov 28 07:19:27 crc kubenswrapper[4922]: I1128 07:19:27.262739 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/02d26a97-d447-4f76-90ed-9357e343cd91-public-tls-certs\") pod \"02d26a97-d447-4f76-90ed-9357e343cd91\" (UID: \"02d26a97-d447-4f76-90ed-9357e343cd91\") " Nov 28 07:19:27 crc kubenswrapper[4922]: I1128 07:19:27.262762 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/02d26a97-d447-4f76-90ed-9357e343cd91-scripts\") pod \"02d26a97-d447-4f76-90ed-9357e343cd91\" (UID: \"02d26a97-d447-4f76-90ed-9357e343cd91\") " Nov 28 07:19:27 crc kubenswrapper[4922]: I1128 07:19:27.262776 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fxzpk\" (UniqueName: \"kubernetes.io/projected/7e1382f2-6597-4c09-a171-8709e4b9f5f7-kube-api-access-fxzpk\") pod \"7e1382f2-6597-4c09-a171-8709e4b9f5f7\" (UID: \"7e1382f2-6597-4c09-a171-8709e4b9f5f7\") " Nov 28 07:19:27 crc kubenswrapper[4922]: I1128 07:19:27.262792 4922 reconciler_common.go:159] 
"operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7e1382f2-6597-4c09-a171-8709e4b9f5f7-config-data\") pod \"7e1382f2-6597-4c09-a171-8709e4b9f5f7\" (UID: \"7e1382f2-6597-4c09-a171-8709e4b9f5f7\") " Nov 28 07:19:27 crc kubenswrapper[4922]: I1128 07:19:27.262805 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/02d26a97-d447-4f76-90ed-9357e343cd91-internal-tls-certs\") pod \"02d26a97-d447-4f76-90ed-9357e343cd91\" (UID: \"02d26a97-d447-4f76-90ed-9357e343cd91\") " Nov 28 07:19:27 crc kubenswrapper[4922]: I1128 07:19:27.262827 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/182970fb-401f-404c-81c1-db0294b02167-public-tls-certs\") pod \"182970fb-401f-404c-81c1-db0294b02167\" (UID: \"182970fb-401f-404c-81c1-db0294b02167\") " Nov 28 07:19:27 crc kubenswrapper[4922]: I1128 07:19:27.262848 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/182970fb-401f-404c-81c1-db0294b02167-config-data\") pod \"182970fb-401f-404c-81c1-db0294b02167\" (UID: \"182970fb-401f-404c-81c1-db0294b02167\") " Nov 28 07:19:27 crc kubenswrapper[4922]: I1128 07:19:27.262868 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/182970fb-401f-404c-81c1-db0294b02167-logs\") pod \"182970fb-401f-404c-81c1-db0294b02167\" (UID: \"182970fb-401f-404c-81c1-db0294b02167\") " Nov 28 07:19:27 crc kubenswrapper[4922]: I1128 07:19:27.263204 4922 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-c6kxw\" (UniqueName: \"kubernetes.io/projected/c038b865-4b32-4be3-9e0a-8c40dc140a68-kube-api-access-c6kxw\") on node \"crc\" DevicePath \"\"" Nov 28 07:19:27 crc kubenswrapper[4922]: I1128 07:19:27.263224 4922 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c038b865-4b32-4be3-9e0a-8c40dc140a68-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 07:19:27 crc kubenswrapper[4922]: I1128 07:19:27.263234 4922 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/c038b865-4b32-4be3-9e0a-8c40dc140a68-config-data-custom\") on node \"crc\" DevicePath \"\"" Nov 28 07:19:27 crc kubenswrapper[4922]: I1128 07:19:27.263294 4922 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c038b865-4b32-4be3-9e0a-8c40dc140a68-logs\") on node \"crc\" DevicePath \"\"" Nov 28 07:19:27 crc kubenswrapper[4922]: I1128 07:19:27.264232 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/182970fb-401f-404c-81c1-db0294b02167-logs" (OuterVolumeSpecName: "logs") pod "182970fb-401f-404c-81c1-db0294b02167" (UID: "182970fb-401f-404c-81c1-db0294b02167"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 07:19:27 crc kubenswrapper[4922]: I1128 07:19:27.270036 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1d97532b-e9ff-4031-a82c-3db5e943bfd9-logs" (OuterVolumeSpecName: "logs") pod "1d97532b-e9ff-4031-a82c-3db5e943bfd9" (UID: "1d97532b-e9ff-4031-a82c-3db5e943bfd9"). InnerVolumeSpecName "logs". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 07:19:27 crc kubenswrapper[4922]: I1128 07:19:27.270737 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/02d26a97-d447-4f76-90ed-9357e343cd91-logs" (OuterVolumeSpecName: "logs") pod "02d26a97-d447-4f76-90ed-9357e343cd91" (UID: "02d26a97-d447-4f76-90ed-9357e343cd91"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 07:19:27 crc kubenswrapper[4922]: I1128 07:19:27.284542 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0cd494ee-6c17-4d94-96c2-9e2fcf02b2bc-logs" (OuterVolumeSpecName: "logs") pod "0cd494ee-6c17-4d94-96c2-9e2fcf02b2bc" (UID: "0cd494ee-6c17-4d94-96c2-9e2fcf02b2bc"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 07:19:27 crc kubenswrapper[4922]: I1128 07:19:27.285636 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/182970fb-401f-404c-81c1-db0294b02167-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "182970fb-401f-404c-81c1-db0294b02167" (UID: "182970fb-401f-404c-81c1-db0294b02167"). InnerVolumeSpecName "httpd-run". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 07:19:27 crc kubenswrapper[4922]: I1128 07:19:27.289659 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/7e1382f2-6597-4c09-a171-8709e4b9f5f7-logs" (OuterVolumeSpecName: "logs") pod "7e1382f2-6597-4c09-a171-8709e4b9f5f7" (UID: "7e1382f2-6597-4c09-a171-8709e4b9f5f7"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 07:19:27 crc kubenswrapper[4922]: I1128 07:19:27.294608 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0cd494ee-6c17-4d94-96c2-9e2fcf02b2bc-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "0cd494ee-6c17-4d94-96c2-9e2fcf02b2bc" (UID: "0cd494ee-6c17-4d94-96c2-9e2fcf02b2bc"). InnerVolumeSpecName "httpd-run". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 07:19:27 crc kubenswrapper[4922]: I1128 07:19:27.317938 4922 patch_prober.go:28] interesting pod/machine-config-daemon-h8wk6 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 07:19:27 crc kubenswrapper[4922]: I1128 07:19:27.317992 4922 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-h8wk6" podUID="0498340a-5e95-42bf-a0a6-8ac89a6b8858" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 07:19:27 crc kubenswrapper[4922]: I1128 07:19:27.318037 4922 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-h8wk6" Nov 28 07:19:27 crc kubenswrapper[4922]: I1128 07:19:27.318725 4922 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"50188513677cb6b14ed255820648795c8e06f68b16a46dc87e268985d568770f"} pod="openshift-machine-config-operator/machine-config-daemon-h8wk6" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 28 07:19:27 crc kubenswrapper[4922]: I1128 07:19:27.318774 4922 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-h8wk6" podUID="0498340a-5e95-42bf-a0a6-8ac89a6b8858" containerName="machine-config-daemon" containerID="cri-o://50188513677cb6b14ed255820648795c8e06f68b16a46dc87e268985d568770f" gracePeriod=600 Nov 28 07:19:27 crc kubenswrapper[4922]: I1128 07:19:27.319199 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0cd494ee-6c17-4d94-96c2-9e2fcf02b2bc-kube-api-access-6snvh" (OuterVolumeSpecName: "kube-api-access-6snvh") pod "0cd494ee-6c17-4d94-96c2-9e2fcf02b2bc" (UID: "0cd494ee-6c17-4d94-96c2-9e2fcf02b2bc"). InnerVolumeSpecName "kube-api-access-6snvh". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 07:19:27 crc kubenswrapper[4922]: I1128 07:19:27.320214 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1d97532b-e9ff-4031-a82c-3db5e943bfd9-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "1d97532b-e9ff-4031-a82c-3db5e943bfd9" (UID: "1d97532b-e9ff-4031-a82c-3db5e943bfd9"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 07:19:27 crc kubenswrapper[4922]: I1128 07:19:27.326250 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/182970fb-401f-404c-81c1-db0294b02167-scripts" (OuterVolumeSpecName: "scripts") pod "182970fb-401f-404c-81c1-db0294b02167" (UID: "182970fb-401f-404c-81c1-db0294b02167"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 07:19:27 crc kubenswrapper[4922]: I1128 07:19:27.366581 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage10-crc" (OuterVolumeSpecName: "glance") pod "0cd494ee-6c17-4d94-96c2-9e2fcf02b2bc" (UID: "0cd494ee-6c17-4d94-96c2-9e2fcf02b2bc"). InnerVolumeSpecName "local-storage10-crc". 
PluginName "kubernetes.io/local-volume", VolumeGidValue "" Nov 28 07:19:27 crc kubenswrapper[4922]: I1128 07:19:27.366678 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f672d6bb-97fc-4547-a14b-af27d631fe2a-combined-ca-bundle\") pod \"f672d6bb-97fc-4547-a14b-af27d631fe2a\" (UID: \"f672d6bb-97fc-4547-a14b-af27d631fe2a\") " Nov 28 07:19:27 crc kubenswrapper[4922]: I1128 07:19:27.366742 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x5f5t\" (UniqueName: \"kubernetes.io/projected/7c0f0857-2ca9-49c8-90ac-1351b2ee2f11-kube-api-access-x5f5t\") pod \"7c0f0857-2ca9-49c8-90ac-1351b2ee2f11\" (UID: \"7c0f0857-2ca9-49c8-90ac-1351b2ee2f11\") " Nov 28 07:19:27 crc kubenswrapper[4922]: I1128 07:19:27.366841 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7c0f0857-2ca9-49c8-90ac-1351b2ee2f11-combined-ca-bundle\") pod \"7c0f0857-2ca9-49c8-90ac-1351b2ee2f11\" (UID: \"7c0f0857-2ca9-49c8-90ac-1351b2ee2f11\") " Nov 28 07:19:27 crc kubenswrapper[4922]: I1128 07:19:27.366900 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-state-metrics-tls-config\" (UniqueName: \"kubernetes.io/secret/f672d6bb-97fc-4547-a14b-af27d631fe2a-kube-state-metrics-tls-config\") pod \"f672d6bb-97fc-4547-a14b-af27d631fe2a\" (UID: \"f672d6bb-97fc-4547-a14b-af27d631fe2a\") " Nov 28 07:19:27 crc kubenswrapper[4922]: I1128 07:19:27.366934 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/ed30601b-1e7c-4aa6-8469-8ff61cd93253-internal-tls-certs\") pod \"ed30601b-1e7c-4aa6-8469-8ff61cd93253\" (UID: \"ed30601b-1e7c-4aa6-8469-8ff61cd93253\") " Nov 28 07:19:27 crc kubenswrapper[4922]: I1128 07:19:27.366957 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ed30601b-1e7c-4aa6-8469-8ff61cd93253-logs\") pod \"ed30601b-1e7c-4aa6-8469-8ff61cd93253\" (UID: \"ed30601b-1e7c-4aa6-8469-8ff61cd93253\") " Nov 28 07:19:27 crc kubenswrapper[4922]: I1128 07:19:27.367026 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b2566655-e076-471c-af4c-1e218f70ebe1-config-data\") pod \"b2566655-e076-471c-af4c-1e218f70ebe1\" (UID: \"b2566655-e076-471c-af4c-1e218f70ebe1\") " Nov 28 07:19:27 crc kubenswrapper[4922]: I1128 07:19:27.367055 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-sm2rq\" (UniqueName: \"kubernetes.io/projected/b2566655-e076-471c-af4c-1e218f70ebe1-kube-api-access-sm2rq\") pod \"b2566655-e076-471c-af4c-1e218f70ebe1\" (UID: \"b2566655-e076-471c-af4c-1e218f70ebe1\") " Nov 28 07:19:27 crc kubenswrapper[4922]: I1128 07:19:27.367093 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-state-metrics-tls-certs\" (UniqueName: \"kubernetes.io/secret/f672d6bb-97fc-4547-a14b-af27d631fe2a-kube-state-metrics-tls-certs\") pod \"f672d6bb-97fc-4547-a14b-af27d631fe2a\" (UID: \"f672d6bb-97fc-4547-a14b-af27d631fe2a\") " Nov 28 07:19:27 crc kubenswrapper[4922]: I1128 07:19:27.367141 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9lhk5\" (UniqueName: 
\"kubernetes.io/projected/ed30601b-1e7c-4aa6-8469-8ff61cd93253-kube-api-access-9lhk5\") pod \"ed30601b-1e7c-4aa6-8469-8ff61cd93253\" (UID: \"ed30601b-1e7c-4aa6-8469-8ff61cd93253\") " Nov 28 07:19:27 crc kubenswrapper[4922]: I1128 07:19:27.367165 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ed30601b-1e7c-4aa6-8469-8ff61cd93253-combined-ca-bundle\") pod \"ed30601b-1e7c-4aa6-8469-8ff61cd93253\" (UID: \"ed30601b-1e7c-4aa6-8469-8ff61cd93253\") " Nov 28 07:19:27 crc kubenswrapper[4922]: I1128 07:19:27.367193 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b2566655-e076-471c-af4c-1e218f70ebe1-combined-ca-bundle\") pod \"b2566655-e076-471c-af4c-1e218f70ebe1\" (UID: \"b2566655-e076-471c-af4c-1e218f70ebe1\") " Nov 28 07:19:27 crc kubenswrapper[4922]: I1128 07:19:27.367251 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/ed30601b-1e7c-4aa6-8469-8ff61cd93253-public-tls-certs\") pod \"ed30601b-1e7c-4aa6-8469-8ff61cd93253\" (UID: \"ed30601b-1e7c-4aa6-8469-8ff61cd93253\") " Nov 28 07:19:27 crc kubenswrapper[4922]: I1128 07:19:27.367288 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ed30601b-1e7c-4aa6-8469-8ff61cd93253-config-data\") pod \"ed30601b-1e7c-4aa6-8469-8ff61cd93253\" (UID: \"ed30601b-1e7c-4aa6-8469-8ff61cd93253\") " Nov 28 07:19:27 crc kubenswrapper[4922]: I1128 07:19:27.367332 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-sqp4r\" (UniqueName: \"kubernetes.io/projected/f672d6bb-97fc-4547-a14b-af27d631fe2a-kube-api-access-sqp4r\") pod \"f672d6bb-97fc-4547-a14b-af27d631fe2a\" (UID: \"f672d6bb-97fc-4547-a14b-af27d631fe2a\") " Nov 28 07:19:27 crc kubenswrapper[4922]: I1128 07:19:27.367408 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7c0f0857-2ca9-49c8-90ac-1351b2ee2f11-config-data\") pod \"7c0f0857-2ca9-49c8-90ac-1351b2ee2f11\" (UID: \"7c0f0857-2ca9-49c8-90ac-1351b2ee2f11\") " Nov 28 07:19:27 crc kubenswrapper[4922]: I1128 07:19:27.367429 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"0cd494ee-6c17-4d94-96c2-9e2fcf02b2bc\" (UID: \"0cd494ee-6c17-4d94-96c2-9e2fcf02b2bc\") " Nov 28 07:19:27 crc kubenswrapper[4922]: I1128 07:19:27.368075 4922 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/182970fb-401f-404c-81c1-db0294b02167-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 07:19:27 crc kubenswrapper[4922]: I1128 07:19:27.368089 4922 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/1d97532b-e9ff-4031-a82c-3db5e943bfd9-logs\") on node \"crc\" DevicePath \"\"" Nov 28 07:19:27 crc kubenswrapper[4922]: I1128 07:19:27.368098 4922 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/182970fb-401f-404c-81c1-db0294b02167-httpd-run\") on node \"crc\" DevicePath \"\"" Nov 28 07:19:27 crc kubenswrapper[4922]: I1128 07:19:27.368110 4922 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: 
\"kubernetes.io/secret/1d97532b-e9ff-4031-a82c-3db5e943bfd9-config-data-custom\") on node \"crc\" DevicePath \"\"" Nov 28 07:19:27 crc kubenswrapper[4922]: I1128 07:19:27.368121 4922 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6snvh\" (UniqueName: \"kubernetes.io/projected/0cd494ee-6c17-4d94-96c2-9e2fcf02b2bc-kube-api-access-6snvh\") on node \"crc\" DevicePath \"\"" Nov 28 07:19:27 crc kubenswrapper[4922]: I1128 07:19:27.368135 4922 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/0cd494ee-6c17-4d94-96c2-9e2fcf02b2bc-logs\") on node \"crc\" DevicePath \"\"" Nov 28 07:19:27 crc kubenswrapper[4922]: I1128 07:19:27.368144 4922 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/0cd494ee-6c17-4d94-96c2-9e2fcf02b2bc-httpd-run\") on node \"crc\" DevicePath \"\"" Nov 28 07:19:27 crc kubenswrapper[4922]: I1128 07:19:27.368183 4922 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/7e1382f2-6597-4c09-a171-8709e4b9f5f7-logs\") on node \"crc\" DevicePath \"\"" Nov 28 07:19:27 crc kubenswrapper[4922]: I1128 07:19:27.368194 4922 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/02d26a97-d447-4f76-90ed-9357e343cd91-logs\") on node \"crc\" DevicePath \"\"" Nov 28 07:19:27 crc kubenswrapper[4922]: I1128 07:19:27.368204 4922 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/182970fb-401f-404c-81c1-db0294b02167-logs\") on node \"crc\" DevicePath \"\"" Nov 28 07:19:27 crc kubenswrapper[4922]: I1128 07:19:27.369796 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage11-crc" (OuterVolumeSpecName: "glance") pod "182970fb-401f-404c-81c1-db0294b02167" (UID: "182970fb-401f-404c-81c1-db0294b02167"). InnerVolumeSpecName "local-storage11-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Nov 28 07:19:27 crc kubenswrapper[4922]: I1128 07:19:27.382008 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ed30601b-1e7c-4aa6-8469-8ff61cd93253-logs" (OuterVolumeSpecName: "logs") pod "ed30601b-1e7c-4aa6-8469-8ff61cd93253" (UID: "ed30601b-1e7c-4aa6-8469-8ff61cd93253"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 07:19:27 crc kubenswrapper[4922]: I1128 07:19:27.382074 4922 scope.go:117] "RemoveContainer" containerID="ed684f6b629f4e8652e956f602ceebf88d808cf52630b1ec4ad72baf8b709140" Nov 28 07:19:27 crc kubenswrapper[4922]: W1128 07:19:27.382306 4922 mount_helper_common.go:34] Warning: mount cleanup skipped because path does not exist: /var/lib/kubelet/pods/0cd494ee-6c17-4d94-96c2-9e2fcf02b2bc/volumes/kubernetes.io~local-volume/local-storage10-crc Nov 28 07:19:27 crc kubenswrapper[4922]: I1128 07:19:27.382318 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage10-crc" (OuterVolumeSpecName: "glance") pod "0cd494ee-6c17-4d94-96c2-9e2fcf02b2bc" (UID: "0cd494ee-6c17-4d94-96c2-9e2fcf02b2bc"). InnerVolumeSpecName "local-storage10-crc". 
PluginName "kubernetes.io/local-volume", VolumeGidValue "" Nov 28 07:19:27 crc kubenswrapper[4922]: I1128 07:19:27.389480 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0cd494ee-6c17-4d94-96c2-9e2fcf02b2bc-scripts" (OuterVolumeSpecName: "scripts") pod "0cd494ee-6c17-4d94-96c2-9e2fcf02b2bc" (UID: "0cd494ee-6c17-4d94-96c2-9e2fcf02b2bc"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 07:19:27 crc kubenswrapper[4922]: I1128 07:19:27.394079 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ed30601b-1e7c-4aa6-8469-8ff61cd93253-kube-api-access-9lhk5" (OuterVolumeSpecName: "kube-api-access-9lhk5") pod "ed30601b-1e7c-4aa6-8469-8ff61cd93253" (UID: "ed30601b-1e7c-4aa6-8469-8ff61cd93253"). InnerVolumeSpecName "kube-api-access-9lhk5". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 07:19:27 crc kubenswrapper[4922]: I1128 07:19:27.400979 4922 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Nov 28 07:19:27 crc kubenswrapper[4922]: I1128 07:19:27.426151 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/182970fb-401f-404c-81c1-db0294b02167-kube-api-access-5rdg5" (OuterVolumeSpecName: "kube-api-access-5rdg5") pod "182970fb-401f-404c-81c1-db0294b02167" (UID: "182970fb-401f-404c-81c1-db0294b02167"). InnerVolumeSpecName "kube-api-access-5rdg5". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 07:19:27 crc kubenswrapper[4922]: I1128 07:19:27.444293 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b2566655-e076-471c-af4c-1e218f70ebe1-kube-api-access-sm2rq" (OuterVolumeSpecName: "kube-api-access-sm2rq") pod "b2566655-e076-471c-af4c-1e218f70ebe1" (UID: "b2566655-e076-471c-af4c-1e218f70ebe1"). InnerVolumeSpecName "kube-api-access-sm2rq". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 07:19:27 crc kubenswrapper[4922]: I1128 07:19:27.446434 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1d97532b-e9ff-4031-a82c-3db5e943bfd9-kube-api-access-dkc4c" (OuterVolumeSpecName: "kube-api-access-dkc4c") pod "1d97532b-e9ff-4031-a82c-3db5e943bfd9" (UID: "1d97532b-e9ff-4031-a82c-3db5e943bfd9"). InnerVolumeSpecName "kube-api-access-dkc4c". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 07:19:27 crc kubenswrapper[4922]: I1128 07:19:27.461325 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/02d26a97-d447-4f76-90ed-9357e343cd91-scripts" (OuterVolumeSpecName: "scripts") pod "02d26a97-d447-4f76-90ed-9357e343cd91" (UID: "02d26a97-d447-4f76-90ed-9357e343cd91"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 07:19:27 crc kubenswrapper[4922]: I1128 07:19:27.462168 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/02d26a97-d447-4f76-90ed-9357e343cd91-kube-api-access-pcgtm" (OuterVolumeSpecName: "kube-api-access-pcgtm") pod "02d26a97-d447-4f76-90ed-9357e343cd91" (UID: "02d26a97-d447-4f76-90ed-9357e343cd91"). InnerVolumeSpecName "kube-api-access-pcgtm". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 07:19:27 crc kubenswrapper[4922]: I1128 07:19:27.462230 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7e1382f2-6597-4c09-a171-8709e4b9f5f7-kube-api-access-fxzpk" (OuterVolumeSpecName: "kube-api-access-fxzpk") pod "7e1382f2-6597-4c09-a171-8709e4b9f5f7" (UID: "7e1382f2-6597-4c09-a171-8709e4b9f5f7"). InnerVolumeSpecName "kube-api-access-fxzpk". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 07:19:27 crc kubenswrapper[4922]: I1128 07:19:27.475249 4922 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="04965f12-78a7-459e-bbd6-0c716678f561" path="/var/lib/kubelet/pods/04965f12-78a7-459e-bbd6-0c716678f561/volumes" Nov 28 07:19:27 crc kubenswrapper[4922]: I1128 07:19:27.483608 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7c0f0857-2ca9-49c8-90ac-1351b2ee2f11-kube-api-access-x5f5t" (OuterVolumeSpecName: "kube-api-access-x5f5t") pod "7c0f0857-2ca9-49c8-90ac-1351b2ee2f11" (UID: "7c0f0857-2ca9-49c8-90ac-1351b2ee2f11"). InnerVolumeSpecName "kube-api-access-x5f5t". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 07:19:27 crc kubenswrapper[4922]: I1128 07:19:27.483966 4922 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2a50cebf-c40b-425a-86a1-7813277f1b5a" path="/var/lib/kubelet/pods/2a50cebf-c40b-425a-86a1-7813277f1b5a/volumes" Nov 28 07:19:27 crc kubenswrapper[4922]: I1128 07:19:27.485703 4922 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pcgtm\" (UniqueName: \"kubernetes.io/projected/02d26a97-d447-4f76-90ed-9357e343cd91-kube-api-access-pcgtm\") on node \"crc\" DevicePath \"\"" Nov 28 07:19:27 crc kubenswrapper[4922]: I1128 07:19:27.485725 4922 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-sm2rq\" (UniqueName: \"kubernetes.io/projected/b2566655-e076-471c-af4c-1e218f70ebe1-kube-api-access-sm2rq\") on node \"crc\" DevicePath \"\"" Nov 28 07:19:27 crc kubenswrapper[4922]: I1128 07:19:27.485737 4922 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9lhk5\" (UniqueName: \"kubernetes.io/projected/ed30601b-1e7c-4aa6-8469-8ff61cd93253-kube-api-access-9lhk5\") on node \"crc\" DevicePath \"\"" Nov 28 07:19:27 crc kubenswrapper[4922]: I1128 07:19:27.485746 4922 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/02d26a97-d447-4f76-90ed-9357e343cd91-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 07:19:27 crc kubenswrapper[4922]: I1128 07:19:27.485756 4922 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fxzpk\" (UniqueName: \"kubernetes.io/projected/7e1382f2-6597-4c09-a171-8709e4b9f5f7-kube-api-access-fxzpk\") on node \"crc\" DevicePath \"\"" Nov 28 07:19:27 crc kubenswrapper[4922]: I1128 07:19:27.485775 4922 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") on node \"crc\" " Nov 28 07:19:27 crc kubenswrapper[4922]: I1128 07:19:27.485785 4922 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x5f5t\" (UniqueName: \"kubernetes.io/projected/7c0f0857-2ca9-49c8-90ac-1351b2ee2f11-kube-api-access-x5f5t\") on node \"crc\" DevicePath \"\"" Nov 28 07:19:27 crc kubenswrapper[4922]: I1128 07:19:27.485795 4922 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5rdg5\" 
(UniqueName: \"kubernetes.io/projected/182970fb-401f-404c-81c1-db0294b02167-kube-api-access-5rdg5\") on node \"crc\" DevicePath \"\"" Nov 28 07:19:27 crc kubenswrapper[4922]: I1128 07:19:27.485813 4922 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dkc4c\" (UniqueName: \"kubernetes.io/projected/1d97532b-e9ff-4031-a82c-3db5e943bfd9-kube-api-access-dkc4c\") on node \"crc\" DevicePath \"\"" Nov 28 07:19:27 crc kubenswrapper[4922]: I1128 07:19:27.485825 4922 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") on node \"crc\" " Nov 28 07:19:27 crc kubenswrapper[4922]: I1128 07:19:27.485835 4922 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0cd494ee-6c17-4d94-96c2-9e2fcf02b2bc-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 07:19:27 crc kubenswrapper[4922]: I1128 07:19:27.485843 4922 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ed30601b-1e7c-4aa6-8469-8ff61cd93253-logs\") on node \"crc\" DevicePath \"\"" Nov 28 07:19:27 crc kubenswrapper[4922]: I1128 07:19:27.485872 4922 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="33e395bd-1a5e-4c48-829d-e7b3408e9b8e" path="/var/lib/kubelet/pods/33e395bd-1a5e-4c48-829d-e7b3408e9b8e/volumes" Nov 28 07:19:27 crc kubenswrapper[4922]: I1128 07:19:27.486380 4922 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="678d1f5b-5ebc-4b9e-b5ab-316ec7dfda05" path="/var/lib/kubelet/pods/678d1f5b-5ebc-4b9e-b5ab-316ec7dfda05/volumes" Nov 28 07:19:27 crc kubenswrapper[4922]: I1128 07:19:27.487046 4922 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7a229aad-3dbd-40d3-85ef-38fe07deaf5b" path="/var/lib/kubelet/pods/7a229aad-3dbd-40d3-85ef-38fe07deaf5b/volumes" Nov 28 07:19:27 crc kubenswrapper[4922]: I1128 07:19:27.488285 4922 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9b8dd495-8375-4779-9d6f-db1c25affa16" path="/var/lib/kubelet/pods/9b8dd495-8375-4779-9d6f-db1c25affa16/volumes" Nov 28 07:19:27 crc kubenswrapper[4922]: I1128 07:19:27.488892 4922 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d27299ac-7d8d-4485-86fb-6ac7f34ea1ae" path="/var/lib/kubelet/pods/d27299ac-7d8d-4485-86fb-6ac7f34ea1ae/volumes" Nov 28 07:19:27 crc kubenswrapper[4922]: I1128 07:19:27.496534 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f672d6bb-97fc-4547-a14b-af27d631fe2a-kube-api-access-sqp4r" (OuterVolumeSpecName: "kube-api-access-sqp4r") pod "f672d6bb-97fc-4547-a14b-af27d631fe2a" (UID: "f672d6bb-97fc-4547-a14b-af27d631fe2a"). InnerVolumeSpecName "kube-api-access-sqp4r". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 07:19:27 crc kubenswrapper[4922]: E1128 07:19:27.573196 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-h8wk6_openshift-machine-config-operator(0498340a-5e95-42bf-a0a6-8ac89a6b8858)\"" pod="openshift-machine-config-operator/machine-config-daemon-h8wk6" podUID="0498340a-5e95-42bf-a0a6-8ac89a6b8858" Nov 28 07:19:27 crc kubenswrapper[4922]: I1128 07:19:27.589195 4922 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-sqp4r\" (UniqueName: \"kubernetes.io/projected/f672d6bb-97fc-4547-a14b-af27d631fe2a-kube-api-access-sqp4r\") on node \"crc\" DevicePath \"\"" Nov 28 07:19:27 crc kubenswrapper[4922]: E1128 07:19:27.589299 4922 configmap.go:193] Couldn't get configMap openstack/rabbitmq-config-data: configmap "rabbitmq-config-data" not found Nov 28 07:19:27 crc kubenswrapper[4922]: E1128 07:19:27.589351 4922 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/4cf25acc-0d60-4b0a-a9c9-adc7ddce7458-config-data podName:4cf25acc-0d60-4b0a-a9c9-adc7ddce7458 nodeName:}" failed. No retries permitted until 2025-11-28 07:19:35.589332644 +0000 UTC m=+1620.509728226 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "config-data" (UniqueName: "kubernetes.io/configmap/4cf25acc-0d60-4b0a-a9c9-adc7ddce7458-config-data") pod "rabbitmq-server-0" (UID: "4cf25acc-0d60-4b0a-a9c9-adc7ddce7458") : configmap "rabbitmq-config-data" not found Nov 28 07:19:27 crc kubenswrapper[4922]: I1128 07:19:27.737511 4922 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/ovn-controller-xqzrg" podUID="e5e01f31-28bd-46a2-b5cc-695c485deaf6" containerName="ovn-controller" probeResult="failure" output="command timed out" Nov 28 07:19:27 crc kubenswrapper[4922]: I1128 07:19:27.866202 4922 kubelet_pods.go:1007] "Unable to retrieve pull secret, the image pull may not succeed." pod="openstack/placementdbc9-account-delete-68tbl" secret="" err="secret \"galera-openstack-dockercfg-882fh\" not found" Nov 28 07:19:27 crc kubenswrapper[4922]: I1128 07:19:27.870797 4922 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/barbican-api-757bbb5fbd-lx4kn" podUID="f1e0e318-5b90-4c18-ba95-fc261ffb519d" containerName="barbican-api-log" containerID="cri-o://e36de2182dd35fee67fc5ebf689f54d2adc2e37e33fa381890f944ad027f5c3a" gracePeriod=30 Nov 28 07:19:27 crc kubenswrapper[4922]: I1128 07:19:27.870943 4922 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/barbican-api-757bbb5fbd-lx4kn" podUID="f1e0e318-5b90-4c18-ba95-fc261ffb519d" containerName="barbican-api" containerID="cri-o://076165d9817bbf196b798dd4cf1a2a4dea75e096874598893a033392c7a13f59" gracePeriod=30 Nov 28 07:19:27 crc kubenswrapper[4922]: I1128 07:19:27.887596 4922 generic.go:334] "Generic (PLEG): container finished" podID="8d12d285-16c4-4e64-98d8-cff0f581aee4" containerID="6327f387153de8972c997a6ac2a21401ab00a932a9990342cf61d1e499fcf45e" exitCode=0 Nov 28 07:19:27 crc kubenswrapper[4922]: I1128 07:19:27.889908 4922 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-0" Nov 28 07:19:27 crc kubenswrapper[4922]: I1128 07:19:27.891793 4922 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-scheduler-0" Nov 28 07:19:27 crc kubenswrapper[4922]: I1128 07:19:27.896180 4922 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/placementdbc9-account-delete-68tbl" podStartSLOduration=7.896162798 podStartE2EDuration="7.896162798s" podCreationTimestamp="2025-11-28 07:19:20 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 07:19:27.88270009 +0000 UTC m=+1612.803095682" watchObservedRunningTime="2025-11-28 07:19:27.896162798 +0000 UTC m=+1612.816558380" Nov 28 07:19:27 crc kubenswrapper[4922]: I1128 07:19:27.909987 4922 generic.go:334] "Generic (PLEG): container finished" podID="f672d6bb-97fc-4547-a14b-af27d631fe2a" containerID="97f477a9860f7b7b30cd717b142d2d8ffd74a63e7e7efeaa441ecae8870aecb0" exitCode=2 Nov 28 07:19:27 crc kubenswrapper[4922]: I1128 07:19:27.910357 4922 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-api-757bbb5fbd-lx4kn" podStartSLOduration=8.910321655 podStartE2EDuration="8.910321655s" podCreationTimestamp="2025-11-28 07:19:19 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 07:19:27.903336628 +0000 UTC m=+1612.823732210" watchObservedRunningTime="2025-11-28 07:19:27.910321655 +0000 UTC m=+1612.830717237" Nov 28 07:19:27 crc kubenswrapper[4922]: I1128 07:19:27.910473 4922 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/kube-state-metrics-0" Nov 28 07:19:27 crc kubenswrapper[4922]: I1128 07:19:27.918563 4922 generic.go:334] "Generic (PLEG): container finished" podID="0498340a-5e95-42bf-a0a6-8ac89a6b8858" containerID="50188513677cb6b14ed255820648795c8e06f68b16a46dc87e268985d568770f" exitCode=0 Nov 28 07:19:27 crc kubenswrapper[4922]: I1128 07:19:27.921583 4922 kubelet_pods.go:1007] "Unable to retrieve pull secret, the image pull may not succeed." pod="openstack/barbican5229-account-delete-qq87v" secret="" err="secret \"galera-openstack-dockercfg-882fh\" not found" Nov 28 07:19:27 crc kubenswrapper[4922]: I1128 07:19:27.923132 4922 generic.go:334] "Generic (PLEG): container finished" podID="c5b2a607-b6c1-4e95-b722-8b150c25f371" containerID="46994e6ac2bb018930a78ad0cd1fe4646ebb6039110f4ddf448db9a5fa5fd689" exitCode=0 Nov 28 07:19:27 crc kubenswrapper[4922]: I1128 07:19:27.923160 4922 generic.go:334] "Generic (PLEG): container finished" podID="c5b2a607-b6c1-4e95-b722-8b150c25f371" containerID="1e15529dfacc098964836d9b402c0c6f8dfa68f13f57273de3bff5e93b296db5" exitCode=2 Nov 28 07:19:27 crc kubenswrapper[4922]: I1128 07:19:27.923167 4922 generic.go:334] "Generic (PLEG): container finished" podID="c5b2a607-b6c1-4e95-b722-8b150c25f371" containerID="76e1e7d5729fdebb0173eee23985b2e33e4bffc2543f1da0f92592472e21c4ec" exitCode=0 Nov 28 07:19:27 crc kubenswrapper[4922]: I1128 07:19:27.924941 4922 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Nov 28 07:19:27 crc kubenswrapper[4922]: I1128 07:19:27.926417 4922 kubelet_pods.go:1007] "Unable to retrieve pull secret, the image pull may not succeed." pod="openstack/novaapi78f1-account-delete-mwv4p" secret="" err="secret \"galera-openstack-dockercfg-882fh\" not found" Nov 28 07:19:27 crc kubenswrapper[4922]: I1128 07:19:27.931446 4922 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-api-7947bcd956-482dv" Nov 28 07:19:27 crc kubenswrapper[4922]: I1128 07:19:27.931476 4922 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 28 07:19:27 crc kubenswrapper[4922]: I1128 07:19:27.931914 4922 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/swift-proxy-cc5b55cb5-8tgkn" Nov 28 07:19:27 crc kubenswrapper[4922]: I1128 07:19:27.931967 4922 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-api-0" Nov 28 07:19:27 crc kubenswrapper[4922]: I1128 07:19:27.932023 4922 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Nov 28 07:19:27 crc kubenswrapper[4922]: I1128 07:19:27.932048 4922 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-5f78c88b9d-zp4nm" Nov 28 07:19:27 crc kubenswrapper[4922]: I1128 07:19:27.932253 4922 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Nov 28 07:19:27 crc kubenswrapper[4922]: I1128 07:19:27.933893 4922 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-6bc646c8f9-nfgpb" podUID="2a50cebf-c40b-425a-86a1-7813277f1b5a" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.195:5353: i/o timeout" Nov 28 07:19:27 crc kubenswrapper[4922]: I1128 07:19:27.944194 4922 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican5229-account-delete-qq87v" podStartSLOduration=7.944169235 podStartE2EDuration="7.944169235s" podCreationTimestamp="2025-11-28 07:19:20 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 07:19:27.933815769 +0000 UTC m=+1612.854211371" watchObservedRunningTime="2025-11-28 07:19:27.944169235 +0000 UTC m=+1612.864564837" Nov 28 07:19:27 crc kubenswrapper[4922]: I1128 07:19:27.965057 4922 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/novaapi78f1-account-delete-mwv4p" podStartSLOduration=6.9650437400000005 podStartE2EDuration="6.96504374s" podCreationTimestamp="2025-11-28 07:19:21 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 07:19:27.96164062 +0000 UTC m=+1612.882036202" watchObservedRunningTime="2025-11-28 07:19:27.96504374 +0000 UTC m=+1612.885439322" Nov 28 07:19:27 crc kubenswrapper[4922]: E1128 07:19:27.969767 4922 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="e49fd2c5ff7e7c88e4e82dd56690d77b902a8ad478e8bdadcc87b5a3e2af26de" cmd=["/usr/bin/pgrep","-r","DRST","nova-conductor"] Nov 28 07:19:27 crc kubenswrapper[4922]: I1128 07:19:27.970364 4922 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/ovn-controller-xqzrg" podUID="e5e01f31-28bd-46a2-b5cc-695c485deaf6" containerName="ovn-controller" probeResult="failure" output=< Nov 28 07:19:27 crc kubenswrapper[4922]: ERROR - Failed to get connection status from ovn-controller, ovn-appctl exit status: 0 Nov 28 07:19:27 crc kubenswrapper[4922]: > Nov 28 07:19:27 crc kubenswrapper[4922]: I1128 07:19:27.978321 4922 
operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b2566655-e076-471c-af4c-1e218f70ebe1-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "b2566655-e076-471c-af4c-1e218f70ebe1" (UID: "b2566655-e076-471c-af4c-1e218f70ebe1"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 07:19:27 crc kubenswrapper[4922]: E1128 07:19:27.979265 4922 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="e49fd2c5ff7e7c88e4e82dd56690d77b902a8ad478e8bdadcc87b5a3e2af26de" cmd=["/usr/bin/pgrep","-r","DRST","nova-conductor"] Nov 28 07:19:27 crc kubenswrapper[4922]: E1128 07:19:27.981960 4922 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="e49fd2c5ff7e7c88e4e82dd56690d77b902a8ad478e8bdadcc87b5a3e2af26de" cmd=["/usr/bin/pgrep","-r","DRST","nova-conductor"] Nov 28 07:19:27 crc kubenswrapper[4922]: E1128 07:19:27.981985 4922 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/nova-cell0-conductor-0" podUID="7dfc2e52-b959-4718-8f85-5bcec1a8ad10" containerName="nova-cell0-conductor-conductor" Nov 28 07:19:28 crc kubenswrapper[4922]: I1128 07:19:28.005971 4922 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage11-crc" (UniqueName: "kubernetes.io/local-volume/local-storage11-crc") on node "crc" Nov 28 07:19:28 crc kubenswrapper[4922]: E1128 07:19:28.020018 4922 configmap.go:193] Couldn't get configMap openstack/openstack-scripts: configmap "openstack-scripts" not found Nov 28 07:19:28 crc kubenswrapper[4922]: E1128 07:19:28.020079 4922 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/ca6d488f-6085-4e22-a325-1b749d8c154c-operator-scripts podName:ca6d488f-6085-4e22-a325-1b749d8c154c nodeName:}" failed. No retries permitted until 2025-11-28 07:19:28.520063724 +0000 UTC m=+1613.440459306 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "operator-scripts" (UniqueName: "kubernetes.io/configmap/ca6d488f-6085-4e22-a325-1b749d8c154c-operator-scripts") pod "placementdbc9-account-delete-68tbl" (UID: "ca6d488f-6085-4e22-a325-1b749d8c154c") : configmap "openstack-scripts" not found Nov 28 07:19:28 crc kubenswrapper[4922]: E1128 07:19:28.020247 4922 configmap.go:193] Couldn't get configMap openstack/openstack-scripts: configmap "openstack-scripts" not found Nov 28 07:19:28 crc kubenswrapper[4922]: E1128 07:19:28.020309 4922 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/3e442fd0-cd46-4c04-afb3-96892d39c0f4-operator-scripts podName:3e442fd0-cd46-4c04-afb3-96892d39c0f4 nodeName:}" failed. No retries permitted until 2025-11-28 07:19:28.52027083 +0000 UTC m=+1613.440666412 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "operator-scripts" (UniqueName: "kubernetes.io/configmap/3e442fd0-cd46-4c04-afb3-96892d39c0f4-operator-scripts") pod "barbican5229-account-delete-qq87v" (UID: "3e442fd0-cd46-4c04-afb3-96892d39c0f4") : configmap "openstack-scripts" not found Nov 28 07:19:28 crc kubenswrapper[4922]: E1128 07:19:28.020343 4922 configmap.go:193] Couldn't get configMap openstack/openstack-scripts: configmap "openstack-scripts" not found Nov 28 07:19:28 crc kubenswrapper[4922]: E1128 07:19:28.020390 4922 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/2425a44a-c3c8-4533-9aa6-deb657556efb-operator-scripts podName:2425a44a-c3c8-4533-9aa6-deb657556efb nodeName:}" failed. No retries permitted until 2025-11-28 07:19:28.520382423 +0000 UTC m=+1613.440778005 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "operator-scripts" (UniqueName: "kubernetes.io/configmap/2425a44a-c3c8-4533-9aa6-deb657556efb-operator-scripts") pod "novaapi78f1-account-delete-mwv4p" (UID: "2425a44a-c3c8-4533-9aa6-deb657556efb") : configmap "openstack-scripts" not found Nov 28 07:19:28 crc kubenswrapper[4922]: I1128 07:19:28.020410 4922 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b2566655-e076-471c-af4c-1e218f70ebe1-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 07:19:28 crc kubenswrapper[4922]: I1128 07:19:28.020739 4922 reconciler_common.go:293] "Volume detached for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") on node \"crc\" DevicePath \"\"" Nov 28 07:19:28 crc kubenswrapper[4922]: I1128 07:19:28.023886 4922 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/community-operators-lpw65" podUID="21fdc98b-667f-44b1-9fae-87f96ba4b514" containerName="registry-server" probeResult="failure" output=< Nov 28 07:19:28 crc kubenswrapper[4922]: timeout: failed to connect service ":50051" within 1s Nov 28 07:19:28 crc kubenswrapper[4922]: > Nov 28 07:19:28 crc kubenswrapper[4922]: I1128 07:19:28.064575 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c038b865-4b32-4be3-9e0a-8c40dc140a68-config-data" (OuterVolumeSpecName: "config-data") pod "c038b865-4b32-4be3-9e0a-8c40dc140a68" (UID: "c038b865-4b32-4be3-9e0a-8c40dc140a68"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 07:19:28 crc kubenswrapper[4922]: I1128 07:19:28.082510 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b2566655-e076-471c-af4c-1e218f70ebe1-config-data" (OuterVolumeSpecName: "config-data") pod "b2566655-e076-471c-af4c-1e218f70ebe1" (UID: "b2566655-e076-471c-af4c-1e218f70ebe1"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 07:19:28 crc kubenswrapper[4922]: I1128 07:19:28.095226 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f672d6bb-97fc-4547-a14b-af27d631fe2a-kube-state-metrics-tls-config" (OuterVolumeSpecName: "kube-state-metrics-tls-config") pod "f672d6bb-97fc-4547-a14b-af27d631fe2a" (UID: "f672d6bb-97fc-4547-a14b-af27d631fe2a"). InnerVolumeSpecName "kube-state-metrics-tls-config". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 07:19:28 crc kubenswrapper[4922]: I1128 07:19:28.117066 4922 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage10-crc" (UniqueName: "kubernetes.io/local-volume/local-storage10-crc") on node "crc" Nov 28 07:19:28 crc kubenswrapper[4922]: E1128 07:19:28.126630 4922 configmap.go:193] Couldn't get configMap openstack/rabbitmq-cell1-config-data: configmap "rabbitmq-cell1-config-data" not found Nov 28 07:19:28 crc kubenswrapper[4922]: I1128 07:19:28.126670 4922 reconciler_common.go:293] "Volume detached for volume \"kube-state-metrics-tls-config\" (UniqueName: \"kubernetes.io/secret/f672d6bb-97fc-4547-a14b-af27d631fe2a-kube-state-metrics-tls-config\") on node \"crc\" DevicePath \"\"" Nov 28 07:19:28 crc kubenswrapper[4922]: E1128 07:19:28.126716 4922 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/99708a5d-57d5-4479-8e09-94428bb13fa3-config-data podName:99708a5d-57d5-4479-8e09-94428bb13fa3 nodeName:}" failed. No retries permitted until 2025-11-28 07:19:36.126698201 +0000 UTC m=+1621.047093783 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "config-data" (UniqueName: "kubernetes.io/configmap/99708a5d-57d5-4479-8e09-94428bb13fa3-config-data") pod "rabbitmq-cell1-server-0" (UID: "99708a5d-57d5-4479-8e09-94428bb13fa3") : configmap "rabbitmq-cell1-config-data" not found Nov 28 07:19:28 crc kubenswrapper[4922]: I1128 07:19:28.127030 4922 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b2566655-e076-471c-af4c-1e218f70ebe1-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 07:19:28 crc kubenswrapper[4922]: I1128 07:19:28.127052 4922 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c038b865-4b32-4be3-9e0a-8c40dc140a68-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 07:19:28 crc kubenswrapper[4922]: I1128 07:19:28.127064 4922 reconciler_common.go:293] "Volume detached for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") on node \"crc\" DevicePath \"\"" Nov 28 07:19:28 crc kubenswrapper[4922]: I1128 07:19:28.189948 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1d97532b-e9ff-4031-a82c-3db5e943bfd9-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "1d97532b-e9ff-4031-a82c-3db5e943bfd9" (UID: "1d97532b-e9ff-4031-a82c-3db5e943bfd9"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 07:19:28 crc kubenswrapper[4922]: I1128 07:19:28.230449 4922 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1d97532b-e9ff-4031-a82c-3db5e943bfd9-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 07:19:28 crc kubenswrapper[4922]: I1128 07:19:28.231419 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ed30601b-1e7c-4aa6-8469-8ff61cd93253-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "ed30601b-1e7c-4aa6-8469-8ff61cd93253" (UID: "ed30601b-1e7c-4aa6-8469-8ff61cd93253"). InnerVolumeSpecName "public-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 07:19:28 crc kubenswrapper[4922]: I1128 07:19:28.231499 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0cd494ee-6c17-4d94-96c2-9e2fcf02b2bc-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "0cd494ee-6c17-4d94-96c2-9e2fcf02b2bc" (UID: "0cd494ee-6c17-4d94-96c2-9e2fcf02b2bc"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 07:19:28 crc kubenswrapper[4922]: I1128 07:19:28.332425 4922 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0cd494ee-6c17-4d94-96c2-9e2fcf02b2bc-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 07:19:28 crc kubenswrapper[4922]: I1128 07:19:28.332453 4922 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/ed30601b-1e7c-4aa6-8469-8ff61cd93253-public-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 28 07:19:28 crc kubenswrapper[4922]: I1128 07:19:28.350910 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c038b865-4b32-4be3-9e0a-8c40dc140a68-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "c038b865-4b32-4be3-9e0a-8c40dc140a68" (UID: "c038b865-4b32-4be3-9e0a-8c40dc140a68"). InnerVolumeSpecName "internal-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 07:19:28 crc kubenswrapper[4922]: I1128 07:19:28.409381 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1d97532b-e9ff-4031-a82c-3db5e943bfd9-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "1d97532b-e9ff-4031-a82c-3db5e943bfd9" (UID: "1d97532b-e9ff-4031-a82c-3db5e943bfd9"). InnerVolumeSpecName "public-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 07:19:28 crc kubenswrapper[4922]: I1128 07:19:28.436967 4922 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/c038b865-4b32-4be3-9e0a-8c40dc140a68-internal-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 28 07:19:28 crc kubenswrapper[4922]: I1128 07:19:28.437726 4922 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/1d97532b-e9ff-4031-a82c-3db5e943bfd9-public-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 28 07:19:28 crc kubenswrapper[4922]: I1128 07:19:28.443435 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0cd494ee-6c17-4d94-96c2-9e2fcf02b2bc-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "0cd494ee-6c17-4d94-96c2-9e2fcf02b2bc" (UID: "0cd494ee-6c17-4d94-96c2-9e2fcf02b2bc"). InnerVolumeSpecName "internal-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 07:19:28 crc kubenswrapper[4922]: I1128 07:19:28.451388 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1d97532b-e9ff-4031-a82c-3db5e943bfd9-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "1d97532b-e9ff-4031-a82c-3db5e943bfd9" (UID: "1d97532b-e9ff-4031-a82c-3db5e943bfd9"). InnerVolumeSpecName "internal-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 07:19:28 crc kubenswrapper[4922]: I1128 07:19:28.467198 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ed30601b-1e7c-4aa6-8469-8ff61cd93253-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "ed30601b-1e7c-4aa6-8469-8ff61cd93253" (UID: "ed30601b-1e7c-4aa6-8469-8ff61cd93253"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 07:19:28 crc kubenswrapper[4922]: I1128 07:19:28.468349 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7e1382f2-6597-4c09-a171-8709e4b9f5f7-nova-metadata-tls-certs" (OuterVolumeSpecName: "nova-metadata-tls-certs") pod "7e1382f2-6597-4c09-a171-8709e4b9f5f7" (UID: "7e1382f2-6597-4c09-a171-8709e4b9f5f7"). InnerVolumeSpecName "nova-metadata-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 07:19:28 crc kubenswrapper[4922]: I1128 07:19:28.480469 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1d97532b-e9ff-4031-a82c-3db5e943bfd9-config-data" (OuterVolumeSpecName: "config-data") pod "1d97532b-e9ff-4031-a82c-3db5e943bfd9" (UID: "1d97532b-e9ff-4031-a82c-3db5e943bfd9"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 07:19:28 crc kubenswrapper[4922]: I1128 07:19:28.528256 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ed30601b-1e7c-4aa6-8469-8ff61cd93253-config-data" (OuterVolumeSpecName: "config-data") pod "ed30601b-1e7c-4aa6-8469-8ff61cd93253" (UID: "ed30601b-1e7c-4aa6-8469-8ff61cd93253"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 07:19:28 crc kubenswrapper[4922]: I1128 07:19:28.531440 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/182970fb-401f-404c-81c1-db0294b02167-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "182970fb-401f-404c-81c1-db0294b02167" (UID: "182970fb-401f-404c-81c1-db0294b02167"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 07:19:28 crc kubenswrapper[4922]: I1128 07:19:28.531492 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7e1382f2-6597-4c09-a171-8709e4b9f5f7-config-data" (OuterVolumeSpecName: "config-data") pod "7e1382f2-6597-4c09-a171-8709e4b9f5f7" (UID: "7e1382f2-6597-4c09-a171-8709e4b9f5f7"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 07:19:28 crc kubenswrapper[4922]: I1128 07:19:28.536501 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7e1382f2-6597-4c09-a171-8709e4b9f5f7-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "7e1382f2-6597-4c09-a171-8709e4b9f5f7" (UID: "7e1382f2-6597-4c09-a171-8709e4b9f5f7"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 07:19:28 crc kubenswrapper[4922]: I1128 07:19:28.538930 4922 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ed30601b-1e7c-4aa6-8469-8ff61cd93253-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 07:19:28 crc kubenswrapper[4922]: I1128 07:19:28.538960 4922 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ed30601b-1e7c-4aa6-8469-8ff61cd93253-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 07:19:28 crc kubenswrapper[4922]: I1128 07:19:28.538971 4922 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7e1382f2-6597-4c09-a171-8709e4b9f5f7-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 07:19:28 crc kubenswrapper[4922]: I1128 07:19:28.538983 4922 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/182970fb-401f-404c-81c1-db0294b02167-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 07:19:28 crc kubenswrapper[4922]: I1128 07:19:28.538998 4922 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/1d97532b-e9ff-4031-a82c-3db5e943bfd9-internal-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 28 07:19:28 crc kubenswrapper[4922]: I1128 07:19:28.539009 4922 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7e1382f2-6597-4c09-a171-8709e4b9f5f7-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 07:19:28 crc kubenswrapper[4922]: I1128 07:19:28.539021 4922 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1d97532b-e9ff-4031-a82c-3db5e943bfd9-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 07:19:28 crc kubenswrapper[4922]: I1128 07:19:28.539032 4922 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/0cd494ee-6c17-4d94-96c2-9e2fcf02b2bc-internal-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 28 07:19:28 crc kubenswrapper[4922]: I1128 07:19:28.539042 4922 reconciler_common.go:293] "Volume detached for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/7e1382f2-6597-4c09-a171-8709e4b9f5f7-nova-metadata-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 28 07:19:28 crc kubenswrapper[4922]: E1128 07:19:28.539106 4922 configmap.go:193] Couldn't get configMap openstack/openstack-scripts: configmap "openstack-scripts" not found Nov 28 07:19:28 crc kubenswrapper[4922]: E1128 07:19:28.539158 4922 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/ca6d488f-6085-4e22-a325-1b749d8c154c-operator-scripts podName:ca6d488f-6085-4e22-a325-1b749d8c154c nodeName:}" failed. No retries permitted until 2025-11-28 07:19:29.539140325 +0000 UTC m=+1614.459535977 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "operator-scripts" (UniqueName: "kubernetes.io/configmap/ca6d488f-6085-4e22-a325-1b749d8c154c-operator-scripts") pod "placementdbc9-account-delete-68tbl" (UID: "ca6d488f-6085-4e22-a325-1b749d8c154c") : configmap "openstack-scripts" not found Nov 28 07:19:28 crc kubenswrapper[4922]: E1128 07:19:28.539188 4922 configmap.go:193] Couldn't get configMap openstack/openstack-scripts: configmap "openstack-scripts" not found Nov 28 07:19:28 crc kubenswrapper[4922]: E1128 07:19:28.539247 4922 configmap.go:193] Couldn't get configMap openstack/openstack-scripts: configmap "openstack-scripts" not found Nov 28 07:19:28 crc kubenswrapper[4922]: E1128 07:19:28.539283 4922 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/3e442fd0-cd46-4c04-afb3-96892d39c0f4-operator-scripts podName:3e442fd0-cd46-4c04-afb3-96892d39c0f4 nodeName:}" failed. No retries permitted until 2025-11-28 07:19:29.539225877 +0000 UTC m=+1614.459621459 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "operator-scripts" (UniqueName: "kubernetes.io/configmap/3e442fd0-cd46-4c04-afb3-96892d39c0f4-operator-scripts") pod "barbican5229-account-delete-qq87v" (UID: "3e442fd0-cd46-4c04-afb3-96892d39c0f4") : configmap "openstack-scripts" not found Nov 28 07:19:28 crc kubenswrapper[4922]: E1128 07:19:28.539326 4922 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/2425a44a-c3c8-4533-9aa6-deb657556efb-operator-scripts podName:2425a44a-c3c8-4533-9aa6-deb657556efb nodeName:}" failed. No retries permitted until 2025-11-28 07:19:29.539309869 +0000 UTC m=+1614.459705451 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "operator-scripts" (UniqueName: "kubernetes.io/configmap/2425a44a-c3c8-4533-9aa6-deb657556efb-operator-scripts") pod "novaapi78f1-account-delete-mwv4p" (UID: "2425a44a-c3c8-4533-9aa6-deb657556efb") : configmap "openstack-scripts" not found Nov 28 07:19:28 crc kubenswrapper[4922]: I1128 07:19:28.587586 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7c0f0857-2ca9-49c8-90ac-1351b2ee2f11-config-data" (OuterVolumeSpecName: "config-data") pod "7c0f0857-2ca9-49c8-90ac-1351b2ee2f11" (UID: "7c0f0857-2ca9-49c8-90ac-1351b2ee2f11"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 07:19:28 crc kubenswrapper[4922]: I1128 07:19:28.588168 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f672d6bb-97fc-4547-a14b-af27d631fe2a-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "f672d6bb-97fc-4547-a14b-af27d631fe2a" (UID: "f672d6bb-97fc-4547-a14b-af27d631fe2a"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 07:19:28 crc kubenswrapper[4922]: I1128 07:19:28.597075 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ed30601b-1e7c-4aa6-8469-8ff61cd93253-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "ed30601b-1e7c-4aa6-8469-8ff61cd93253" (UID: "ed30601b-1e7c-4aa6-8469-8ff61cd93253"). InnerVolumeSpecName "internal-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 07:19:28 crc kubenswrapper[4922]: I1128 07:19:28.607097 4922 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/rabbitmq-server-0" podUID="4cf25acc-0d60-4b0a-a9c9-adc7ddce7458" containerName="rabbitmq" probeResult="failure" output="dial tcp 10.217.0.101:5671: connect: connection refused" Nov 28 07:19:28 crc kubenswrapper[4922]: I1128 07:19:28.616525 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c038b865-4b32-4be3-9e0a-8c40dc140a68-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "c038b865-4b32-4be3-9e0a-8c40dc140a68" (UID: "c038b865-4b32-4be3-9e0a-8c40dc140a68"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 07:19:28 crc kubenswrapper[4922]: I1128 07:19:28.634987 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/182970fb-401f-404c-81c1-db0294b02167-config-data" (OuterVolumeSpecName: "config-data") pod "182970fb-401f-404c-81c1-db0294b02167" (UID: "182970fb-401f-404c-81c1-db0294b02167"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 07:19:28 crc kubenswrapper[4922]: I1128 07:19:28.640708 4922 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/182970fb-401f-404c-81c1-db0294b02167-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 07:19:28 crc kubenswrapper[4922]: I1128 07:19:28.640731 4922 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7c0f0857-2ca9-49c8-90ac-1351b2ee2f11-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 07:19:28 crc kubenswrapper[4922]: I1128 07:19:28.640740 4922 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f672d6bb-97fc-4547-a14b-af27d631fe2a-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 07:19:28 crc kubenswrapper[4922]: I1128 07:19:28.640752 4922 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c038b865-4b32-4be3-9e0a-8c40dc140a68-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 07:19:28 crc kubenswrapper[4922]: I1128 07:19:28.640760 4922 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/ed30601b-1e7c-4aa6-8469-8ff61cd93253-internal-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 28 07:19:28 crc kubenswrapper[4922]: I1128 07:19:28.643794 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f672d6bb-97fc-4547-a14b-af27d631fe2a-kube-state-metrics-tls-certs" (OuterVolumeSpecName: "kube-state-metrics-tls-certs") pod "f672d6bb-97fc-4547-a14b-af27d631fe2a" (UID: "f672d6bb-97fc-4547-a14b-af27d631fe2a"). InnerVolumeSpecName "kube-state-metrics-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 07:19:28 crc kubenswrapper[4922]: I1128 07:19:28.646879 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7c0f0857-2ca9-49c8-90ac-1351b2ee2f11-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "7c0f0857-2ca9-49c8-90ac-1351b2ee2f11" (UID: "7c0f0857-2ca9-49c8-90ac-1351b2ee2f11"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 07:19:28 crc kubenswrapper[4922]: I1128 07:19:28.676824 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c038b865-4b32-4be3-9e0a-8c40dc140a68-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "c038b865-4b32-4be3-9e0a-8c40dc140a68" (UID: "c038b865-4b32-4be3-9e0a-8c40dc140a68"). InnerVolumeSpecName "public-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 07:19:28 crc kubenswrapper[4922]: I1128 07:19:28.682034 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/02d26a97-d447-4f76-90ed-9357e343cd91-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "02d26a97-d447-4f76-90ed-9357e343cd91" (UID: "02d26a97-d447-4f76-90ed-9357e343cd91"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 07:19:28 crc kubenswrapper[4922]: I1128 07:19:28.682531 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/02d26a97-d447-4f76-90ed-9357e343cd91-config-data" (OuterVolumeSpecName: "config-data") pod "02d26a97-d447-4f76-90ed-9357e343cd91" (UID: "02d26a97-d447-4f76-90ed-9357e343cd91"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 07:19:28 crc kubenswrapper[4922]: I1128 07:19:28.700818 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0cd494ee-6c17-4d94-96c2-9e2fcf02b2bc-config-data" (OuterVolumeSpecName: "config-data") pod "0cd494ee-6c17-4d94-96c2-9e2fcf02b2bc" (UID: "0cd494ee-6c17-4d94-96c2-9e2fcf02b2bc"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 07:19:28 crc kubenswrapper[4922]: I1128 07:19:28.706399 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/182970fb-401f-404c-81c1-db0294b02167-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "182970fb-401f-404c-81c1-db0294b02167" (UID: "182970fb-401f-404c-81c1-db0294b02167"). InnerVolumeSpecName "public-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 07:19:28 crc kubenswrapper[4922]: I1128 07:19:28.742787 4922 reconciler_common.go:293] "Volume detached for volume \"kube-state-metrics-tls-certs\" (UniqueName: \"kubernetes.io/secret/f672d6bb-97fc-4547-a14b-af27d631fe2a-kube-state-metrics-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 28 07:19:28 crc kubenswrapper[4922]: I1128 07:19:28.742814 4922 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/02d26a97-d447-4f76-90ed-9357e343cd91-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 07:19:28 crc kubenswrapper[4922]: I1128 07:19:28.742823 4922 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0cd494ee-6c17-4d94-96c2-9e2fcf02b2bc-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 07:19:28 crc kubenswrapper[4922]: I1128 07:19:28.742831 4922 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/182970fb-401f-404c-81c1-db0294b02167-public-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 28 07:19:28 crc kubenswrapper[4922]: I1128 07:19:28.742839 4922 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7c0f0857-2ca9-49c8-90ac-1351b2ee2f11-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 07:19:28 crc kubenswrapper[4922]: I1128 07:19:28.742847 4922 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/02d26a97-d447-4f76-90ed-9357e343cd91-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 07:19:28 crc kubenswrapper[4922]: I1128 07:19:28.742855 4922 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/c038b865-4b32-4be3-9e0a-8c40dc140a68-public-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 28 07:19:28 crc kubenswrapper[4922]: I1128 07:19:28.754703 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/02d26a97-d447-4f76-90ed-9357e343cd91-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "02d26a97-d447-4f76-90ed-9357e343cd91" (UID: "02d26a97-d447-4f76-90ed-9357e343cd91"). InnerVolumeSpecName "internal-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 07:19:28 crc kubenswrapper[4922]: I1128 07:19:28.762864 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/02d26a97-d447-4f76-90ed-9357e343cd91-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "02d26a97-d447-4f76-90ed-9357e343cd91" (UID: "02d26a97-d447-4f76-90ed-9357e343cd91"). InnerVolumeSpecName "public-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 07:19:28 crc kubenswrapper[4922]: E1128 07:19:28.803273 4922 kubelet.go:2526] "Housekeeping took longer than expected" err="housekeeping took too long" expected="1s" actual="1.401s" Nov 28 07:19:28 crc kubenswrapper[4922]: I1128 07:19:28.803320 4922 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/barbican-api-757bbb5fbd-lx4kn" Nov 28 07:19:28 crc kubenswrapper[4922]: I1128 07:19:28.803340 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placementdbc9-account-delete-68tbl" event={"ID":"ca6d488f-6085-4e22-a325-1b749d8c154c","Type":"ContainerStarted","Data":"bb3ce5985f855b9f6b316fd8b713d9eabb904b705a7cf10e089e680825de9d7f"} Nov 28 07:19:28 crc kubenswrapper[4922]: I1128 07:19:28.803362 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron5206-account-delete-64tk8" event={"ID":"92eb4ce5-fb24-4b33-8e79-6f4e7ba96372","Type":"ContainerDied","Data":"9a6103901834cc16af5d88e6469564dcb347e6903d65d1a0a0aa50a2d30dd4a9"} Nov 28 07:19:28 crc kubenswrapper[4922]: I1128 07:19:28.803380 4922 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="9a6103901834cc16af5d88e6469564dcb347e6903d65d1a0a0aa50a2d30dd4a9" Nov 28 07:19:28 crc kubenswrapper[4922]: I1128 07:19:28.803447 4922 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/barbican-api-757bbb5fbd-lx4kn" Nov 28 07:19:28 crc kubenswrapper[4922]: I1128 07:19:28.803461 4922 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Nov 28 07:19:28 crc kubenswrapper[4922]: I1128 07:19:28.803484 4922 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovsdbserver-sb-0"] Nov 28 07:19:28 crc kubenswrapper[4922]: I1128 07:19:28.803497 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-757bbb5fbd-lx4kn" event={"ID":"f1e0e318-5b90-4c18-ba95-fc261ffb519d","Type":"ContainerStarted","Data":"076165d9817bbf196b798dd4cf1a2a4dea75e096874598893a033392c7a13f59"} Nov 28 07:19:28 crc kubenswrapper[4922]: I1128 07:19:28.803510 4922 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ovsdbserver-sb-0"] Nov 28 07:19:28 crc kubenswrapper[4922]: I1128 07:19:28.803524 4922 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/openstack-cell1-galera-0"] Nov 28 07:19:28 crc kubenswrapper[4922]: I1128 07:19:28.803538 4922 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/openstack-cell1-galera-0"] Nov 28 07:19:28 crc kubenswrapper[4922]: I1128 07:19:28.803553 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/memcached-0" event={"ID":"8d12d285-16c4-4e64-98d8-cff0f581aee4","Type":"ContainerDied","Data":"6327f387153de8972c997a6ac2a21401ab00a932a9990342cf61d1e499fcf45e"} Nov 28 07:19:28 crc kubenswrapper[4922]: I1128 07:19:28.803568 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-0" event={"ID":"b2566655-e076-471c-af4c-1e218f70ebe1","Type":"ContainerDied","Data":"819311f28c457a5a7faf754afca98ecda7331c7ca3e9872a95371658a82f12de"} Nov 28 07:19:28 crc kubenswrapper[4922]: I1128 07:19:28.803588 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"7c0f0857-2ca9-49c8-90ac-1351b2ee2f11","Type":"ContainerDied","Data":"6d71811e5741d52b835a8b9bf6419fd18e742986cfea62f14048e501ac950896"} Nov 28 07:19:28 crc kubenswrapper[4922]: I1128 07:19:28.803617 4922 kubelet.go:2453] "SyncLoop 
(PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"f672d6bb-97fc-4547-a14b-af27d631fe2a","Type":"ContainerDied","Data":"97f477a9860f7b7b30cd717b142d2d8ffd74a63e7e7efeaa441ecae8870aecb0"} Nov 28 07:19:28 crc kubenswrapper[4922]: I1128 07:19:28.803633 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"f672d6bb-97fc-4547-a14b-af27d631fe2a","Type":"ContainerDied","Data":"00331edf10326990451fd6b99f8df5e28c199a8ac8a9c97d3c2a18b04fdc3517"} Nov 28 07:19:28 crc kubenswrapper[4922]: I1128 07:19:28.803644 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-h8wk6" event={"ID":"0498340a-5e95-42bf-a0a6-8ac89a6b8858","Type":"ContainerDied","Data":"50188513677cb6b14ed255820648795c8e06f68b16a46dc87e268985d568770f"} Nov 28 07:19:28 crc kubenswrapper[4922]: I1128 07:19:28.803663 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican5229-account-delete-qq87v" event={"ID":"3e442fd0-cd46-4c04-afb3-96892d39c0f4","Type":"ContainerStarted","Data":"48a3ef178ad02775387bcd01d0bac70332b409ebbeaf25296cb9c690b01503c9"} Nov 28 07:19:28 crc kubenswrapper[4922]: I1128 07:19:28.803677 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"c5b2a607-b6c1-4e95-b722-8b150c25f371","Type":"ContainerDied","Data":"46994e6ac2bb018930a78ad0cd1fe4646ebb6039110f4ddf448db9a5fa5fd689"} Nov 28 07:19:28 crc kubenswrapper[4922]: I1128 07:19:28.803699 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"c5b2a607-b6c1-4e95-b722-8b150c25f371","Type":"ContainerDied","Data":"1e15529dfacc098964836d9b402c0c6f8dfa68f13f57273de3bff5e93b296db5"} Nov 28 07:19:28 crc kubenswrapper[4922]: I1128 07:19:28.803709 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"c5b2a607-b6c1-4e95-b722-8b150c25f371","Type":"ContainerDied","Data":"76e1e7d5729fdebb0173eee23985b2e33e4bffc2543f1da0f92592472e21c4ec"} Nov 28 07:19:28 crc kubenswrapper[4922]: I1128 07:19:28.803719 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"ed30601b-1e7c-4aa6-8469-8ff61cd93253","Type":"ContainerDied","Data":"e0df2ebd96b3d5d19928117437f5b4a2d59ac3de23850ed4adddc3d9deb76717"} Nov 28 07:19:28 crc kubenswrapper[4922]: I1128 07:19:28.803738 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/novaapi78f1-account-delete-mwv4p" event={"ID":"2425a44a-c3c8-4533-9aa6-deb657556efb","Type":"ContainerStarted","Data":"d7bcecc803abbf48fe4458864ac3df0f1c2163fcc4bbd9b6d2dd3b5b90dbbe4e"} Nov 28 07:19:28 crc kubenswrapper[4922]: I1128 07:19:28.803755 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cindere247-account-delete-5kjlk" event={"ID":"1018f07c-38b9-440f-b126-26e59293e757","Type":"ContainerDied","Data":"0da564880ff2e86c3526a1c95f92a4bf1c199ab670d8f52be60cbd4ad3bef35a"} Nov 28 07:19:28 crc kubenswrapper[4922]: I1128 07:19:28.803767 4922 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="0da564880ff2e86c3526a1c95f92a4bf1c199ab670d8f52be60cbd4ad3bef35a" Nov 28 07:19:28 crc kubenswrapper[4922]: I1128 07:19:28.807820 4922 scope.go:117] "RemoveContainer" containerID="50188513677cb6b14ed255820648795c8e06f68b16a46dc87e268985d568770f" Nov 28 07:19:28 crc kubenswrapper[4922]: E1128 07:19:28.808028 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for 
\"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-h8wk6_openshift-machine-config-operator(0498340a-5e95-42bf-a0a6-8ac89a6b8858)\"" pod="openshift-machine-config-operator/machine-config-daemon-h8wk6" podUID="0498340a-5e95-42bf-a0a6-8ac89a6b8858" Nov 28 07:19:28 crc kubenswrapper[4922]: I1128 07:19:28.831972 4922 scope.go:117] "RemoveContainer" containerID="240daa62f7498e10be12162ab84970d0b62d583627e61eb191c0b48920ae86ed" Nov 28 07:19:28 crc kubenswrapper[4922]: I1128 07:19:28.857767 4922 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/02d26a97-d447-4f76-90ed-9357e343cd91-public-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 28 07:19:28 crc kubenswrapper[4922]: I1128 07:19:28.857816 4922 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/02d26a97-d447-4f76-90ed-9357e343cd91-internal-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 28 07:19:28 crc kubenswrapper[4922]: I1128 07:19:28.951326 4922 generic.go:334] "Generic (PLEG): container finished" podID="4cf25acc-0d60-4b0a-a9c9-adc7ddce7458" containerID="0b65d7751c631796afddb9d5cb6be8b33791f093200379b257e1458a02ef94be" exitCode=0 Nov 28 07:19:28 crc kubenswrapper[4922]: I1128 07:19:28.951398 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"4cf25acc-0d60-4b0a-a9c9-adc7ddce7458","Type":"ContainerDied","Data":"0b65d7751c631796afddb9d5cb6be8b33791f093200379b257e1458a02ef94be"} Nov 28 07:19:28 crc kubenswrapper[4922]: I1128 07:19:28.953581 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/memcached-0" event={"ID":"8d12d285-16c4-4e64-98d8-cff0f581aee4","Type":"ContainerDied","Data":"af33c01520597a5c76e0a71520199a315868151f769652c34a44acedd0a51dfd"} Nov 28 07:19:28 crc kubenswrapper[4922]: I1128 07:19:28.953682 4922 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="af33c01520597a5c76e0a71520199a315868151f769652c34a44acedd0a51dfd" Nov 28 07:19:28 crc kubenswrapper[4922]: I1128 07:19:28.963375 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/novacell032a6-account-delete-tcwgc" event={"ID":"5e4e7296-ad39-41c1-9399-b3c9072c9158","Type":"ContainerStarted","Data":"1b294e250b4e8005476292722797df8e5418fe5de28dbe00f75e79efc8faf395"} Nov 28 07:19:28 crc kubenswrapper[4922]: I1128 07:19:28.966795 4922 generic.go:334] "Generic (PLEG): container finished" podID="99708a5d-57d5-4479-8e09-94428bb13fa3" containerID="58bc4c962a13bd392e1f3d48869c89613df686531a50bf08290a8747044899ed" exitCode=0 Nov 28 07:19:28 crc kubenswrapper[4922]: I1128 07:19:28.966848 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"99708a5d-57d5-4479-8e09-94428bb13fa3","Type":"ContainerDied","Data":"58bc4c962a13bd392e1f3d48869c89613df686531a50bf08290a8747044899ed"} Nov 28 07:19:28 crc kubenswrapper[4922]: I1128 07:19:28.968380 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance75df-account-delete-wdw2x" event={"ID":"1f339784-df58-44f7-947d-9d80559c1c0c","Type":"ContainerStarted","Data":"e61f1fa7822b5f2b3605c61ec01fc8d12a428bfdcca76852ff14c1bcc915a5cd"} Nov 28 07:19:28 crc kubenswrapper[4922]: I1128 07:19:28.981626 4922 generic.go:334] "Generic (PLEG): container finished" podID="f1e0e318-5b90-4c18-ba95-fc261ffb519d" 
containerID="e36de2182dd35fee67fc5ebf689f54d2adc2e37e33fa381890f944ad027f5c3a" exitCode=143 Nov 28 07:19:28 crc kubenswrapper[4922]: I1128 07:19:28.982720 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-757bbb5fbd-lx4kn" event={"ID":"f1e0e318-5b90-4c18-ba95-fc261ffb519d","Type":"ContainerDied","Data":"e36de2182dd35fee67fc5ebf689f54d2adc2e37e33fa381890f944ad027f5c3a"} Nov 28 07:19:28 crc kubenswrapper[4922]: I1128 07:19:28.982586 4922 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/novacell032a6-account-delete-tcwgc" podStartSLOduration=7.982565233 podStartE2EDuration="7.982565233s" podCreationTimestamp="2025-11-28 07:19:21 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 07:19:28.97981095 +0000 UTC m=+1613.900206532" watchObservedRunningTime="2025-11-28 07:19:28.982565233 +0000 UTC m=+1613.902960815" Nov 28 07:19:28 crc kubenswrapper[4922]: I1128 07:19:28.992616 4922 kubelet_pods.go:1007] "Unable to retrieve pull secret, the image pull may not succeed." pod="openstack/glance75df-account-delete-wdw2x" secret="" err="secret \"galera-openstack-dockercfg-882fh\" not found" Nov 28 07:19:29 crc kubenswrapper[4922]: I1128 07:19:29.000038 4922 kubelet_pods.go:1007] "Unable to retrieve pull secret, the image pull may not succeed." pod="openstack/novaapi78f1-account-delete-mwv4p" secret="" err="secret \"galera-openstack-dockercfg-882fh\" not found" Nov 28 07:19:29 crc kubenswrapper[4922]: I1128 07:19:29.000819 4922 kubelet_pods.go:1007] "Unable to retrieve pull secret, the image pull may not succeed." pod="openstack/novacell032a6-account-delete-tcwgc" secret="" err="secret \"galera-openstack-dockercfg-882fh\" not found" Nov 28 07:19:29 crc kubenswrapper[4922]: I1128 07:19:29.000875 4922 kubelet_pods.go:1007] "Unable to retrieve pull secret, the image pull may not succeed." pod="openstack/placementdbc9-account-delete-68tbl" secret="" err="secret \"galera-openstack-dockercfg-882fh\" not found" Nov 28 07:19:29 crc kubenswrapper[4922]: I1128 07:19:29.001662 4922 kubelet_pods.go:1007] "Unable to retrieve pull secret, the image pull may not succeed." pod="openstack/barbican5229-account-delete-qq87v" secret="" err="secret \"galera-openstack-dockercfg-882fh\" not found" Nov 28 07:19:29 crc kubenswrapper[4922]: I1128 07:19:29.004175 4922 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance75df-account-delete-wdw2x" podStartSLOduration=10.004164237 podStartE2EDuration="10.004164237s" podCreationTimestamp="2025-11-28 07:19:19 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 07:19:28.995708522 +0000 UTC m=+1613.916104104" watchObservedRunningTime="2025-11-28 07:19:29.004164237 +0000 UTC m=+1613.924559819" Nov 28 07:19:29 crc kubenswrapper[4922]: E1128 07:19:29.166667 4922 configmap.go:193] Couldn't get configMap openstack/openstack-scripts: configmap "openstack-scripts" not found Nov 28 07:19:29 crc kubenswrapper[4922]: E1128 07:19:29.166735 4922 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/1f339784-df58-44f7-947d-9d80559c1c0c-operator-scripts podName:1f339784-df58-44f7-947d-9d80559c1c0c nodeName:}" failed. No retries permitted until 2025-11-28 07:19:29.666716352 +0000 UTC m=+1614.587112014 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "operator-scripts" (UniqueName: "kubernetes.io/configmap/1f339784-df58-44f7-947d-9d80559c1c0c-operator-scripts") pod "glance75df-account-delete-wdw2x" (UID: "1f339784-df58-44f7-947d-9d80559c1c0c") : configmap "openstack-scripts" not found Nov 28 07:19:29 crc kubenswrapper[4922]: E1128 07:19:29.166915 4922 configmap.go:193] Couldn't get configMap openstack/openstack-scripts: configmap "openstack-scripts" not found Nov 28 07:19:29 crc kubenswrapper[4922]: E1128 07:19:29.168089 4922 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5e4e7296-ad39-41c1-9399-b3c9072c9158-operator-scripts podName:5e4e7296-ad39-41c1-9399-b3c9072c9158 nodeName:}" failed. No retries permitted until 2025-11-28 07:19:29.668065078 +0000 UTC m=+1614.588460660 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "operator-scripts" (UniqueName: "kubernetes.io/configmap/5e4e7296-ad39-41c1-9399-b3c9072c9158-operator-scripts") pod "novacell032a6-account-delete-tcwgc" (UID: "5e4e7296-ad39-41c1-9399-b3c9072c9158") : configmap "openstack-scripts" not found Nov 28 07:19:29 crc kubenswrapper[4922]: I1128 07:19:29.186795 4922 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cindere247-account-delete-5kjlk" Nov 28 07:19:29 crc kubenswrapper[4922]: I1128 07:19:29.220228 4922 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron5206-account-delete-64tk8" Nov 28 07:19:29 crc kubenswrapper[4922]: I1128 07:19:29.220402 4922 scope.go:117] "RemoveContainer" containerID="e866e4c558d425e16d02cae7a1249331e5e9aee144a65a24341d72360112484f" Nov 28 07:19:29 crc kubenswrapper[4922]: I1128 07:19:29.225027 4922 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-conductor-0"] Nov 28 07:19:29 crc kubenswrapper[4922]: I1128 07:19:29.238832 4922 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-conductor-0"] Nov 28 07:19:29 crc kubenswrapper[4922]: I1128 07:19:29.255811 4922 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/memcached-0" Nov 28 07:19:29 crc kubenswrapper[4922]: I1128 07:19:29.258155 4922 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/swift-proxy-cc5b55cb5-8tgkn"] Nov 28 07:19:29 crc kubenswrapper[4922]: I1128 07:19:29.267860 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/1018f07c-38b9-440f-b126-26e59293e757-operator-scripts\") pod \"1018f07c-38b9-440f-b126-26e59293e757\" (UID: \"1018f07c-38b9-440f-b126-26e59293e757\") " Nov 28 07:19:29 crc kubenswrapper[4922]: I1128 07:19:29.267961 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wfkfd\" (UniqueName: \"kubernetes.io/projected/1018f07c-38b9-440f-b126-26e59293e757-kube-api-access-wfkfd\") pod \"1018f07c-38b9-440f-b126-26e59293e757\" (UID: \"1018f07c-38b9-440f-b126-26e59293e757\") " Nov 28 07:19:29 crc kubenswrapper[4922]: I1128 07:19:29.268674 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1018f07c-38b9-440f-b126-26e59293e757-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "1018f07c-38b9-440f-b126-26e59293e757" (UID: "1018f07c-38b9-440f-b126-26e59293e757"). InnerVolumeSpecName "operator-scripts". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 07:19:29 crc kubenswrapper[4922]: I1128 07:19:29.269091 4922 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/1018f07c-38b9-440f-b126-26e59293e757-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 07:19:29 crc kubenswrapper[4922]: I1128 07:19:29.278366 4922 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-server-0" Nov 28 07:19:29 crc kubenswrapper[4922]: I1128 07:19:29.281327 4922 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/swift-proxy-cc5b55cb5-8tgkn"] Nov 28 07:19:29 crc kubenswrapper[4922]: I1128 07:19:29.284464 4922 scope.go:117] "RemoveContainer" containerID="165900f07622025ebdb7502eec91c0d20df88aaec6da0163ef170ae871ddc19f" Nov 28 07:19:29 crc kubenswrapper[4922]: I1128 07:19:29.286623 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1018f07c-38b9-440f-b126-26e59293e757-kube-api-access-wfkfd" (OuterVolumeSpecName: "kube-api-access-wfkfd") pod "1018f07c-38b9-440f-b126-26e59293e757" (UID: "1018f07c-38b9-440f-b126-26e59293e757"). InnerVolumeSpecName "kube-api-access-wfkfd". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 07:19:29 crc kubenswrapper[4922]: I1128 07:19:29.293945 4922 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-api-7947bcd956-482dv"] Nov 28 07:19:29 crc kubenswrapper[4922]: I1128 07:19:29.295034 4922 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Nov 28 07:19:29 crc kubenswrapper[4922]: I1128 07:19:29.308499 4922 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-api-7947bcd956-482dv"] Nov 28 07:19:29 crc kubenswrapper[4922]: I1128 07:19:29.329341 4922 scope.go:117] "RemoveContainer" containerID="94b76863ba343d4c01e9d0ccc2f68e6d6a4daef999bf262b9a27c34ee538c8c3" Nov 28 07:19:29 crc kubenswrapper[4922]: I1128 07:19:29.357645 4922 scope.go:117] "RemoveContainer" containerID="6595b56740c828b4071cf0e6ba075ef2934efd23c448c0814e7743491287af11" Nov 28 07:19:29 crc kubenswrapper[4922]: I1128 07:19:29.368086 4922 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/kube-state-metrics-0"] Nov 28 07:19:29 crc kubenswrapper[4922]: I1128 07:19:29.369698 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/4cf25acc-0d60-4b0a-a9c9-adc7ddce7458-rabbitmq-plugins\") pod \"4cf25acc-0d60-4b0a-a9c9-adc7ddce7458\" (UID: \"4cf25acc-0d60-4b0a-a9c9-adc7ddce7458\") " Nov 28 07:19:29 crc kubenswrapper[4922]: I1128 07:19:29.369776 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mnkvs\" (UniqueName: \"kubernetes.io/projected/8d12d285-16c4-4e64-98d8-cff0f581aee4-kube-api-access-mnkvs\") pod \"8d12d285-16c4-4e64-98d8-cff0f581aee4\" (UID: \"8d12d285-16c4-4e64-98d8-cff0f581aee4\") " Nov 28 07:19:29 crc kubenswrapper[4922]: I1128 07:19:29.369827 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/4cf25acc-0d60-4b0a-a9c9-adc7ddce7458-config-data\") pod \"4cf25acc-0d60-4b0a-a9c9-adc7ddce7458\" (UID: \"4cf25acc-0d60-4b0a-a9c9-adc7ddce7458\") " Nov 28 07:19:29 crc kubenswrapper[4922]: I1128 07:19:29.369872 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for 
volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/4cf25acc-0d60-4b0a-a9c9-adc7ddce7458-rabbitmq-tls\") pod \"4cf25acc-0d60-4b0a-a9c9-adc7ddce7458\" (UID: \"4cf25acc-0d60-4b0a-a9c9-adc7ddce7458\") " Nov 28 07:19:29 crc kubenswrapper[4922]: I1128 07:19:29.369897 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-26gv7\" (UniqueName: \"kubernetes.io/projected/4cf25acc-0d60-4b0a-a9c9-adc7ddce7458-kube-api-access-26gv7\") pod \"4cf25acc-0d60-4b0a-a9c9-adc7ddce7458\" (UID: \"4cf25acc-0d60-4b0a-a9c9-adc7ddce7458\") " Nov 28 07:19:29 crc kubenswrapper[4922]: I1128 07:19:29.369916 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/92eb4ce5-fb24-4b33-8e79-6f4e7ba96372-operator-scripts\") pod \"92eb4ce5-fb24-4b33-8e79-6f4e7ba96372\" (UID: \"92eb4ce5-fb24-4b33-8e79-6f4e7ba96372\") " Nov 28 07:19:29 crc kubenswrapper[4922]: I1128 07:19:29.369939 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8d12d285-16c4-4e64-98d8-cff0f581aee4-combined-ca-bundle\") pod \"8d12d285-16c4-4e64-98d8-cff0f581aee4\" (UID: \"8d12d285-16c4-4e64-98d8-cff0f581aee4\") " Nov 28 07:19:29 crc kubenswrapper[4922]: I1128 07:19:29.369974 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-s5phw\" (UniqueName: \"kubernetes.io/projected/92eb4ce5-fb24-4b33-8e79-6f4e7ba96372-kube-api-access-s5phw\") pod \"92eb4ce5-fb24-4b33-8e79-6f4e7ba96372\" (UID: \"92eb4ce5-fb24-4b33-8e79-6f4e7ba96372\") " Nov 28 07:19:29 crc kubenswrapper[4922]: I1128 07:19:29.369994 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/4cf25acc-0d60-4b0a-a9c9-adc7ddce7458-rabbitmq-confd\") pod \"4cf25acc-0d60-4b0a-a9c9-adc7ddce7458\" (UID: \"4cf25acc-0d60-4b0a-a9c9-adc7ddce7458\") " Nov 28 07:19:29 crc kubenswrapper[4922]: I1128 07:19:29.370009 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/8d12d285-16c4-4e64-98d8-cff0f581aee4-config-data\") pod \"8d12d285-16c4-4e64-98d8-cff0f581aee4\" (UID: \"8d12d285-16c4-4e64-98d8-cff0f581aee4\") " Nov 28 07:19:29 crc kubenswrapper[4922]: I1128 07:19:29.370028 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/4cf25acc-0d60-4b0a-a9c9-adc7ddce7458-pod-info\") pod \"4cf25acc-0d60-4b0a-a9c9-adc7ddce7458\" (UID: \"4cf25acc-0d60-4b0a-a9c9-adc7ddce7458\") " Nov 28 07:19:29 crc kubenswrapper[4922]: I1128 07:19:29.370047 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/4cf25acc-0d60-4b0a-a9c9-adc7ddce7458-server-conf\") pod \"4cf25acc-0d60-4b0a-a9c9-adc7ddce7458\" (UID: \"4cf25acc-0d60-4b0a-a9c9-adc7ddce7458\") " Nov 28 07:19:29 crc kubenswrapper[4922]: I1128 07:19:29.370067 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/8d12d285-16c4-4e64-98d8-cff0f581aee4-kolla-config\") pod \"8d12d285-16c4-4e64-98d8-cff0f581aee4\" (UID: \"8d12d285-16c4-4e64-98d8-cff0f581aee4\") " Nov 28 07:19:29 crc kubenswrapper[4922]: I1128 07:19:29.370094 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume 
\"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/4cf25acc-0d60-4b0a-a9c9-adc7ddce7458-plugins-conf\") pod \"4cf25acc-0d60-4b0a-a9c9-adc7ddce7458\" (UID: \"4cf25acc-0d60-4b0a-a9c9-adc7ddce7458\") " Nov 28 07:19:29 crc kubenswrapper[4922]: I1128 07:19:29.370118 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"persistence\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"4cf25acc-0d60-4b0a-a9c9-adc7ddce7458\" (UID: \"4cf25acc-0d60-4b0a-a9c9-adc7ddce7458\") " Nov 28 07:19:29 crc kubenswrapper[4922]: I1128 07:19:29.370137 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/4cf25acc-0d60-4b0a-a9c9-adc7ddce7458-erlang-cookie-secret\") pod \"4cf25acc-0d60-4b0a-a9c9-adc7ddce7458\" (UID: \"4cf25acc-0d60-4b0a-a9c9-adc7ddce7458\") " Nov 28 07:19:29 crc kubenswrapper[4922]: I1128 07:19:29.370180 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/4cf25acc-0d60-4b0a-a9c9-adc7ddce7458-rabbitmq-erlang-cookie\") pod \"4cf25acc-0d60-4b0a-a9c9-adc7ddce7458\" (UID: \"4cf25acc-0d60-4b0a-a9c9-adc7ddce7458\") " Nov 28 07:19:29 crc kubenswrapper[4922]: I1128 07:19:29.370207 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"memcached-tls-certs\" (UniqueName: \"kubernetes.io/secret/8d12d285-16c4-4e64-98d8-cff0f581aee4-memcached-tls-certs\") pod \"8d12d285-16c4-4e64-98d8-cff0f581aee4\" (UID: \"8d12d285-16c4-4e64-98d8-cff0f581aee4\") " Nov 28 07:19:29 crc kubenswrapper[4922]: I1128 07:19:29.370543 4922 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wfkfd\" (UniqueName: \"kubernetes.io/projected/1018f07c-38b9-440f-b126-26e59293e757-kube-api-access-wfkfd\") on node \"crc\" DevicePath \"\"" Nov 28 07:19:29 crc kubenswrapper[4922]: I1128 07:19:29.376536 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4cf25acc-0d60-4b0a-a9c9-adc7ddce7458-rabbitmq-plugins" (OuterVolumeSpecName: "rabbitmq-plugins") pod "4cf25acc-0d60-4b0a-a9c9-adc7ddce7458" (UID: "4cf25acc-0d60-4b0a-a9c9-adc7ddce7458"). InnerVolumeSpecName "rabbitmq-plugins". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 07:19:29 crc kubenswrapper[4922]: I1128 07:19:29.376828 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/92eb4ce5-fb24-4b33-8e79-6f4e7ba96372-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "92eb4ce5-fb24-4b33-8e79-6f4e7ba96372" (UID: "92eb4ce5-fb24-4b33-8e79-6f4e7ba96372"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 07:19:29 crc kubenswrapper[4922]: I1128 07:19:29.379446 4922 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/kube-state-metrics-0"] Nov 28 07:19:29 crc kubenswrapper[4922]: I1128 07:19:29.381458 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8d12d285-16c4-4e64-98d8-cff0f581aee4-kolla-config" (OuterVolumeSpecName: "kolla-config") pod "8d12d285-16c4-4e64-98d8-cff0f581aee4" (UID: "8d12d285-16c4-4e64-98d8-cff0f581aee4"). InnerVolumeSpecName "kolla-config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 07:19:29 crc kubenswrapper[4922]: I1128 07:19:29.381902 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8d12d285-16c4-4e64-98d8-cff0f581aee4-config-data" (OuterVolumeSpecName: "config-data") pod "8d12d285-16c4-4e64-98d8-cff0f581aee4" (UID: "8d12d285-16c4-4e64-98d8-cff0f581aee4"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 07:19:29 crc kubenswrapper[4922]: I1128 07:19:29.395447 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4cf25acc-0d60-4b0a-a9c9-adc7ddce7458-kube-api-access-26gv7" (OuterVolumeSpecName: "kube-api-access-26gv7") pod "4cf25acc-0d60-4b0a-a9c9-adc7ddce7458" (UID: "4cf25acc-0d60-4b0a-a9c9-adc7ddce7458"). InnerVolumeSpecName "kube-api-access-26gv7". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 07:19:29 crc kubenswrapper[4922]: I1128 07:19:29.395814 4922 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 28 07:19:29 crc kubenswrapper[4922]: I1128 07:19:29.395935 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4cf25acc-0d60-4b0a-a9c9-adc7ddce7458-rabbitmq-erlang-cookie" (OuterVolumeSpecName: "rabbitmq-erlang-cookie") pod "4cf25acc-0d60-4b0a-a9c9-adc7ddce7458" (UID: "4cf25acc-0d60-4b0a-a9c9-adc7ddce7458"). InnerVolumeSpecName "rabbitmq-erlang-cookie". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 07:19:29 crc kubenswrapper[4922]: I1128 07:19:29.396647 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4cf25acc-0d60-4b0a-a9c9-adc7ddce7458-plugins-conf" (OuterVolumeSpecName: "plugins-conf") pod "4cf25acc-0d60-4b0a-a9c9-adc7ddce7458" (UID: "4cf25acc-0d60-4b0a-a9c9-adc7ddce7458"). InnerVolumeSpecName "plugins-conf". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 07:19:29 crc kubenswrapper[4922]: I1128 07:19:29.402664 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/92eb4ce5-fb24-4b33-8e79-6f4e7ba96372-kube-api-access-s5phw" (OuterVolumeSpecName: "kube-api-access-s5phw") pod "92eb4ce5-fb24-4b33-8e79-6f4e7ba96372" (UID: "92eb4ce5-fb24-4b33-8e79-6f4e7ba96372"). InnerVolumeSpecName "kube-api-access-s5phw". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 07:19:29 crc kubenswrapper[4922]: I1128 07:19:29.403715 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4cf25acc-0d60-4b0a-a9c9-adc7ddce7458-erlang-cookie-secret" (OuterVolumeSpecName: "erlang-cookie-secret") pod "4cf25acc-0d60-4b0a-a9c9-adc7ddce7458" (UID: "4cf25acc-0d60-4b0a-a9c9-adc7ddce7458"). InnerVolumeSpecName "erlang-cookie-secret". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 07:19:29 crc kubenswrapper[4922]: I1128 07:19:29.449369 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8d12d285-16c4-4e64-98d8-cff0f581aee4-kube-api-access-mnkvs" (OuterVolumeSpecName: "kube-api-access-mnkvs") pod "8d12d285-16c4-4e64-98d8-cff0f581aee4" (UID: "8d12d285-16c4-4e64-98d8-cff0f581aee4"). InnerVolumeSpecName "kube-api-access-mnkvs". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 07:19:29 crc kubenswrapper[4922]: I1128 07:19:29.450685 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/downward-api/4cf25acc-0d60-4b0a-a9c9-adc7ddce7458-pod-info" (OuterVolumeSpecName: "pod-info") pod "4cf25acc-0d60-4b0a-a9c9-adc7ddce7458" (UID: "4cf25acc-0d60-4b0a-a9c9-adc7ddce7458"). InnerVolumeSpecName "pod-info". PluginName "kubernetes.io/downward-api", VolumeGidValue "" Nov 28 07:19:29 crc kubenswrapper[4922]: I1128 07:19:29.457737 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage07-crc" (OuterVolumeSpecName: "persistence") pod "4cf25acc-0d60-4b0a-a9c9-adc7ddce7458" (UID: "4cf25acc-0d60-4b0a-a9c9-adc7ddce7458"). InnerVolumeSpecName "local-storage07-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Nov 28 07:19:29 crc kubenswrapper[4922]: I1128 07:19:29.459474 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4cf25acc-0d60-4b0a-a9c9-adc7ddce7458-rabbitmq-tls" (OuterVolumeSpecName: "rabbitmq-tls") pod "4cf25acc-0d60-4b0a-a9c9-adc7ddce7458" (UID: "4cf25acc-0d60-4b0a-a9c9-adc7ddce7458"). InnerVolumeSpecName "rabbitmq-tls". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 07:19:29 crc kubenswrapper[4922]: I1128 07:19:29.471821 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/99708a5d-57d5-4479-8e09-94428bb13fa3-rabbitmq-erlang-cookie\") pod \"99708a5d-57d5-4479-8e09-94428bb13fa3\" (UID: \"99708a5d-57d5-4479-8e09-94428bb13fa3\") " Nov 28 07:19:29 crc kubenswrapper[4922]: I1128 07:19:29.471869 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/99708a5d-57d5-4479-8e09-94428bb13fa3-rabbitmq-confd\") pod \"99708a5d-57d5-4479-8e09-94428bb13fa3\" (UID: \"99708a5d-57d5-4479-8e09-94428bb13fa3\") " Nov 28 07:19:29 crc kubenswrapper[4922]: I1128 07:19:29.471906 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/99708a5d-57d5-4479-8e09-94428bb13fa3-config-data\") pod \"99708a5d-57d5-4479-8e09-94428bb13fa3\" (UID: \"99708a5d-57d5-4479-8e09-94428bb13fa3\") " Nov 28 07:19:29 crc kubenswrapper[4922]: I1128 07:19:29.471929 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/99708a5d-57d5-4479-8e09-94428bb13fa3-pod-info\") pod \"99708a5d-57d5-4479-8e09-94428bb13fa3\" (UID: \"99708a5d-57d5-4479-8e09-94428bb13fa3\") " Nov 28 07:19:29 crc kubenswrapper[4922]: I1128 07:19:29.471955 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jbnrh\" (UniqueName: \"kubernetes.io/projected/99708a5d-57d5-4479-8e09-94428bb13fa3-kube-api-access-jbnrh\") pod \"99708a5d-57d5-4479-8e09-94428bb13fa3\" (UID: \"99708a5d-57d5-4479-8e09-94428bb13fa3\") " Nov 28 07:19:29 crc kubenswrapper[4922]: I1128 07:19:29.472003 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/99708a5d-57d5-4479-8e09-94428bb13fa3-rabbitmq-tls\") pod \"99708a5d-57d5-4479-8e09-94428bb13fa3\" (UID: \"99708a5d-57d5-4479-8e09-94428bb13fa3\") " Nov 28 07:19:29 crc kubenswrapper[4922]: I1128 07:19:29.472023 4922 
reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/99708a5d-57d5-4479-8e09-94428bb13fa3-rabbitmq-plugins\") pod \"99708a5d-57d5-4479-8e09-94428bb13fa3\" (UID: \"99708a5d-57d5-4479-8e09-94428bb13fa3\") " Nov 28 07:19:29 crc kubenswrapper[4922]: I1128 07:19:29.472106 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/99708a5d-57d5-4479-8e09-94428bb13fa3-plugins-conf\") pod \"99708a5d-57d5-4479-8e09-94428bb13fa3\" (UID: \"99708a5d-57d5-4479-8e09-94428bb13fa3\") " Nov 28 07:19:29 crc kubenswrapper[4922]: I1128 07:19:29.472146 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"persistence\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"99708a5d-57d5-4479-8e09-94428bb13fa3\" (UID: \"99708a5d-57d5-4479-8e09-94428bb13fa3\") " Nov 28 07:19:29 crc kubenswrapper[4922]: I1128 07:19:29.472160 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/99708a5d-57d5-4479-8e09-94428bb13fa3-server-conf\") pod \"99708a5d-57d5-4479-8e09-94428bb13fa3\" (UID: \"99708a5d-57d5-4479-8e09-94428bb13fa3\") " Nov 28 07:19:29 crc kubenswrapper[4922]: I1128 07:19:29.472180 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/99708a5d-57d5-4479-8e09-94428bb13fa3-erlang-cookie-secret\") pod \"99708a5d-57d5-4479-8e09-94428bb13fa3\" (UID: \"99708a5d-57d5-4479-8e09-94428bb13fa3\") " Nov 28 07:19:29 crc kubenswrapper[4922]: I1128 07:19:29.472650 4922 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/4cf25acc-0d60-4b0a-a9c9-adc7ddce7458-rabbitmq-tls\") on node \"crc\" DevicePath \"\"" Nov 28 07:19:29 crc kubenswrapper[4922]: I1128 07:19:29.472662 4922 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-26gv7\" (UniqueName: \"kubernetes.io/projected/4cf25acc-0d60-4b0a-a9c9-adc7ddce7458-kube-api-access-26gv7\") on node \"crc\" DevicePath \"\"" Nov 28 07:19:29 crc kubenswrapper[4922]: I1128 07:19:29.472673 4922 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/92eb4ce5-fb24-4b33-8e79-6f4e7ba96372-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 07:19:29 crc kubenswrapper[4922]: I1128 07:19:29.472681 4922 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-s5phw\" (UniqueName: \"kubernetes.io/projected/92eb4ce5-fb24-4b33-8e79-6f4e7ba96372-kube-api-access-s5phw\") on node \"crc\" DevicePath \"\"" Nov 28 07:19:29 crc kubenswrapper[4922]: I1128 07:19:29.472690 4922 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/8d12d285-16c4-4e64-98d8-cff0f581aee4-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 07:19:29 crc kubenswrapper[4922]: I1128 07:19:29.472697 4922 reconciler_common.go:293] "Volume detached for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/4cf25acc-0d60-4b0a-a9c9-adc7ddce7458-pod-info\") on node \"crc\" DevicePath \"\"" Nov 28 07:19:29 crc kubenswrapper[4922]: I1128 07:19:29.472705 4922 reconciler_common.go:293] "Volume detached for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/8d12d285-16c4-4e64-98d8-cff0f581aee4-kolla-config\") on node \"crc\" DevicePath 
\"\"" Nov 28 07:19:29 crc kubenswrapper[4922]: I1128 07:19:29.472714 4922 reconciler_common.go:293] "Volume detached for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/4cf25acc-0d60-4b0a-a9c9-adc7ddce7458-plugins-conf\") on node \"crc\" DevicePath \"\"" Nov 28 07:19:29 crc kubenswrapper[4922]: I1128 07:19:29.472731 4922 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") on node \"crc\" " Nov 28 07:19:29 crc kubenswrapper[4922]: I1128 07:19:29.472740 4922 reconciler_common.go:293] "Volume detached for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/4cf25acc-0d60-4b0a-a9c9-adc7ddce7458-erlang-cookie-secret\") on node \"crc\" DevicePath \"\"" Nov 28 07:19:29 crc kubenswrapper[4922]: I1128 07:19:29.472749 4922 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/4cf25acc-0d60-4b0a-a9c9-adc7ddce7458-rabbitmq-erlang-cookie\") on node \"crc\" DevicePath \"\"" Nov 28 07:19:29 crc kubenswrapper[4922]: I1128 07:19:29.472756 4922 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/4cf25acc-0d60-4b0a-a9c9-adc7ddce7458-rabbitmq-plugins\") on node \"crc\" DevicePath \"\"" Nov 28 07:19:29 crc kubenswrapper[4922]: I1128 07:19:29.472768 4922 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mnkvs\" (UniqueName: \"kubernetes.io/projected/8d12d285-16c4-4e64-98d8-cff0f581aee4-kube-api-access-mnkvs\") on node \"crc\" DevicePath \"\"" Nov 28 07:19:29 crc kubenswrapper[4922]: I1128 07:19:29.477523 4922 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1d97532b-e9ff-4031-a82c-3db5e943bfd9" path="/var/lib/kubelet/pods/1d97532b-e9ff-4031-a82c-3db5e943bfd9/volumes" Nov 28 07:19:29 crc kubenswrapper[4922]: I1128 07:19:29.478178 4922 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="349fc74f-b0ac-437d-89ab-7106192b8e9e" path="/var/lib/kubelet/pods/349fc74f-b0ac-437d-89ab-7106192b8e9e/volumes" Nov 28 07:19:29 crc kubenswrapper[4922]: I1128 07:19:29.480006 4922 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="39a4d24f-6b5b-48fc-ab66-1ad33462c477" path="/var/lib/kubelet/pods/39a4d24f-6b5b-48fc-ab66-1ad33462c477/volumes" Nov 28 07:19:29 crc kubenswrapper[4922]: I1128 07:19:29.480599 4922 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="98e654e6-cf7b-469f-aa60-118fee0e3764" path="/var/lib/kubelet/pods/98e654e6-cf7b-469f-aa60-118fee0e3764/volumes" Nov 28 07:19:29 crc kubenswrapper[4922]: I1128 07:19:29.481043 4922 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b2566655-e076-471c-af4c-1e218f70ebe1" path="/var/lib/kubelet/pods/b2566655-e076-471c-af4c-1e218f70ebe1/volumes" Nov 28 07:19:29 crc kubenswrapper[4922]: I1128 07:19:29.481529 4922 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c10f3b66-a7e0-4690-939a-5938de689b3a" path="/var/lib/kubelet/pods/c10f3b66-a7e0-4690-939a-5938de689b3a/volumes" Nov 28 07:19:29 crc kubenswrapper[4922]: I1128 07:19:29.482522 4922 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f672d6bb-97fc-4547-a14b-af27d631fe2a" path="/var/lib/kubelet/pods/f672d6bb-97fc-4547-a14b-af27d631fe2a/volumes" Nov 28 07:19:29 crc kubenswrapper[4922]: I1128 07:19:29.485409 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume 
"kubernetes.io/empty-dir/99708a5d-57d5-4479-8e09-94428bb13fa3-rabbitmq-erlang-cookie" (OuterVolumeSpecName: "rabbitmq-erlang-cookie") pod "99708a5d-57d5-4479-8e09-94428bb13fa3" (UID: "99708a5d-57d5-4479-8e09-94428bb13fa3"). InnerVolumeSpecName "rabbitmq-erlang-cookie". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 07:19:29 crc kubenswrapper[4922]: I1128 07:19:29.485796 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/99708a5d-57d5-4479-8e09-94428bb13fa3-plugins-conf" (OuterVolumeSpecName: "plugins-conf") pod "99708a5d-57d5-4479-8e09-94428bb13fa3" (UID: "99708a5d-57d5-4479-8e09-94428bb13fa3"). InnerVolumeSpecName "plugins-conf". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 07:19:29 crc kubenswrapper[4922]: I1128 07:19:29.487905 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/downward-api/99708a5d-57d5-4479-8e09-94428bb13fa3-pod-info" (OuterVolumeSpecName: "pod-info") pod "99708a5d-57d5-4479-8e09-94428bb13fa3" (UID: "99708a5d-57d5-4479-8e09-94428bb13fa3"). InnerVolumeSpecName "pod-info". PluginName "kubernetes.io/downward-api", VolumeGidValue "" Nov 28 07:19:29 crc kubenswrapper[4922]: I1128 07:19:29.490959 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8d12d285-16c4-4e64-98d8-cff0f581aee4-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "8d12d285-16c4-4e64-98d8-cff0f581aee4" (UID: "8d12d285-16c4-4e64-98d8-cff0f581aee4"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 07:19:29 crc kubenswrapper[4922]: I1128 07:19:29.492283 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/99708a5d-57d5-4479-8e09-94428bb13fa3-rabbitmq-plugins" (OuterVolumeSpecName: "rabbitmq-plugins") pod "99708a5d-57d5-4479-8e09-94428bb13fa3" (UID: "99708a5d-57d5-4479-8e09-94428bb13fa3"). InnerVolumeSpecName "rabbitmq-plugins". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 07:19:29 crc kubenswrapper[4922]: I1128 07:19:29.497487 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4cf25acc-0d60-4b0a-a9c9-adc7ddce7458-config-data" (OuterVolumeSpecName: "config-data") pod "4cf25acc-0d60-4b0a-a9c9-adc7ddce7458" (UID: "4cf25acc-0d60-4b0a-a9c9-adc7ddce7458"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 07:19:29 crc kubenswrapper[4922]: I1128 07:19:29.500914 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/99708a5d-57d5-4479-8e09-94428bb13fa3-kube-api-access-jbnrh" (OuterVolumeSpecName: "kube-api-access-jbnrh") pod "99708a5d-57d5-4479-8e09-94428bb13fa3" (UID: "99708a5d-57d5-4479-8e09-94428bb13fa3"). InnerVolumeSpecName "kube-api-access-jbnrh". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 07:19:29 crc kubenswrapper[4922]: I1128 07:19:29.502343 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage03-crc" (OuterVolumeSpecName: "persistence") pod "99708a5d-57d5-4479-8e09-94428bb13fa3" (UID: "99708a5d-57d5-4479-8e09-94428bb13fa3"). InnerVolumeSpecName "local-storage03-crc". 
PluginName "kubernetes.io/local-volume", VolumeGidValue "" Nov 28 07:19:29 crc kubenswrapper[4922]: I1128 07:19:29.506321 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/99708a5d-57d5-4479-8e09-94428bb13fa3-erlang-cookie-secret" (OuterVolumeSpecName: "erlang-cookie-secret") pod "99708a5d-57d5-4479-8e09-94428bb13fa3" (UID: "99708a5d-57d5-4479-8e09-94428bb13fa3"). InnerVolumeSpecName "erlang-cookie-secret". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 07:19:29 crc kubenswrapper[4922]: I1128 07:19:29.508406 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/99708a5d-57d5-4479-8e09-94428bb13fa3-rabbitmq-tls" (OuterVolumeSpecName: "rabbitmq-tls") pod "99708a5d-57d5-4479-8e09-94428bb13fa3" (UID: "99708a5d-57d5-4479-8e09-94428bb13fa3"). InnerVolumeSpecName "rabbitmq-tls". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 07:19:29 crc kubenswrapper[4922]: I1128 07:19:29.532042 4922 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage07-crc" (UniqueName: "kubernetes.io/local-volume/local-storage07-crc") on node "crc" Nov 28 07:19:29 crc kubenswrapper[4922]: I1128 07:19:29.565160 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/99708a5d-57d5-4479-8e09-94428bb13fa3-config-data" (OuterVolumeSpecName: "config-data") pod "99708a5d-57d5-4479-8e09-94428bb13fa3" (UID: "99708a5d-57d5-4479-8e09-94428bb13fa3"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 07:19:29 crc kubenswrapper[4922]: I1128 07:19:29.574192 4922 reconciler_common.go:293] "Volume detached for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") on node \"crc\" DevicePath \"\"" Nov 28 07:19:29 crc kubenswrapper[4922]: I1128 07:19:29.574219 4922 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/99708a5d-57d5-4479-8e09-94428bb13fa3-rabbitmq-tls\") on node \"crc\" DevicePath \"\"" Nov 28 07:19:29 crc kubenswrapper[4922]: I1128 07:19:29.574242 4922 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/99708a5d-57d5-4479-8e09-94428bb13fa3-rabbitmq-plugins\") on node \"crc\" DevicePath \"\"" Nov 28 07:19:29 crc kubenswrapper[4922]: I1128 07:19:29.574251 4922 reconciler_common.go:293] "Volume detached for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/99708a5d-57d5-4479-8e09-94428bb13fa3-plugins-conf\") on node \"crc\" DevicePath \"\"" Nov 28 07:19:29 crc kubenswrapper[4922]: I1128 07:19:29.574259 4922 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/4cf25acc-0d60-4b0a-a9c9-adc7ddce7458-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 07:19:29 crc kubenswrapper[4922]: I1128 07:19:29.574278 4922 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") on node \"crc\" " Nov 28 07:19:29 crc kubenswrapper[4922]: I1128 07:19:29.574288 4922 reconciler_common.go:293] "Volume detached for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/99708a5d-57d5-4479-8e09-94428bb13fa3-erlang-cookie-secret\") on node \"crc\" DevicePath \"\"" Nov 28 07:19:29 crc kubenswrapper[4922]: I1128 07:19:29.574297 4922 reconciler_common.go:293] "Volume 
detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8d12d285-16c4-4e64-98d8-cff0f581aee4-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 07:19:29 crc kubenswrapper[4922]: I1128 07:19:29.574306 4922 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/99708a5d-57d5-4479-8e09-94428bb13fa3-rabbitmq-erlang-cookie\") on node \"crc\" DevicePath \"\"" Nov 28 07:19:29 crc kubenswrapper[4922]: I1128 07:19:29.574317 4922 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/99708a5d-57d5-4479-8e09-94428bb13fa3-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 07:19:29 crc kubenswrapper[4922]: I1128 07:19:29.574326 4922 reconciler_common.go:293] "Volume detached for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/99708a5d-57d5-4479-8e09-94428bb13fa3-pod-info\") on node \"crc\" DevicePath \"\"" Nov 28 07:19:29 crc kubenswrapper[4922]: I1128 07:19:29.574334 4922 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jbnrh\" (UniqueName: \"kubernetes.io/projected/99708a5d-57d5-4479-8e09-94428bb13fa3-kube-api-access-jbnrh\") on node \"crc\" DevicePath \"\"" Nov 28 07:19:29 crc kubenswrapper[4922]: E1128 07:19:29.574301 4922 configmap.go:193] Couldn't get configMap openstack/openstack-scripts: configmap "openstack-scripts" not found Nov 28 07:19:29 crc kubenswrapper[4922]: E1128 07:19:29.574389 4922 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/ca6d488f-6085-4e22-a325-1b749d8c154c-operator-scripts podName:ca6d488f-6085-4e22-a325-1b749d8c154c nodeName:}" failed. No retries permitted until 2025-11-28 07:19:31.574374809 +0000 UTC m=+1616.494770391 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "operator-scripts" (UniqueName: "kubernetes.io/configmap/ca6d488f-6085-4e22-a325-1b749d8c154c-operator-scripts") pod "placementdbc9-account-delete-68tbl" (UID: "ca6d488f-6085-4e22-a325-1b749d8c154c") : configmap "openstack-scripts" not found Nov 28 07:19:29 crc kubenswrapper[4922]: E1128 07:19:29.574320 4922 configmap.go:193] Couldn't get configMap openstack/openstack-scripts: configmap "openstack-scripts" not found Nov 28 07:19:29 crc kubenswrapper[4922]: E1128 07:19:29.574496 4922 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/2425a44a-c3c8-4533-9aa6-deb657556efb-operator-scripts podName:2425a44a-c3c8-4533-9aa6-deb657556efb nodeName:}" failed. No retries permitted until 2025-11-28 07:19:31.574478661 +0000 UTC m=+1616.494874243 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "operator-scripts" (UniqueName: "kubernetes.io/configmap/2425a44a-c3c8-4533-9aa6-deb657556efb-operator-scripts") pod "novaapi78f1-account-delete-mwv4p" (UID: "2425a44a-c3c8-4533-9aa6-deb657556efb") : configmap "openstack-scripts" not found Nov 28 07:19:29 crc kubenswrapper[4922]: E1128 07:19:29.574334 4922 configmap.go:193] Couldn't get configMap openstack/openstack-scripts: configmap "openstack-scripts" not found Nov 28 07:19:29 crc kubenswrapper[4922]: E1128 07:19:29.574522 4922 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/3e442fd0-cd46-4c04-afb3-96892d39c0f4-operator-scripts podName:3e442fd0-cd46-4c04-afb3-96892d39c0f4 nodeName:}" failed. No retries permitted until 2025-11-28 07:19:31.574516542 +0000 UTC m=+1616.494912124 (durationBeforeRetry 2s). 
Error: MountVolume.SetUp failed for volume "operator-scripts" (UniqueName: "kubernetes.io/configmap/3e442fd0-cd46-4c04-afb3-96892d39c0f4-operator-scripts") pod "barbican5229-account-delete-qq87v" (UID: "3e442fd0-cd46-4c04-afb3-96892d39c0f4") : configmap "openstack-scripts" not found Nov 28 07:19:29 crc kubenswrapper[4922]: I1128 07:19:29.587884 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8d12d285-16c4-4e64-98d8-cff0f581aee4-memcached-tls-certs" (OuterVolumeSpecName: "memcached-tls-certs") pod "8d12d285-16c4-4e64-98d8-cff0f581aee4" (UID: "8d12d285-16c4-4e64-98d8-cff0f581aee4"). InnerVolumeSpecName "memcached-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 07:19:29 crc kubenswrapper[4922]: I1128 07:19:29.618212 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/99708a5d-57d5-4479-8e09-94428bb13fa3-server-conf" (OuterVolumeSpecName: "server-conf") pod "99708a5d-57d5-4479-8e09-94428bb13fa3" (UID: "99708a5d-57d5-4479-8e09-94428bb13fa3"). InnerVolumeSpecName "server-conf". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 07:19:29 crc kubenswrapper[4922]: I1128 07:19:29.638064 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4cf25acc-0d60-4b0a-a9c9-adc7ddce7458-server-conf" (OuterVolumeSpecName: "server-conf") pod "4cf25acc-0d60-4b0a-a9c9-adc7ddce7458" (UID: "4cf25acc-0d60-4b0a-a9c9-adc7ddce7458"). InnerVolumeSpecName "server-conf". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 07:19:29 crc kubenswrapper[4922]: I1128 07:19:29.644129 4922 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage03-crc" (UniqueName: "kubernetes.io/local-volume/local-storage03-crc") on node "crc" Nov 28 07:19:29 crc kubenswrapper[4922]: I1128 07:19:29.664983 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4cf25acc-0d60-4b0a-a9c9-adc7ddce7458-rabbitmq-confd" (OuterVolumeSpecName: "rabbitmq-confd") pod "4cf25acc-0d60-4b0a-a9c9-adc7ddce7458" (UID: "4cf25acc-0d60-4b0a-a9c9-adc7ddce7458"). InnerVolumeSpecName "rabbitmq-confd". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 07:19:29 crc kubenswrapper[4922]: I1128 07:19:29.677283 4922 reconciler_common.go:293] "Volume detached for volume \"memcached-tls-certs\" (UniqueName: \"kubernetes.io/secret/8d12d285-16c4-4e64-98d8-cff0f581aee4-memcached-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 28 07:19:29 crc kubenswrapper[4922]: I1128 07:19:29.677423 4922 reconciler_common.go:293] "Volume detached for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") on node \"crc\" DevicePath \"\"" Nov 28 07:19:29 crc kubenswrapper[4922]: I1128 07:19:29.677477 4922 reconciler_common.go:293] "Volume detached for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/99708a5d-57d5-4479-8e09-94428bb13fa3-server-conf\") on node \"crc\" DevicePath \"\"" Nov 28 07:19:29 crc kubenswrapper[4922]: I1128 07:19:29.677526 4922 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/4cf25acc-0d60-4b0a-a9c9-adc7ddce7458-rabbitmq-confd\") on node \"crc\" DevicePath \"\"" Nov 28 07:19:29 crc kubenswrapper[4922]: I1128 07:19:29.677573 4922 reconciler_common.go:293] "Volume detached for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/4cf25acc-0d60-4b0a-a9c9-adc7ddce7458-server-conf\") on node \"crc\" DevicePath \"\"" Nov 28 07:19:29 crc kubenswrapper[4922]: E1128 07:19:29.678200 4922 configmap.go:193] Couldn't get configMap openstack/openstack-scripts: configmap "openstack-scripts" not found Nov 28 07:19:29 crc kubenswrapper[4922]: E1128 07:19:29.681639 4922 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5e4e7296-ad39-41c1-9399-b3c9072c9158-operator-scripts podName:5e4e7296-ad39-41c1-9399-b3c9072c9158 nodeName:}" failed. No retries permitted until 2025-11-28 07:19:30.681614512 +0000 UTC m=+1615.602010094 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "operator-scripts" (UniqueName: "kubernetes.io/configmap/5e4e7296-ad39-41c1-9399-b3c9072c9158-operator-scripts") pod "novacell032a6-account-delete-tcwgc" (UID: "5e4e7296-ad39-41c1-9399-b3c9072c9158") : configmap "openstack-scripts" not found Nov 28 07:19:29 crc kubenswrapper[4922]: E1128 07:19:29.678268 4922 configmap.go:193] Couldn't get configMap openstack/openstack-scripts: configmap "openstack-scripts" not found Nov 28 07:19:29 crc kubenswrapper[4922]: E1128 07:19:29.682144 4922 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/1f339784-df58-44f7-947d-9d80559c1c0c-operator-scripts podName:1f339784-df58-44f7-947d-9d80559c1c0c nodeName:}" failed. No retries permitted until 2025-11-28 07:19:30.682135375 +0000 UTC m=+1615.602530957 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "operator-scripts" (UniqueName: "kubernetes.io/configmap/1f339784-df58-44f7-947d-9d80559c1c0c-operator-scripts") pod "glance75df-account-delete-wdw2x" (UID: "1f339784-df58-44f7-947d-9d80559c1c0c") : configmap "openstack-scripts" not found Nov 28 07:19:29 crc kubenswrapper[4922]: I1128 07:19:29.733160 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/99708a5d-57d5-4479-8e09-94428bb13fa3-rabbitmq-confd" (OuterVolumeSpecName: "rabbitmq-confd") pod "99708a5d-57d5-4479-8e09-94428bb13fa3" (UID: "99708a5d-57d5-4479-8e09-94428bb13fa3"). InnerVolumeSpecName "rabbitmq-confd". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 07:19:29 crc kubenswrapper[4922]: I1128 07:19:29.782541 4922 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/99708a5d-57d5-4479-8e09-94428bb13fa3-rabbitmq-confd\") on node \"crc\" DevicePath \"\"" Nov 28 07:19:29 crc kubenswrapper[4922]: I1128 07:19:29.867751 4922 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 28 07:19:29 crc kubenswrapper[4922]: I1128 07:19:29.867788 4922 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Nov 28 07:19:29 crc kubenswrapper[4922]: I1128 07:19:29.867801 4922 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-metadata-0"] Nov 28 07:19:29 crc kubenswrapper[4922]: I1128 07:19:29.867811 4922 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Nov 28 07:19:29 crc kubenswrapper[4922]: I1128 07:19:29.867823 4922 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-0"] Nov 28 07:19:29 crc kubenswrapper[4922]: I1128 07:19:29.867831 4922 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/placement-5f78c88b9d-zp4nm"] Nov 28 07:19:29 crc kubenswrapper[4922]: I1128 07:19:29.867841 4922 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/placement-5f78c88b9d-zp4nm"] Nov 28 07:19:29 crc kubenswrapper[4922]: I1128 07:19:29.867854 4922 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"] Nov 28 07:19:29 crc kubenswrapper[4922]: I1128 07:19:29.867863 4922 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-scheduler-0"] Nov 28 07:19:29 crc kubenswrapper[4922]: I1128 07:19:29.867871 4922 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-api-0"] Nov 28 07:19:29 crc kubenswrapper[4922]: I1128 07:19:29.867880 4922 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-api-0"] Nov 28 07:19:29 crc kubenswrapper[4922]: I1128 07:19:29.867889 4922 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 28 07:19:29 crc kubenswrapper[4922]: I1128 07:19:29.867900 4922 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 28 07:19:29 crc kubenswrapper[4922]: I1128 07:19:29.896175 4922 scope.go:117] "RemoveContainer" containerID="97f477a9860f7b7b30cd717b142d2d8ffd74a63e7e7efeaa441ecae8870aecb0" Nov 28 07:19:29 crc kubenswrapper[4922]: I1128 07:19:29.900418 4922 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/openstack-galera-0" Nov 28 07:19:29 crc kubenswrapper[4922]: E1128 07:19:29.963065 4922 handlers.go:78] "Exec lifecycle hook for Container in Pod failed" err=< Nov 28 07:19:29 crc kubenswrapper[4922]: command '/usr/share/ovn/scripts/ovn-ctl stop_controller' exited with 137: 2025-11-28T07:19:22Z|00001|fatal_signal|WARN|terminating with signal 14 (Alarm clock) Nov 28 07:19:29 crc kubenswrapper[4922]: /etc/init.d/functions: line 589: 435 Alarm clock "$@" Nov 28 07:19:29 crc kubenswrapper[4922]: > execCommand=["/usr/share/ovn/scripts/ovn-ctl","stop_controller"] containerName="ovn-controller" pod="openstack/ovn-controller-xqzrg" message=< Nov 28 07:19:29 crc kubenswrapper[4922]: Exiting ovn-controller (1) [FAILED] Nov 28 07:19:29 crc kubenswrapper[4922]: Killing ovn-controller (1) [ OK ] Nov 28 07:19:29 crc kubenswrapper[4922]: Killing ovn-controller (1) with SIGKILL [ OK ] Nov 28 07:19:29 crc kubenswrapper[4922]: 2025-11-28T07:19:22Z|00001|fatal_signal|WARN|terminating with signal 14 (Alarm clock) Nov 28 07:19:29 crc kubenswrapper[4922]: /etc/init.d/functions: line 589: 435 Alarm clock "$@" Nov 28 07:19:29 crc kubenswrapper[4922]: > Nov 28 07:19:29 crc kubenswrapper[4922]: E1128 07:19:29.963138 4922 kuberuntime_container.go:691] "PreStop hook failed" err=< Nov 28 07:19:29 crc kubenswrapper[4922]: command '/usr/share/ovn/scripts/ovn-ctl stop_controller' exited with 137: 2025-11-28T07:19:22Z|00001|fatal_signal|WARN|terminating with signal 14 (Alarm clock) Nov 28 07:19:29 crc kubenswrapper[4922]: /etc/init.d/functions: line 589: 435 Alarm clock "$@" Nov 28 07:19:29 crc kubenswrapper[4922]: > pod="openstack/ovn-controller-xqzrg" podUID="e5e01f31-28bd-46a2-b5cc-695c485deaf6" containerName="ovn-controller" containerID="cri-o://ce9c4ff1ad6613830ac06a1b349aa3a3f88a3726cd3576c4fc3e803b5a7ea1c6" Nov 28 07:19:29 crc kubenswrapper[4922]: I1128 07:19:29.963179 4922 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ovn-controller-xqzrg" podUID="e5e01f31-28bd-46a2-b5cc-695c485deaf6" containerName="ovn-controller" containerID="cri-o://ce9c4ff1ad6613830ac06a1b349aa3a3f88a3726cd3576c4fc3e803b5a7ea1c6" gracePeriod=21 Nov 28 07:19:29 crc kubenswrapper[4922]: I1128 07:19:29.979341 4922 scope.go:117] "RemoveContainer" containerID="97f477a9860f7b7b30cd717b142d2d8ffd74a63e7e7efeaa441ecae8870aecb0" Nov 28 07:19:29 crc kubenswrapper[4922]: E1128 07:19:29.979775 4922 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"97f477a9860f7b7b30cd717b142d2d8ffd74a63e7e7efeaa441ecae8870aecb0\": container with ID starting with 97f477a9860f7b7b30cd717b142d2d8ffd74a63e7e7efeaa441ecae8870aecb0 not found: ID does not exist" containerID="97f477a9860f7b7b30cd717b142d2d8ffd74a63e7e7efeaa441ecae8870aecb0" Nov 28 07:19:29 crc kubenswrapper[4922]: I1128 07:19:29.979806 4922 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"97f477a9860f7b7b30cd717b142d2d8ffd74a63e7e7efeaa441ecae8870aecb0"} err="failed to get container status \"97f477a9860f7b7b30cd717b142d2d8ffd74a63e7e7efeaa441ecae8870aecb0\": rpc error: code = NotFound desc = could not find container \"97f477a9860f7b7b30cd717b142d2d8ffd74a63e7e7efeaa441ecae8870aecb0\": container with ID starting with 97f477a9860f7b7b30cd717b142d2d8ffd74a63e7e7efeaa441ecae8870aecb0 not found: ID does not exist" Nov 28 07:19:29 crc kubenswrapper[4922]: I1128 07:19:29.979826 4922 scope.go:117] "RemoveContainer" 
containerID="81e37e3417d1f4f55a00b3a748b722590a99b434ea982924a0a5d757ceb112c8" Nov 28 07:19:29 crc kubenswrapper[4922]: I1128 07:19:29.990436 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"mysql-db\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"c39356f2-8f5d-45d3-8188-7d9428c4d8bf\" (UID: \"c39356f2-8f5d-45d3-8188-7d9428c4d8bf\") " Nov 28 07:19:29 crc kubenswrapper[4922]: I1128 07:19:29.990524 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/c39356f2-8f5d-45d3-8188-7d9428c4d8bf-config-data-default\") pod \"c39356f2-8f5d-45d3-8188-7d9428c4d8bf\" (UID: \"c39356f2-8f5d-45d3-8188-7d9428c4d8bf\") " Nov 28 07:19:29 crc kubenswrapper[4922]: I1128 07:19:29.990601 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/c39356f2-8f5d-45d3-8188-7d9428c4d8bf-galera-tls-certs\") pod \"c39356f2-8f5d-45d3-8188-7d9428c4d8bf\" (UID: \"c39356f2-8f5d-45d3-8188-7d9428c4d8bf\") " Nov 28 07:19:29 crc kubenswrapper[4922]: I1128 07:19:29.990665 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c39356f2-8f5d-45d3-8188-7d9428c4d8bf-operator-scripts\") pod \"c39356f2-8f5d-45d3-8188-7d9428c4d8bf\" (UID: \"c39356f2-8f5d-45d3-8188-7d9428c4d8bf\") " Nov 28 07:19:29 crc kubenswrapper[4922]: I1128 07:19:29.990699 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/c39356f2-8f5d-45d3-8188-7d9428c4d8bf-config-data-generated\") pod \"c39356f2-8f5d-45d3-8188-7d9428c4d8bf\" (UID: \"c39356f2-8f5d-45d3-8188-7d9428c4d8bf\") " Nov 28 07:19:29 crc kubenswrapper[4922]: I1128 07:19:29.990732 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/c39356f2-8f5d-45d3-8188-7d9428c4d8bf-kolla-config\") pod \"c39356f2-8f5d-45d3-8188-7d9428c4d8bf\" (UID: \"c39356f2-8f5d-45d3-8188-7d9428c4d8bf\") " Nov 28 07:19:29 crc kubenswrapper[4922]: I1128 07:19:29.990885 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5bcdt\" (UniqueName: \"kubernetes.io/projected/c39356f2-8f5d-45d3-8188-7d9428c4d8bf-kube-api-access-5bcdt\") pod \"c39356f2-8f5d-45d3-8188-7d9428c4d8bf\" (UID: \"c39356f2-8f5d-45d3-8188-7d9428c4d8bf\") " Nov 28 07:19:29 crc kubenswrapper[4922]: I1128 07:19:29.990936 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c39356f2-8f5d-45d3-8188-7d9428c4d8bf-combined-ca-bundle\") pod \"c39356f2-8f5d-45d3-8188-7d9428c4d8bf\" (UID: \"c39356f2-8f5d-45d3-8188-7d9428c4d8bf\") " Nov 28 07:19:29 crc kubenswrapper[4922]: I1128 07:19:29.992040 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c39356f2-8f5d-45d3-8188-7d9428c4d8bf-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "c39356f2-8f5d-45d3-8188-7d9428c4d8bf" (UID: "c39356f2-8f5d-45d3-8188-7d9428c4d8bf"). InnerVolumeSpecName "operator-scripts". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 07:19:29 crc kubenswrapper[4922]: I1128 07:19:29.992338 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c39356f2-8f5d-45d3-8188-7d9428c4d8bf-config-data-default" (OuterVolumeSpecName: "config-data-default") pod "c39356f2-8f5d-45d3-8188-7d9428c4d8bf" (UID: "c39356f2-8f5d-45d3-8188-7d9428c4d8bf"). InnerVolumeSpecName "config-data-default". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 07:19:29 crc kubenswrapper[4922]: I1128 07:19:29.992701 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c39356f2-8f5d-45d3-8188-7d9428c4d8bf-kolla-config" (OuterVolumeSpecName: "kolla-config") pod "c39356f2-8f5d-45d3-8188-7d9428c4d8bf" (UID: "c39356f2-8f5d-45d3-8188-7d9428c4d8bf"). InnerVolumeSpecName "kolla-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 07:19:29 crc kubenswrapper[4922]: I1128 07:19:29.992871 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c39356f2-8f5d-45d3-8188-7d9428c4d8bf-config-data-generated" (OuterVolumeSpecName: "config-data-generated") pod "c39356f2-8f5d-45d3-8188-7d9428c4d8bf" (UID: "c39356f2-8f5d-45d3-8188-7d9428c4d8bf"). InnerVolumeSpecName "config-data-generated". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 07:19:29 crc kubenswrapper[4922]: I1128 07:19:29.995880 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c39356f2-8f5d-45d3-8188-7d9428c4d8bf-kube-api-access-5bcdt" (OuterVolumeSpecName: "kube-api-access-5bcdt") pod "c39356f2-8f5d-45d3-8188-7d9428c4d8bf" (UID: "c39356f2-8f5d-45d3-8188-7d9428c4d8bf"). InnerVolumeSpecName "kube-api-access-5bcdt". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 07:19:30 crc kubenswrapper[4922]: I1128 07:19:30.005309 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage01-crc" (OuterVolumeSpecName: "mysql-db") pod "c39356f2-8f5d-45d3-8188-7d9428c4d8bf" (UID: "c39356f2-8f5d-45d3-8188-7d9428c4d8bf"). InnerVolumeSpecName "local-storage01-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Nov 28 07:19:30 crc kubenswrapper[4922]: I1128 07:19:30.043794 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c39356f2-8f5d-45d3-8188-7d9428c4d8bf-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "c39356f2-8f5d-45d3-8188-7d9428c4d8bf" (UID: "c39356f2-8f5d-45d3-8188-7d9428c4d8bf"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 07:19:30 crc kubenswrapper[4922]: I1128 07:19:30.047282 4922 generic.go:334] "Generic (PLEG): container finished" podID="c5b2a607-b6c1-4e95-b722-8b150c25f371" containerID="3f6adb9e4600f443d95c7ef3d113f08146c9cb6aad10859be6aa10d59ce782f0" exitCode=0 Nov 28 07:19:30 crc kubenswrapper[4922]: I1128 07:19:30.047371 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"c5b2a607-b6c1-4e95-b722-8b150c25f371","Type":"ContainerDied","Data":"3f6adb9e4600f443d95c7ef3d113f08146c9cb6aad10859be6aa10d59ce782f0"} Nov 28 07:19:30 crc kubenswrapper[4922]: I1128 07:19:30.068155 4922 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-db-create-n2fp7"] Nov 28 07:19:30 crc kubenswrapper[4922]: I1128 07:19:30.090509 4922 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-db-create-n2fp7"] Nov 28 07:19:30 crc kubenswrapper[4922]: I1128 07:19:30.093077 4922 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c39356f2-8f5d-45d3-8188-7d9428c4d8bf-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 07:19:30 crc kubenswrapper[4922]: I1128 07:19:30.093112 4922 reconciler_common.go:293] "Volume detached for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/c39356f2-8f5d-45d3-8188-7d9428c4d8bf-config-data-generated\") on node \"crc\" DevicePath \"\"" Nov 28 07:19:30 crc kubenswrapper[4922]: I1128 07:19:30.093126 4922 reconciler_common.go:293] "Volume detached for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/c39356f2-8f5d-45d3-8188-7d9428c4d8bf-kolla-config\") on node \"crc\" DevicePath \"\"" Nov 28 07:19:30 crc kubenswrapper[4922]: I1128 07:19:30.093138 4922 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5bcdt\" (UniqueName: \"kubernetes.io/projected/c39356f2-8f5d-45d3-8188-7d9428c4d8bf-kube-api-access-5bcdt\") on node \"crc\" DevicePath \"\"" Nov 28 07:19:30 crc kubenswrapper[4922]: I1128 07:19:30.093149 4922 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c39356f2-8f5d-45d3-8188-7d9428c4d8bf-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 07:19:30 crc kubenswrapper[4922]: I1128 07:19:30.093180 4922 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") on node \"crc\" " Nov 28 07:19:30 crc kubenswrapper[4922]: I1128 07:19:30.093191 4922 reconciler_common.go:293] "Volume detached for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/c39356f2-8f5d-45d3-8188-7d9428c4d8bf-config-data-default\") on node \"crc\" DevicePath \"\"" Nov 28 07:19:30 crc kubenswrapper[4922]: I1128 07:19:30.124907 4922 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/rabbitmq-server-0" Nov 28 07:19:30 crc kubenswrapper[4922]: I1128 07:19:30.125616 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"4cf25acc-0d60-4b0a-a9c9-adc7ddce7458","Type":"ContainerDied","Data":"e3409056409358e85777883f3bf600a01d3c44a5636d56f4cf42551639e283b1"} Nov 28 07:19:30 crc kubenswrapper[4922]: I1128 07:19:30.132046 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c39356f2-8f5d-45d3-8188-7d9428c4d8bf-galera-tls-certs" (OuterVolumeSpecName: "galera-tls-certs") pod "c39356f2-8f5d-45d3-8188-7d9428c4d8bf" (UID: "c39356f2-8f5d-45d3-8188-7d9428c4d8bf"). InnerVolumeSpecName "galera-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 07:19:30 crc kubenswrapper[4922]: I1128 07:19:30.133339 4922 generic.go:334] "Generic (PLEG): container finished" podID="5ece1333-c457-4099-bf00-1daa969a14dc" containerID="6c09aa779d052923b62c13eee209268276067fcf2d60f0ab88d8d7db5fd25ca4" exitCode=0 Nov 28 07:19:30 crc kubenswrapper[4922]: I1128 07:19:30.133394 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-keystone-listener-84495d76c8-mkvcb" event={"ID":"5ece1333-c457-4099-bf00-1daa969a14dc","Type":"ContainerDied","Data":"6c09aa779d052923b62c13eee209268276067fcf2d60f0ab88d8d7db5fd25ca4"} Nov 28 07:19:30 crc kubenswrapper[4922]: I1128 07:19:30.135861 4922 generic.go:334] "Generic (PLEG): container finished" podID="f8685dc8-7577-4076-8a5a-beba52e9bae7" containerID="1013a435a5db00a3fabb1bd3f992a2b6780c248c282088a565511d2d85e0aa49" exitCode=0 Nov 28 07:19:30 crc kubenswrapper[4922]: I1128 07:19:30.135948 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-worker-5b7cbd995c-rwlzz" event={"ID":"f8685dc8-7577-4076-8a5a-beba52e9bae7","Type":"ContainerDied","Data":"1013a435a5db00a3fabb1bd3f992a2b6780c248c282088a565511d2d85e0aa49"} Nov 28 07:19:30 crc kubenswrapper[4922]: I1128 07:19:30.137741 4922 generic.go:334] "Generic (PLEG): container finished" podID="81469087-f8d4-4499-a1e3-9fe103758289" containerID="e14162c5f538b87ed64ebb17f2d96cca074bc89ff83ab9d46b7b609216c66fb3" exitCode=0 Nov 28 07:19:30 crc kubenswrapper[4922]: I1128 07:19:30.137782 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-c6c69b978-txpld" event={"ID":"81469087-f8d4-4499-a1e3-9fe103758289","Type":"ContainerDied","Data":"e14162c5f538b87ed64ebb17f2d96cca074bc89ff83ab9d46b7b609216c66fb3"} Nov 28 07:19:30 crc kubenswrapper[4922]: I1128 07:19:30.140459 4922 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance75df-account-delete-wdw2x"] Nov 28 07:19:30 crc kubenswrapper[4922]: I1128 07:19:30.149331 4922 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage01-crc" (UniqueName: "kubernetes.io/local-volume/local-storage01-crc") on node "crc" Nov 28 07:19:30 crc kubenswrapper[4922]: I1128 07:19:30.150298 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"99708a5d-57d5-4479-8e09-94428bb13fa3","Type":"ContainerDied","Data":"ecc7f30a159e75a43da65e468aa39428aaf9bebe3b66fa2f7aad9d33f5f3926e"} Nov 28 07:19:30 crc kubenswrapper[4922]: I1128 07:19:30.150500 4922 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Nov 28 07:19:30 crc kubenswrapper[4922]: I1128 07:19:30.160325 4922 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-75df-account-create-update-mxv52"] Nov 28 07:19:30 crc kubenswrapper[4922]: I1128 07:19:30.166754 4922 generic.go:334] "Generic (PLEG): container finished" podID="c39356f2-8f5d-45d3-8188-7d9428c4d8bf" containerID="52ecbf9b71e70de5c3daba2523d0579eaa9f475b46a7fa4424c1f79302e0602e" exitCode=0 Nov 28 07:19:30 crc kubenswrapper[4922]: I1128 07:19:30.166883 4922 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron5206-account-delete-64tk8" Nov 28 07:19:30 crc kubenswrapper[4922]: I1128 07:19:30.167151 4922 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/openstack-galera-0" Nov 28 07:19:30 crc kubenswrapper[4922]: I1128 07:19:30.167567 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"c39356f2-8f5d-45d3-8188-7d9428c4d8bf","Type":"ContainerDied","Data":"52ecbf9b71e70de5c3daba2523d0579eaa9f475b46a7fa4424c1f79302e0602e"} Nov 28 07:19:30 crc kubenswrapper[4922]: I1128 07:19:30.167607 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"c39356f2-8f5d-45d3-8188-7d9428c4d8bf","Type":"ContainerDied","Data":"20b20550f23cb78280ef849113c8204b5d6baf3b4b068ecd797594bbd37d1fb6"} Nov 28 07:19:30 crc kubenswrapper[4922]: I1128 07:19:30.167658 4922 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cindere247-account-delete-5kjlk" Nov 28 07:19:30 crc kubenswrapper[4922]: I1128 07:19:30.168105 4922 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/memcached-0" Nov 28 07:19:30 crc kubenswrapper[4922]: I1128 07:19:30.168860 4922 kubelet_pods.go:1007] "Unable to retrieve pull secret, the image pull may not succeed." pod="openstack/novacell032a6-account-delete-tcwgc" secret="" err="secret \"galera-openstack-dockercfg-882fh\" not found" Nov 28 07:19:30 crc kubenswrapper[4922]: I1128 07:19:30.169713 4922 kubelet_pods.go:1007] "Unable to retrieve pull secret, the image pull may not succeed." 
pod="openstack/glance75df-account-delete-wdw2x" secret="" err="secret \"galera-openstack-dockercfg-882fh\" not found" Nov 28 07:19:30 crc kubenswrapper[4922]: I1128 07:19:30.178000 4922 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-75df-account-create-update-mxv52"] Nov 28 07:19:30 crc kubenswrapper[4922]: I1128 07:19:30.194008 4922 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/rabbitmq-server-0"] Nov 28 07:19:30 crc kubenswrapper[4922]: I1128 07:19:30.198564 4922 reconciler_common.go:293] "Volume detached for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") on node \"crc\" DevicePath \"\"" Nov 28 07:19:30 crc kubenswrapper[4922]: I1128 07:19:30.199108 4922 reconciler_common.go:293] "Volume detached for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/c39356f2-8f5d-45d3-8188-7d9428c4d8bf-galera-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 28 07:19:30 crc kubenswrapper[4922]: I1128 07:19:30.203726 4922 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/rabbitmq-server-0"] Nov 28 07:19:30 crc kubenswrapper[4922]: I1128 07:19:30.235740 4922 scope.go:117] "RemoveContainer" containerID="585e8abe2762a709ae6ce887d0ef00f835169d14caac14b38ddb07298e1c071f" Nov 28 07:19:30 crc kubenswrapper[4922]: I1128 07:19:30.282219 4922 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Nov 28 07:19:30 crc kubenswrapper[4922]: I1128 07:19:30.298074 4922 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Nov 28 07:19:30 crc kubenswrapper[4922]: I1128 07:19:30.299354 4922 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-worker-5b7cbd995c-rwlzz" Nov 28 07:19:30 crc kubenswrapper[4922]: I1128 07:19:30.301045 4922 scope.go:117] "RemoveContainer" containerID="bf1cba7c9ab10696b41c6419bb24db199153cd327768de2455c89daa7f33c569" Nov 28 07:19:30 crc kubenswrapper[4922]: I1128 07:19:30.324091 4922 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-keystone-listener-84495d76c8-mkvcb" Nov 28 07:19:30 crc kubenswrapper[4922]: I1128 07:19:30.324608 4922 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/memcached-0"] Nov 28 07:19:30 crc kubenswrapper[4922]: I1128 07:19:30.338560 4922 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/memcached-0"] Nov 28 07:19:30 crc kubenswrapper[4922]: I1128 07:19:30.339268 4922 scope.go:117] "RemoveContainer" containerID="0b65d7751c631796afddb9d5cb6be8b33791f093200379b257e1458a02ef94be" Nov 28 07:19:30 crc kubenswrapper[4922]: I1128 07:19:30.373896 4922 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/openstack-galera-0"] Nov 28 07:19:30 crc kubenswrapper[4922]: I1128 07:19:30.380273 4922 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/openstack-galera-0"] Nov 28 07:19:30 crc kubenswrapper[4922]: I1128 07:19:30.390854 4922 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/placement-db-create-h9s89"] Nov 28 07:19:30 crc kubenswrapper[4922]: I1128 07:19:30.397867 4922 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/placement-db-create-h9s89"] Nov 28 07:19:30 crc kubenswrapper[4922]: I1128 07:19:30.400608 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/5ece1333-c457-4099-bf00-1daa969a14dc-logs\") pod \"5ece1333-c457-4099-bf00-1daa969a14dc\" (UID: \"5ece1333-c457-4099-bf00-1daa969a14dc\") " Nov 28 07:19:30 crc kubenswrapper[4922]: I1128 07:19:30.400653 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5ece1333-c457-4099-bf00-1daa969a14dc-config-data\") pod \"5ece1333-c457-4099-bf00-1daa969a14dc\" (UID: \"5ece1333-c457-4099-bf00-1daa969a14dc\") " Nov 28 07:19:30 crc kubenswrapper[4922]: I1128 07:19:30.400691 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-z875h\" (UniqueName: \"kubernetes.io/projected/5ece1333-c457-4099-bf00-1daa969a14dc-kube-api-access-z875h\") pod \"5ece1333-c457-4099-bf00-1daa969a14dc\" (UID: \"5ece1333-c457-4099-bf00-1daa969a14dc\") " Nov 28 07:19:30 crc kubenswrapper[4922]: I1128 07:19:30.400722 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f8685dc8-7577-4076-8a5a-beba52e9bae7-config-data\") pod \"f8685dc8-7577-4076-8a5a-beba52e9bae7\" (UID: \"f8685dc8-7577-4076-8a5a-beba52e9bae7\") " Nov 28 07:19:30 crc kubenswrapper[4922]: I1128 07:19:30.400757 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/f8685dc8-7577-4076-8a5a-beba52e9bae7-config-data-custom\") pod \"f8685dc8-7577-4076-8a5a-beba52e9bae7\" (UID: \"f8685dc8-7577-4076-8a5a-beba52e9bae7\") " Nov 28 07:19:30 crc kubenswrapper[4922]: I1128 07:19:30.400968 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4g7dd\" (UniqueName: \"kubernetes.io/projected/f8685dc8-7577-4076-8a5a-beba52e9bae7-kube-api-access-4g7dd\") pod \"f8685dc8-7577-4076-8a5a-beba52e9bae7\" (UID: \"f8685dc8-7577-4076-8a5a-beba52e9bae7\") " Nov 28 07:19:30 crc kubenswrapper[4922]: I1128 07:19:30.401035 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/f8685dc8-7577-4076-8a5a-beba52e9bae7-combined-ca-bundle\") pod \"f8685dc8-7577-4076-8a5a-beba52e9bae7\" (UID: \"f8685dc8-7577-4076-8a5a-beba52e9bae7\") " Nov 28 07:19:30 crc kubenswrapper[4922]: I1128 07:19:30.401089 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/f8685dc8-7577-4076-8a5a-beba52e9bae7-logs\") pod \"f8685dc8-7577-4076-8a5a-beba52e9bae7\" (UID: \"f8685dc8-7577-4076-8a5a-beba52e9bae7\") " Nov 28 07:19:30 crc kubenswrapper[4922]: I1128 07:19:30.401125 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/5ece1333-c457-4099-bf00-1daa969a14dc-config-data-custom\") pod \"5ece1333-c457-4099-bf00-1daa969a14dc\" (UID: \"5ece1333-c457-4099-bf00-1daa969a14dc\") " Nov 28 07:19:30 crc kubenswrapper[4922]: I1128 07:19:30.401176 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5ece1333-c457-4099-bf00-1daa969a14dc-combined-ca-bundle\") pod \"5ece1333-c457-4099-bf00-1daa969a14dc\" (UID: \"5ece1333-c457-4099-bf00-1daa969a14dc\") " Nov 28 07:19:30 crc kubenswrapper[4922]: I1128 07:19:30.402743 4922 scope.go:117] "RemoveContainer" containerID="33fac45176641182615e4dbf61bb82f263874c2935f70fa33ef270e4398e93a2" Nov 28 07:19:30 crc kubenswrapper[4922]: I1128 07:19:30.404698 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5ece1333-c457-4099-bf00-1daa969a14dc-logs" (OuterVolumeSpecName: "logs") pod "5ece1333-c457-4099-bf00-1daa969a14dc" (UID: "5ece1333-c457-4099-bf00-1daa969a14dc"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 07:19:30 crc kubenswrapper[4922]: I1128 07:19:30.414554 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f8685dc8-7577-4076-8a5a-beba52e9bae7-logs" (OuterVolumeSpecName: "logs") pod "f8685dc8-7577-4076-8a5a-beba52e9bae7" (UID: "f8685dc8-7577-4076-8a5a-beba52e9bae7"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 07:19:30 crc kubenswrapper[4922]: I1128 07:19:30.415299 4922 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/placement-dbc9-account-create-update-rhbx2"] Nov 28 07:19:30 crc kubenswrapper[4922]: I1128 07:19:30.415791 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f8685dc8-7577-4076-8a5a-beba52e9bae7-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "f8685dc8-7577-4076-8a5a-beba52e9bae7" (UID: "f8685dc8-7577-4076-8a5a-beba52e9bae7"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 07:19:30 crc kubenswrapper[4922]: I1128 07:19:30.416160 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5ece1333-c457-4099-bf00-1daa969a14dc-kube-api-access-z875h" (OuterVolumeSpecName: "kube-api-access-z875h") pod "5ece1333-c457-4099-bf00-1daa969a14dc" (UID: "5ece1333-c457-4099-bf00-1daa969a14dc"). InnerVolumeSpecName "kube-api-access-z875h". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 07:19:30 crc kubenswrapper[4922]: I1128 07:19:30.420970 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5ece1333-c457-4099-bf00-1daa969a14dc-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "5ece1333-c457-4099-bf00-1daa969a14dc" (UID: "5ece1333-c457-4099-bf00-1daa969a14dc"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 07:19:30 crc kubenswrapper[4922]: I1128 07:19:30.422800 4922 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/placement-dbc9-account-create-update-rhbx2"] Nov 28 07:19:30 crc kubenswrapper[4922]: I1128 07:19:30.428165 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f8685dc8-7577-4076-8a5a-beba52e9bae7-kube-api-access-4g7dd" (OuterVolumeSpecName: "kube-api-access-4g7dd") pod "f8685dc8-7577-4076-8a5a-beba52e9bae7" (UID: "f8685dc8-7577-4076-8a5a-beba52e9bae7"). InnerVolumeSpecName "kube-api-access-4g7dd". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 07:19:30 crc kubenswrapper[4922]: I1128 07:19:30.432351 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5ece1333-c457-4099-bf00-1daa969a14dc-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "5ece1333-c457-4099-bf00-1daa969a14dc" (UID: "5ece1333-c457-4099-bf00-1daa969a14dc"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 07:19:30 crc kubenswrapper[4922]: I1128 07:19:30.438427 4922 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/placementdbc9-account-delete-68tbl"] Nov 28 07:19:30 crc kubenswrapper[4922]: I1128 07:19:30.440149 4922 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/placementdbc9-account-delete-68tbl" podUID="ca6d488f-6085-4e22-a325-1b749d8c154c" containerName="mariadb-account-delete" containerID="cri-o://bb3ce5985f855b9f6b316fd8b713d9eabb904b705a7cf10e089e680825de9d7f" gracePeriod=30 Nov 28 07:19:30 crc kubenswrapper[4922]: I1128 07:19:30.452319 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f8685dc8-7577-4076-8a5a-beba52e9bae7-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "f8685dc8-7577-4076-8a5a-beba52e9bae7" (UID: "f8685dc8-7577-4076-8a5a-beba52e9bae7"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 07:19:30 crc kubenswrapper[4922]: I1128 07:19:30.466550 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f8685dc8-7577-4076-8a5a-beba52e9bae7-config-data" (OuterVolumeSpecName: "config-data") pod "f8685dc8-7577-4076-8a5a-beba52e9bae7" (UID: "f8685dc8-7577-4076-8a5a-beba52e9bae7"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 07:19:30 crc kubenswrapper[4922]: I1128 07:19:30.497728 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5ece1333-c457-4099-bf00-1daa969a14dc-config-data" (OuterVolumeSpecName: "config-data") pod "5ece1333-c457-4099-bf00-1daa969a14dc" (UID: "5ece1333-c457-4099-bf00-1daa969a14dc"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 07:19:30 crc kubenswrapper[4922]: I1128 07:19:30.505282 4922 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/f8685dc8-7577-4076-8a5a-beba52e9bae7-logs\") on node \"crc\" DevicePath \"\"" Nov 28 07:19:30 crc kubenswrapper[4922]: I1128 07:19:30.505313 4922 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/5ece1333-c457-4099-bf00-1daa969a14dc-config-data-custom\") on node \"crc\" DevicePath \"\"" Nov 28 07:19:30 crc kubenswrapper[4922]: I1128 07:19:30.505324 4922 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5ece1333-c457-4099-bf00-1daa969a14dc-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 07:19:30 crc kubenswrapper[4922]: I1128 07:19:30.505332 4922 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/5ece1333-c457-4099-bf00-1daa969a14dc-logs\") on node \"crc\" DevicePath \"\"" Nov 28 07:19:30 crc kubenswrapper[4922]: I1128 07:19:30.505340 4922 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5ece1333-c457-4099-bf00-1daa969a14dc-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 07:19:30 crc kubenswrapper[4922]: I1128 07:19:30.505350 4922 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-z875h\" (UniqueName: \"kubernetes.io/projected/5ece1333-c457-4099-bf00-1daa969a14dc-kube-api-access-z875h\") on node \"crc\" DevicePath \"\"" Nov 28 07:19:30 crc kubenswrapper[4922]: I1128 07:19:30.505359 4922 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f8685dc8-7577-4076-8a5a-beba52e9bae7-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 07:19:30 crc kubenswrapper[4922]: I1128 07:19:30.505367 4922 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/f8685dc8-7577-4076-8a5a-beba52e9bae7-config-data-custom\") on node \"crc\" DevicePath \"\"" Nov 28 07:19:30 crc kubenswrapper[4922]: I1128 07:19:30.505374 4922 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4g7dd\" (UniqueName: \"kubernetes.io/projected/f8685dc8-7577-4076-8a5a-beba52e9bae7-kube-api-access-4g7dd\") on node \"crc\" DevicePath \"\"" Nov 28 07:19:30 crc kubenswrapper[4922]: I1128 07:19:30.505383 4922 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f8685dc8-7577-4076-8a5a-beba52e9bae7-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 07:19:30 crc kubenswrapper[4922]: I1128 07:19:30.580569 4922 scope.go:117] "RemoveContainer" containerID="58bc4c962a13bd392e1f3d48869c89613df686531a50bf08290a8747044899ed" Nov 28 07:19:30 crc kubenswrapper[4922]: I1128 07:19:30.632451 4922 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 28 07:19:30 crc kubenswrapper[4922]: I1128 07:19:30.640130 4922 scope.go:117] "RemoveContainer" containerID="9e43566d759ea7d848615e1c8beb2d9a8c5b517a0be3388bc208d070214e406b" Nov 28 07:19:30 crc kubenswrapper[4922]: I1128 07:19:30.676428 4922 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-db-create-jtvts"] Nov 28 07:19:30 crc kubenswrapper[4922]: I1128 07:19:30.690840 4922 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-db-create-jtvts"] Nov 28 07:19:30 crc kubenswrapper[4922]: I1128 07:19:30.704294 4922 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-e247-account-create-update-m48qt"] Nov 28 07:19:30 crc kubenswrapper[4922]: I1128 07:19:30.708778 4922 scope.go:117] "RemoveContainer" containerID="52ecbf9b71e70de5c3daba2523d0579eaa9f475b46a7fa4424c1f79302e0602e" Nov 28 07:19:30 crc kubenswrapper[4922]: I1128 07:19:30.709146 4922 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-e247-account-create-update-m48qt"] Nov 28 07:19:30 crc kubenswrapper[4922]: I1128 07:19:30.713179 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c5b2a607-b6c1-4e95-b722-8b150c25f371-config-data\") pod \"c5b2a607-b6c1-4e95-b722-8b150c25f371\" (UID: \"c5b2a607-b6c1-4e95-b722-8b150c25f371\") " Nov 28 07:19:30 crc kubenswrapper[4922]: I1128 07:19:30.713246 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/c5b2a607-b6c1-4e95-b722-8b150c25f371-run-httpd\") pod \"c5b2a607-b6c1-4e95-b722-8b150c25f371\" (UID: \"c5b2a607-b6c1-4e95-b722-8b150c25f371\") " Nov 28 07:19:30 crc kubenswrapper[4922]: I1128 07:19:30.713278 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9mhwk\" (UniqueName: \"kubernetes.io/projected/c5b2a607-b6c1-4e95-b722-8b150c25f371-kube-api-access-9mhwk\") pod \"c5b2a607-b6c1-4e95-b722-8b150c25f371\" (UID: \"c5b2a607-b6c1-4e95-b722-8b150c25f371\") " Nov 28 07:19:30 crc kubenswrapper[4922]: I1128 07:19:30.713309 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c5b2a607-b6c1-4e95-b722-8b150c25f371-scripts\") pod \"c5b2a607-b6c1-4e95-b722-8b150c25f371\" (UID: \"c5b2a607-b6c1-4e95-b722-8b150c25f371\") " Nov 28 07:19:30 crc kubenswrapper[4922]: I1128 07:19:30.713365 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/c5b2a607-b6c1-4e95-b722-8b150c25f371-sg-core-conf-yaml\") pod \"c5b2a607-b6c1-4e95-b722-8b150c25f371\" (UID: \"c5b2a607-b6c1-4e95-b722-8b150c25f371\") " Nov 28 07:19:30 crc kubenswrapper[4922]: I1128 07:19:30.713413 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/c5b2a607-b6c1-4e95-b722-8b150c25f371-log-httpd\") pod \"c5b2a607-b6c1-4e95-b722-8b150c25f371\" (UID: \"c5b2a607-b6c1-4e95-b722-8b150c25f371\") " Nov 28 07:19:30 crc kubenswrapper[4922]: I1128 07:19:30.713433 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c5b2a607-b6c1-4e95-b722-8b150c25f371-combined-ca-bundle\") pod \"c5b2a607-b6c1-4e95-b722-8b150c25f371\" (UID: \"c5b2a607-b6c1-4e95-b722-8b150c25f371\") " Nov 28 07:19:30 crc 
kubenswrapper[4922]: I1128 07:19:30.713452 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/c5b2a607-b6c1-4e95-b722-8b150c25f371-ceilometer-tls-certs\") pod \"c5b2a607-b6c1-4e95-b722-8b150c25f371\" (UID: \"c5b2a607-b6c1-4e95-b722-8b150c25f371\") " Nov 28 07:19:30 crc kubenswrapper[4922]: E1128 07:19:30.713995 4922 configmap.go:193] Couldn't get configMap openstack/openstack-scripts: configmap "openstack-scripts" not found Nov 28 07:19:30 crc kubenswrapper[4922]: E1128 07:19:30.714059 4922 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/1f339784-df58-44f7-947d-9d80559c1c0c-operator-scripts podName:1f339784-df58-44f7-947d-9d80559c1c0c nodeName:}" failed. No retries permitted until 2025-11-28 07:19:32.714042571 +0000 UTC m=+1617.634438153 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "operator-scripts" (UniqueName: "kubernetes.io/configmap/1f339784-df58-44f7-947d-9d80559c1c0c-operator-scripts") pod "glance75df-account-delete-wdw2x" (UID: "1f339784-df58-44f7-947d-9d80559c1c0c") : configmap "openstack-scripts" not found Nov 28 07:19:30 crc kubenswrapper[4922]: I1128 07:19:30.719648 4922 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cindere247-account-delete-5kjlk"] Nov 28 07:19:30 crc kubenswrapper[4922]: I1128 07:19:30.720546 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c5b2a607-b6c1-4e95-b722-8b150c25f371-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "c5b2a607-b6c1-4e95-b722-8b150c25f371" (UID: "c5b2a607-b6c1-4e95-b722-8b150c25f371"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 07:19:30 crc kubenswrapper[4922]: I1128 07:19:30.722734 4922 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cindere247-account-delete-5kjlk"] Nov 28 07:19:30 crc kubenswrapper[4922]: I1128 07:19:30.726847 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c5b2a607-b6c1-4e95-b722-8b150c25f371-kube-api-access-9mhwk" (OuterVolumeSpecName: "kube-api-access-9mhwk") pod "c5b2a607-b6c1-4e95-b722-8b150c25f371" (UID: "c5b2a607-b6c1-4e95-b722-8b150c25f371"). InnerVolumeSpecName "kube-api-access-9mhwk". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 07:19:30 crc kubenswrapper[4922]: I1128 07:19:30.727133 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c5b2a607-b6c1-4e95-b722-8b150c25f371-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "c5b2a607-b6c1-4e95-b722-8b150c25f371" (UID: "c5b2a607-b6c1-4e95-b722-8b150c25f371"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 07:19:30 crc kubenswrapper[4922]: E1128 07:19:30.727399 4922 configmap.go:193] Couldn't get configMap openstack/openstack-scripts: configmap "openstack-scripts" not found Nov 28 07:19:30 crc kubenswrapper[4922]: E1128 07:19:30.727468 4922 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5e4e7296-ad39-41c1-9399-b3c9072c9158-operator-scripts podName:5e4e7296-ad39-41c1-9399-b3c9072c9158 nodeName:}" failed. No retries permitted until 2025-11-28 07:19:32.727449218 +0000 UTC m=+1617.647844800 (durationBeforeRetry 2s). 
Error: MountVolume.SetUp failed for volume "operator-scripts" (UniqueName: "kubernetes.io/configmap/5e4e7296-ad39-41c1-9399-b3c9072c9158-operator-scripts") pod "novacell032a6-account-delete-tcwgc" (UID: "5e4e7296-ad39-41c1-9399-b3c9072c9158") : configmap "openstack-scripts" not found
Nov 28 07:19:30 crc kubenswrapper[4922]: I1128 07:19:30.737459 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c5b2a607-b6c1-4e95-b722-8b150c25f371-scripts" (OuterVolumeSpecName: "scripts") pod "c5b2a607-b6c1-4e95-b722-8b150c25f371" (UID: "c5b2a607-b6c1-4e95-b722-8b150c25f371"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 07:19:30 crc kubenswrapper[4922]: I1128 07:19:30.747200 4922 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-db-create-th8tp"]
Nov 28 07:19:30 crc kubenswrapper[4922]: I1128 07:19:30.750088 4922 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-c6c69b978-txpld"
Nov 28 07:19:30 crc kubenswrapper[4922]: I1128 07:19:30.754394 4922 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/neutron-db-create-th8tp"]
Nov 28 07:19:30 crc kubenswrapper[4922]: I1128 07:19:30.775298 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c5b2a607-b6c1-4e95-b722-8b150c25f371-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "c5b2a607-b6c1-4e95-b722-8b150c25f371" (UID: "c5b2a607-b6c1-4e95-b722-8b150c25f371"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 07:19:30 crc kubenswrapper[4922]: I1128 07:19:30.775364 4922 scope.go:117] "RemoveContainer" containerID="69d69b3f84197c0fb9fcf8a14b6ee2ee83a51d46a61eb90348dde8a31602b9e1"
Nov 28 07:19:30 crc kubenswrapper[4922]: I1128 07:19:30.787324 4922 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron5206-account-delete-64tk8"]
Nov 28 07:19:30 crc kubenswrapper[4922]: I1128 07:19:30.794493 4922 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-5206-account-create-update-97znp"]
Nov 28 07:19:30 crc kubenswrapper[4922]: I1128 07:19:30.796826 4922 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-xqzrg_e5e01f31-28bd-46a2-b5cc-695c485deaf6/ovn-controller/0.log"
Nov 28 07:19:30 crc kubenswrapper[4922]: I1128 07:19:30.796888 4922 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-xqzrg"
Nov 28 07:19:30 crc kubenswrapper[4922]: I1128 07:19:30.797205 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c5b2a607-b6c1-4e95-b722-8b150c25f371-ceilometer-tls-certs" (OuterVolumeSpecName: "ceilometer-tls-certs") pod "c5b2a607-b6c1-4e95-b722-8b150c25f371" (UID: "c5b2a607-b6c1-4e95-b722-8b150c25f371"). InnerVolumeSpecName "ceilometer-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 07:19:30 crc kubenswrapper[4922]: I1128 07:19:30.803588 4922 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/neutron-5206-account-create-update-97znp"]
Nov 28 07:19:30 crc kubenswrapper[4922]: I1128 07:19:30.809322 4922 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/neutron5206-account-delete-64tk8"]
Nov 28 07:19:30 crc kubenswrapper[4922]: I1128 07:19:30.814470 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/81469087-f8d4-4499-a1e3-9fe103758289-public-tls-certs\") pod \"81469087-f8d4-4499-a1e3-9fe103758289\" (UID: \"81469087-f8d4-4499-a1e3-9fe103758289\") "
Nov 28 07:19:30 crc kubenswrapper[4922]: I1128 07:19:30.814532 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/81469087-f8d4-4499-a1e3-9fe103758289-fernet-keys\") pod \"81469087-f8d4-4499-a1e3-9fe103758289\" (UID: \"81469087-f8d4-4499-a1e3-9fe103758289\") "
Nov 28 07:19:30 crc kubenswrapper[4922]: I1128 07:19:30.814585 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/81469087-f8d4-4499-a1e3-9fe103758289-config-data\") pod \"81469087-f8d4-4499-a1e3-9fe103758289\" (UID: \"81469087-f8d4-4499-a1e3-9fe103758289\") "
Nov 28 07:19:30 crc kubenswrapper[4922]: I1128 07:19:30.814633 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hzxc4\" (UniqueName: \"kubernetes.io/projected/81469087-f8d4-4499-a1e3-9fe103758289-kube-api-access-hzxc4\") pod \"81469087-f8d4-4499-a1e3-9fe103758289\" (UID: \"81469087-f8d4-4499-a1e3-9fe103758289\") "
Nov 28 07:19:30 crc kubenswrapper[4922]: I1128 07:19:30.814674 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/81469087-f8d4-4499-a1e3-9fe103758289-credential-keys\") pod \"81469087-f8d4-4499-a1e3-9fe103758289\" (UID: \"81469087-f8d4-4499-a1e3-9fe103758289\") "
Nov 28 07:19:30 crc kubenswrapper[4922]: I1128 07:19:30.814711 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/81469087-f8d4-4499-a1e3-9fe103758289-combined-ca-bundle\") pod \"81469087-f8d4-4499-a1e3-9fe103758289\" (UID: \"81469087-f8d4-4499-a1e3-9fe103758289\") "
Nov 28 07:19:30 crc kubenswrapper[4922]: I1128 07:19:30.814740 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/81469087-f8d4-4499-a1e3-9fe103758289-internal-tls-certs\") pod \"81469087-f8d4-4499-a1e3-9fe103758289\" (UID: \"81469087-f8d4-4499-a1e3-9fe103758289\") "
Nov 28 07:19:30 crc kubenswrapper[4922]: I1128 07:19:30.814798 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/81469087-f8d4-4499-a1e3-9fe103758289-scripts\") pod \"81469087-f8d4-4499-a1e3-9fe103758289\" (UID: \"81469087-f8d4-4499-a1e3-9fe103758289\") "
Nov 28 07:19:30 crc kubenswrapper[4922]: I1128 07:19:30.815110 4922 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/c5b2a607-b6c1-4e95-b722-8b150c25f371-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\""
Nov 28 07:19:30 crc kubenswrapper[4922]: I1128 07:19:30.815122 4922 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/c5b2a607-b6c1-4e95-b722-8b150c25f371-log-httpd\") on node \"crc\" DevicePath \"\""
Nov 28 07:19:30 crc kubenswrapper[4922]: I1128 07:19:30.815130 4922 reconciler_common.go:293] "Volume detached for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/c5b2a607-b6c1-4e95-b722-8b150c25f371-ceilometer-tls-certs\") on node \"crc\" DevicePath \"\""
Nov 28 07:19:30 crc kubenswrapper[4922]: I1128 07:19:30.815138 4922 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/c5b2a607-b6c1-4e95-b722-8b150c25f371-run-httpd\") on node \"crc\" DevicePath \"\""
Nov 28 07:19:30 crc kubenswrapper[4922]: I1128 07:19:30.815147 4922 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9mhwk\" (UniqueName: \"kubernetes.io/projected/c5b2a607-b6c1-4e95-b722-8b150c25f371-kube-api-access-9mhwk\") on node \"crc\" DevicePath \"\""
Nov 28 07:19:30 crc kubenswrapper[4922]: I1128 07:19:30.815156 4922 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c5b2a607-b6c1-4e95-b722-8b150c25f371-scripts\") on node \"crc\" DevicePath \"\""
Nov 28 07:19:30 crc kubenswrapper[4922]: I1128 07:19:30.822371 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/81469087-f8d4-4499-a1e3-9fe103758289-scripts" (OuterVolumeSpecName: "scripts") pod "81469087-f8d4-4499-a1e3-9fe103758289" (UID: "81469087-f8d4-4499-a1e3-9fe103758289"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 07:19:30 crc kubenswrapper[4922]: I1128 07:19:30.826007 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/81469087-f8d4-4499-a1e3-9fe103758289-credential-keys" (OuterVolumeSpecName: "credential-keys") pod "81469087-f8d4-4499-a1e3-9fe103758289" (UID: "81469087-f8d4-4499-a1e3-9fe103758289"). InnerVolumeSpecName "credential-keys". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 07:19:30 crc kubenswrapper[4922]: I1128 07:19:30.839781 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/81469087-f8d4-4499-a1e3-9fe103758289-fernet-keys" (OuterVolumeSpecName: "fernet-keys") pod "81469087-f8d4-4499-a1e3-9fe103758289" (UID: "81469087-f8d4-4499-a1e3-9fe103758289"). InnerVolumeSpecName "fernet-keys". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 07:19:30 crc kubenswrapper[4922]: I1128 07:19:30.852589 4922 scope.go:117] "RemoveContainer" containerID="52ecbf9b71e70de5c3daba2523d0579eaa9f475b46a7fa4424c1f79302e0602e"
Nov 28 07:19:30 crc kubenswrapper[4922]: E1128 07:19:30.855635 4922 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"52ecbf9b71e70de5c3daba2523d0579eaa9f475b46a7fa4424c1f79302e0602e\": container with ID starting with 52ecbf9b71e70de5c3daba2523d0579eaa9f475b46a7fa4424c1f79302e0602e not found: ID does not exist" containerID="52ecbf9b71e70de5c3daba2523d0579eaa9f475b46a7fa4424c1f79302e0602e"
Nov 28 07:19:30 crc kubenswrapper[4922]: I1128 07:19:30.855678 4922 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"52ecbf9b71e70de5c3daba2523d0579eaa9f475b46a7fa4424c1f79302e0602e"} err="failed to get container status \"52ecbf9b71e70de5c3daba2523d0579eaa9f475b46a7fa4424c1f79302e0602e\": rpc error: code = NotFound desc = could not find container \"52ecbf9b71e70de5c3daba2523d0579eaa9f475b46a7fa4424c1f79302e0602e\": container with ID starting with 52ecbf9b71e70de5c3daba2523d0579eaa9f475b46a7fa4424c1f79302e0602e not found: ID does not exist"
Nov 28 07:19:30 crc kubenswrapper[4922]: I1128 07:19:30.855701 4922 scope.go:117] "RemoveContainer" containerID="69d69b3f84197c0fb9fcf8a14b6ee2ee83a51d46a61eb90348dde8a31602b9e1"
Nov 28 07:19:30 crc kubenswrapper[4922]: I1128 07:19:30.855838 4922 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-0"
Nov 28 07:19:30 crc kubenswrapper[4922]: E1128 07:19:30.856426 4922 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"69d69b3f84197c0fb9fcf8a14b6ee2ee83a51d46a61eb90348dde8a31602b9e1\": container with ID starting with 69d69b3f84197c0fb9fcf8a14b6ee2ee83a51d46a61eb90348dde8a31602b9e1 not found: ID does not exist" containerID="69d69b3f84197c0fb9fcf8a14b6ee2ee83a51d46a61eb90348dde8a31602b9e1"
Nov 28 07:19:30 crc kubenswrapper[4922]: I1128 07:19:30.856455 4922 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"69d69b3f84197c0fb9fcf8a14b6ee2ee83a51d46a61eb90348dde8a31602b9e1"} err="failed to get container status \"69d69b3f84197c0fb9fcf8a14b6ee2ee83a51d46a61eb90348dde8a31602b9e1\": rpc error: code = NotFound desc = could not find container \"69d69b3f84197c0fb9fcf8a14b6ee2ee83a51d46a61eb90348dde8a31602b9e1\": container with ID starting with 69d69b3f84197c0fb9fcf8a14b6ee2ee83a51d46a61eb90348dde8a31602b9e1 not found: ID does not exist"
Nov 28 07:19:30 crc kubenswrapper[4922]: I1128 07:19:30.874864 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/81469087-f8d4-4499-a1e3-9fe103758289-kube-api-access-hzxc4" (OuterVolumeSpecName: "kube-api-access-hzxc4") pod "81469087-f8d4-4499-a1e3-9fe103758289" (UID: "81469087-f8d4-4499-a1e3-9fe103758289"). InnerVolumeSpecName "kube-api-access-hzxc4". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 28 07:19:30 crc kubenswrapper[4922]: I1128 07:19:30.893596 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/81469087-f8d4-4499-a1e3-9fe103758289-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "81469087-f8d4-4499-a1e3-9fe103758289" (UID: "81469087-f8d4-4499-a1e3-9fe103758289"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 07:19:30 crc kubenswrapper[4922]: I1128 07:19:30.893685 4922 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-db-create-lx69b"] Nov 28 07:19:30 crc kubenswrapper[4922]: I1128 07:19:30.906094 4922 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-db-create-lx69b"] Nov 28 07:19:30 crc kubenswrapper[4922]: I1128 07:19:30.922246 4922 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-5229-account-create-update-2k5qm"] Nov 28 07:19:30 crc kubenswrapper[4922]: I1128 07:19:30.922745 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/e5e01f31-28bd-46a2-b5cc-695c485deaf6-scripts\") pod \"e5e01f31-28bd-46a2-b5cc-695c485deaf6\" (UID: \"e5e01f31-28bd-46a2-b5cc-695c485deaf6\") " Nov 28 07:19:30 crc kubenswrapper[4922]: I1128 07:19:30.922826 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7dfc2e52-b959-4718-8f85-5bcec1a8ad10-config-data\") pod \"7dfc2e52-b959-4718-8f85-5bcec1a8ad10\" (UID: \"7dfc2e52-b959-4718-8f85-5bcec1a8ad10\") " Nov 28 07:19:30 crc kubenswrapper[4922]: I1128 07:19:30.922880 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-controller-tls-certs\" (UniqueName: \"kubernetes.io/secret/e5e01f31-28bd-46a2-b5cc-695c485deaf6-ovn-controller-tls-certs\") pod \"e5e01f31-28bd-46a2-b5cc-695c485deaf6\" (UID: \"e5e01f31-28bd-46a2-b5cc-695c485deaf6\") " Nov 28 07:19:30 crc kubenswrapper[4922]: I1128 07:19:30.922921 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9rtbd\" (UniqueName: \"kubernetes.io/projected/7dfc2e52-b959-4718-8f85-5bcec1a8ad10-kube-api-access-9rtbd\") pod \"7dfc2e52-b959-4718-8f85-5bcec1a8ad10\" (UID: \"7dfc2e52-b959-4718-8f85-5bcec1a8ad10\") " Nov 28 07:19:30 crc kubenswrapper[4922]: I1128 07:19:30.922940 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/e5e01f31-28bd-46a2-b5cc-695c485deaf6-var-run-ovn\") pod \"e5e01f31-28bd-46a2-b5cc-695c485deaf6\" (UID: \"e5e01f31-28bd-46a2-b5cc-695c485deaf6\") " Nov 28 07:19:30 crc kubenswrapper[4922]: I1128 07:19:30.922977 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e5e01f31-28bd-46a2-b5cc-695c485deaf6-combined-ca-bundle\") pod \"e5e01f31-28bd-46a2-b5cc-695c485deaf6\" (UID: \"e5e01f31-28bd-46a2-b5cc-695c485deaf6\") " Nov 28 07:19:30 crc kubenswrapper[4922]: I1128 07:19:30.922991 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/e5e01f31-28bd-46a2-b5cc-695c485deaf6-var-log-ovn\") pod \"e5e01f31-28bd-46a2-b5cc-695c485deaf6\" (UID: \"e5e01f31-28bd-46a2-b5cc-695c485deaf6\") " Nov 28 07:19:30 crc kubenswrapper[4922]: I1128 07:19:30.923038 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zfnr9\" (UniqueName: \"kubernetes.io/projected/e5e01f31-28bd-46a2-b5cc-695c485deaf6-kube-api-access-zfnr9\") pod \"e5e01f31-28bd-46a2-b5cc-695c485deaf6\" (UID: \"e5e01f31-28bd-46a2-b5cc-695c485deaf6\") " Nov 28 07:19:30 crc kubenswrapper[4922]: I1128 07:19:30.923109 4922 reconciler_common.go:159] 
"operationExecutor.UnmountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/e5e01f31-28bd-46a2-b5cc-695c485deaf6-var-run\") pod \"e5e01f31-28bd-46a2-b5cc-695c485deaf6\" (UID: \"e5e01f31-28bd-46a2-b5cc-695c485deaf6\") " Nov 28 07:19:30 crc kubenswrapper[4922]: I1128 07:19:30.923139 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7dfc2e52-b959-4718-8f85-5bcec1a8ad10-combined-ca-bundle\") pod \"7dfc2e52-b959-4718-8f85-5bcec1a8ad10\" (UID: \"7dfc2e52-b959-4718-8f85-5bcec1a8ad10\") " Nov 28 07:19:30 crc kubenswrapper[4922]: I1128 07:19:30.923768 4922 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hzxc4\" (UniqueName: \"kubernetes.io/projected/81469087-f8d4-4499-a1e3-9fe103758289-kube-api-access-hzxc4\") on node \"crc\" DevicePath \"\"" Nov 28 07:19:30 crc kubenswrapper[4922]: I1128 07:19:30.923783 4922 reconciler_common.go:293] "Volume detached for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/81469087-f8d4-4499-a1e3-9fe103758289-credential-keys\") on node \"crc\" DevicePath \"\"" Nov 28 07:19:30 crc kubenswrapper[4922]: I1128 07:19:30.923792 4922 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/81469087-f8d4-4499-a1e3-9fe103758289-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 07:19:30 crc kubenswrapper[4922]: I1128 07:19:30.923802 4922 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/81469087-f8d4-4499-a1e3-9fe103758289-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 07:19:30 crc kubenswrapper[4922]: I1128 07:19:30.923810 4922 reconciler_common.go:293] "Volume detached for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/81469087-f8d4-4499-a1e3-9fe103758289-fernet-keys\") on node \"crc\" DevicePath \"\"" Nov 28 07:19:30 crc kubenswrapper[4922]: I1128 07:19:30.925879 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e5e01f31-28bd-46a2-b5cc-695c485deaf6-scripts" (OuterVolumeSpecName: "scripts") pod "e5e01f31-28bd-46a2-b5cc-695c485deaf6" (UID: "e5e01f31-28bd-46a2-b5cc-695c485deaf6"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 07:19:30 crc kubenswrapper[4922]: I1128 07:19:30.925932 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/e5e01f31-28bd-46a2-b5cc-695c485deaf6-var-run-ovn" (OuterVolumeSpecName: "var-run-ovn") pod "e5e01f31-28bd-46a2-b5cc-695c485deaf6" (UID: "e5e01f31-28bd-46a2-b5cc-695c485deaf6"). InnerVolumeSpecName "var-run-ovn". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 28 07:19:30 crc kubenswrapper[4922]: I1128 07:19:30.926156 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/e5e01f31-28bd-46a2-b5cc-695c485deaf6-var-log-ovn" (OuterVolumeSpecName: "var-log-ovn") pod "e5e01f31-28bd-46a2-b5cc-695c485deaf6" (UID: "e5e01f31-28bd-46a2-b5cc-695c485deaf6"). InnerVolumeSpecName "var-log-ovn". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 28 07:19:30 crc kubenswrapper[4922]: I1128 07:19:30.929310 4922 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican5229-account-delete-qq87v"] Nov 28 07:19:30 crc kubenswrapper[4922]: I1128 07:19:30.929542 4922 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/barbican5229-account-delete-qq87v" podUID="3e442fd0-cd46-4c04-afb3-96892d39c0f4" containerName="mariadb-account-delete" containerID="cri-o://48a3ef178ad02775387bcd01d0bac70332b409ebbeaf25296cb9c690b01503c9" gracePeriod=30 Nov 28 07:19:30 crc kubenswrapper[4922]: I1128 07:19:30.929586 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/e5e01f31-28bd-46a2-b5cc-695c485deaf6-var-run" (OuterVolumeSpecName: "var-run") pod "e5e01f31-28bd-46a2-b5cc-695c485deaf6" (UID: "e5e01f31-28bd-46a2-b5cc-695c485deaf6"). InnerVolumeSpecName "var-run". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 28 07:19:30 crc kubenswrapper[4922]: I1128 07:19:30.931717 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c5b2a607-b6c1-4e95-b722-8b150c25f371-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "c5b2a607-b6c1-4e95-b722-8b150c25f371" (UID: "c5b2a607-b6c1-4e95-b722-8b150c25f371"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 07:19:30 crc kubenswrapper[4922]: I1128 07:19:30.937045 4922 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-5229-account-create-update-2k5qm"] Nov 28 07:19:30 crc kubenswrapper[4922]: I1128 07:19:30.939120 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/81469087-f8d4-4499-a1e3-9fe103758289-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "81469087-f8d4-4499-a1e3-9fe103758289" (UID: "81469087-f8d4-4499-a1e3-9fe103758289"). InnerVolumeSpecName "public-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 07:19:30 crc kubenswrapper[4922]: I1128 07:19:30.941473 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e5e01f31-28bd-46a2-b5cc-695c485deaf6-kube-api-access-zfnr9" (OuterVolumeSpecName: "kube-api-access-zfnr9") pod "e5e01f31-28bd-46a2-b5cc-695c485deaf6" (UID: "e5e01f31-28bd-46a2-b5cc-695c485deaf6"). InnerVolumeSpecName "kube-api-access-zfnr9". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 07:19:30 crc kubenswrapper[4922]: I1128 07:19:30.943433 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7dfc2e52-b959-4718-8f85-5bcec1a8ad10-kube-api-access-9rtbd" (OuterVolumeSpecName: "kube-api-access-9rtbd") pod "7dfc2e52-b959-4718-8f85-5bcec1a8ad10" (UID: "7dfc2e52-b959-4718-8f85-5bcec1a8ad10"). InnerVolumeSpecName "kube-api-access-9rtbd". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 07:19:30 crc kubenswrapper[4922]: I1128 07:19:30.966919 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/81469087-f8d4-4499-a1e3-9fe103758289-config-data" (OuterVolumeSpecName: "config-data") pod "81469087-f8d4-4499-a1e3-9fe103758289" (UID: "81469087-f8d4-4499-a1e3-9fe103758289"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 07:19:30 crc kubenswrapper[4922]: I1128 07:19:30.970465 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/81469087-f8d4-4499-a1e3-9fe103758289-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "81469087-f8d4-4499-a1e3-9fe103758289" (UID: "81469087-f8d4-4499-a1e3-9fe103758289"). InnerVolumeSpecName "internal-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 07:19:30 crc kubenswrapper[4922]: I1128 07:19:30.987420 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7dfc2e52-b959-4718-8f85-5bcec1a8ad10-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "7dfc2e52-b959-4718-8f85-5bcec1a8ad10" (UID: "7dfc2e52-b959-4718-8f85-5bcec1a8ad10"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 07:19:30 crc kubenswrapper[4922]: I1128 07:19:30.991087 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e5e01f31-28bd-46a2-b5cc-695c485deaf6-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "e5e01f31-28bd-46a2-b5cc-695c485deaf6" (UID: "e5e01f31-28bd-46a2-b5cc-695c485deaf6"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 07:19:30 crc kubenswrapper[4922]: I1128 07:19:30.993399 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7dfc2e52-b959-4718-8f85-5bcec1a8ad10-config-data" (OuterVolumeSpecName: "config-data") pod "7dfc2e52-b959-4718-8f85-5bcec1a8ad10" (UID: "7dfc2e52-b959-4718-8f85-5bcec1a8ad10"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 07:19:30 crc kubenswrapper[4922]: I1128 07:19:30.997555 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c5b2a607-b6c1-4e95-b722-8b150c25f371-config-data" (OuterVolumeSpecName: "config-data") pod "c5b2a607-b6c1-4e95-b722-8b150c25f371" (UID: "c5b2a607-b6c1-4e95-b722-8b150c25f371"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 07:19:31 crc kubenswrapper[4922]: I1128 07:19:31.026421 4922 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9rtbd\" (UniqueName: \"kubernetes.io/projected/7dfc2e52-b959-4718-8f85-5bcec1a8ad10-kube-api-access-9rtbd\") on node \"crc\" DevicePath \"\"" Nov 28 07:19:31 crc kubenswrapper[4922]: I1128 07:19:31.026452 4922 reconciler_common.go:293] "Volume detached for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/e5e01f31-28bd-46a2-b5cc-695c485deaf6-var-run-ovn\") on node \"crc\" DevicePath \"\"" Nov 28 07:19:31 crc kubenswrapper[4922]: I1128 07:19:31.026463 4922 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/81469087-f8d4-4499-a1e3-9fe103758289-internal-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 28 07:19:31 crc kubenswrapper[4922]: I1128 07:19:31.026472 4922 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e5e01f31-28bd-46a2-b5cc-695c485deaf6-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 07:19:31 crc kubenswrapper[4922]: I1128 07:19:31.026480 4922 reconciler_common.go:293] "Volume detached for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/e5e01f31-28bd-46a2-b5cc-695c485deaf6-var-log-ovn\") on node \"crc\" DevicePath \"\"" Nov 28 07:19:31 crc kubenswrapper[4922]: I1128 07:19:31.026488 4922 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c5b2a607-b6c1-4e95-b722-8b150c25f371-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 07:19:31 crc kubenswrapper[4922]: I1128 07:19:31.026498 4922 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zfnr9\" (UniqueName: \"kubernetes.io/projected/e5e01f31-28bd-46a2-b5cc-695c485deaf6-kube-api-access-zfnr9\") on node \"crc\" DevicePath \"\"" Nov 28 07:19:31 crc kubenswrapper[4922]: I1128 07:19:31.026506 4922 reconciler_common.go:293] "Volume detached for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/e5e01f31-28bd-46a2-b5cc-695c485deaf6-var-run\") on node \"crc\" DevicePath \"\"" Nov 28 07:19:31 crc kubenswrapper[4922]: I1128 07:19:31.026514 4922 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/81469087-f8d4-4499-a1e3-9fe103758289-public-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 28 07:19:31 crc kubenswrapper[4922]: I1128 07:19:31.026523 4922 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7dfc2e52-b959-4718-8f85-5bcec1a8ad10-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 07:19:31 crc kubenswrapper[4922]: I1128 07:19:31.026530 4922 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/e5e01f31-28bd-46a2-b5cc-695c485deaf6-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 07:19:31 crc kubenswrapper[4922]: I1128 07:19:31.026538 4922 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c5b2a607-b6c1-4e95-b722-8b150c25f371-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 07:19:31 crc kubenswrapper[4922]: I1128 07:19:31.026545 4922 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7dfc2e52-b959-4718-8f85-5bcec1a8ad10-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 07:19:31 crc 
kubenswrapper[4922]: I1128 07:19:31.026553 4922 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/81469087-f8d4-4499-a1e3-9fe103758289-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 07:19:31 crc kubenswrapper[4922]: I1128 07:19:31.028402 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e5e01f31-28bd-46a2-b5cc-695c485deaf6-ovn-controller-tls-certs" (OuterVolumeSpecName: "ovn-controller-tls-certs") pod "e5e01f31-28bd-46a2-b5cc-695c485deaf6" (UID: "e5e01f31-28bd-46a2-b5cc-695c485deaf6"). InnerVolumeSpecName "ovn-controller-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 07:19:31 crc kubenswrapper[4922]: I1128 07:19:31.047026 4922 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-db-create-85r25"] Nov 28 07:19:31 crc kubenswrapper[4922]: I1128 07:19:31.069928 4922 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-db-create-85r25"] Nov 28 07:19:31 crc kubenswrapper[4922]: I1128 07:19:31.076367 4922 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-78f1-account-create-update-chz7p"] Nov 28 07:19:31 crc kubenswrapper[4922]: I1128 07:19:31.128560 4922 reconciler_common.go:293] "Volume detached for volume \"ovn-controller-tls-certs\" (UniqueName: \"kubernetes.io/secret/e5e01f31-28bd-46a2-b5cc-695c485deaf6-ovn-controller-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 28 07:19:31 crc kubenswrapper[4922]: I1128 07:19:31.128601 4922 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-78f1-account-create-update-chz7p"] Nov 28 07:19:31 crc kubenswrapper[4922]: I1128 07:19:31.138671 4922 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/novaapi78f1-account-delete-mwv4p"] Nov 28 07:19:31 crc kubenswrapper[4922]: I1128 07:19:31.138933 4922 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/novaapi78f1-account-delete-mwv4p" podUID="2425a44a-c3c8-4533-9aa6-deb657556efb" containerName="mariadb-account-delete" containerID="cri-o://d7bcecc803abbf48fe4458864ac3df0f1c2163fcc4bbd9b6d2dd3b5b90dbbe4e" gracePeriod=30 Nov 28 07:19:31 crc kubenswrapper[4922]: I1128 07:19:31.152378 4922 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-db-create-8tbc2"] Nov 28 07:19:31 crc kubenswrapper[4922]: I1128 07:19:31.155650 4922 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-db-create-8tbc2"] Nov 28 07:19:31 crc kubenswrapper[4922]: I1128 07:19:31.166582 4922 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/novacell032a6-account-delete-tcwgc"] Nov 28 07:19:31 crc kubenswrapper[4922]: I1128 07:19:31.172504 4922 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-32a6-account-create-update-j752d"] Nov 28 07:19:31 crc kubenswrapper[4922]: I1128 07:19:31.180972 4922 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-32a6-account-create-update-j752d"] Nov 28 07:19:31 crc kubenswrapper[4922]: I1128 07:19:31.185482 4922 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-northd-0_e070595b-ded5-4ba1-8e5d-10dee3f64439/ovn-northd/0.log" Nov 28 07:19:31 crc kubenswrapper[4922]: I1128 07:19:31.185527 4922 generic.go:334] "Generic (PLEG): container finished" podID="e070595b-ded5-4ba1-8e5d-10dee3f64439" containerID="ab03f8552f326c9c76a50463baa6a28a8cfaa27ea7ce5e6c3db040730b019068" exitCode=139 Nov 28 07:19:31 crc 
kubenswrapper[4922]: I1128 07:19:31.185607 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-northd-0" event={"ID":"e070595b-ded5-4ba1-8e5d-10dee3f64439","Type":"ContainerDied","Data":"ab03f8552f326c9c76a50463baa6a28a8cfaa27ea7ce5e6c3db040730b019068"} Nov 28 07:19:31 crc kubenswrapper[4922]: I1128 07:19:31.190145 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-worker-5b7cbd995c-rwlzz" event={"ID":"f8685dc8-7577-4076-8a5a-beba52e9bae7","Type":"ContainerDied","Data":"d9e9c2b95a6180c1348b95175cebe7c415871cdb6d47555bec506e6e6e9dff41"} Nov 28 07:19:31 crc kubenswrapper[4922]: I1128 07:19:31.190173 4922 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-worker-5b7cbd995c-rwlzz" Nov 28 07:19:31 crc kubenswrapper[4922]: I1128 07:19:31.190193 4922 scope.go:117] "RemoveContainer" containerID="1013a435a5db00a3fabb1bd3f992a2b6780c248c282088a565511d2d85e0aa49" Nov 28 07:19:31 crc kubenswrapper[4922]: I1128 07:19:31.198739 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-keystone-listener-84495d76c8-mkvcb" event={"ID":"5ece1333-c457-4099-bf00-1daa969a14dc","Type":"ContainerDied","Data":"547b7eb2131d1818826d54e1ace34f79edd3b16dc86f8564a9effb4b0ce5247c"} Nov 28 07:19:31 crc kubenswrapper[4922]: I1128 07:19:31.198964 4922 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-keystone-listener-84495d76c8-mkvcb" Nov 28 07:19:31 crc kubenswrapper[4922]: I1128 07:19:31.203766 4922 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 28 07:19:31 crc kubenswrapper[4922]: I1128 07:19:31.203900 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"c5b2a607-b6c1-4e95-b722-8b150c25f371","Type":"ContainerDied","Data":"4720d32ff570d517cde351d4da303593a2676aa6f64374e9373752f76e7c7438"} Nov 28 07:19:31 crc kubenswrapper[4922]: I1128 07:19:31.231610 4922 scope.go:117] "RemoveContainer" containerID="b4963147ea9e2c2244fba11f3323e999f07d35d36b28648b34f61f69856ae968" Nov 28 07:19:31 crc kubenswrapper[4922]: I1128 07:19:31.278729 4922 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-worker-5b7cbd995c-rwlzz"] Nov 28 07:19:31 crc kubenswrapper[4922]: I1128 07:19:31.294226 4922 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-worker-5b7cbd995c-rwlzz"] Nov 28 07:19:31 crc kubenswrapper[4922]: I1128 07:19:31.314253 4922 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 28 07:19:31 crc kubenswrapper[4922]: I1128 07:19:31.326780 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-c6c69b978-txpld" event={"ID":"81469087-f8d4-4499-a1e3-9fe103758289","Type":"ContainerDied","Data":"b2ce01c49b5e4a1a63faa18486a3af0a31159a3792bfda73d076aad0478560a8"} Nov 28 07:19:31 crc kubenswrapper[4922]: I1128 07:19:31.326901 4922 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-c6c69b978-txpld" Nov 28 07:19:31 crc kubenswrapper[4922]: I1128 07:19:31.335830 4922 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-xqzrg_e5e01f31-28bd-46a2-b5cc-695c485deaf6/ovn-controller/0.log" Nov 28 07:19:31 crc kubenswrapper[4922]: I1128 07:19:31.335866 4922 generic.go:334] "Generic (PLEG): container finished" podID="e5e01f31-28bd-46a2-b5cc-695c485deaf6" containerID="ce9c4ff1ad6613830ac06a1b349aa3a3f88a3726cd3576c4fc3e803b5a7ea1c6" exitCode=137 Nov 28 07:19:31 crc kubenswrapper[4922]: I1128 07:19:31.335908 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-xqzrg" event={"ID":"e5e01f31-28bd-46a2-b5cc-695c485deaf6","Type":"ContainerDied","Data":"ce9c4ff1ad6613830ac06a1b349aa3a3f88a3726cd3576c4fc3e803b5a7ea1c6"} Nov 28 07:19:31 crc kubenswrapper[4922]: I1128 07:19:31.335933 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-xqzrg" event={"ID":"e5e01f31-28bd-46a2-b5cc-695c485deaf6","Type":"ContainerDied","Data":"3690849f7167495c60259b16baa6d41a1a79a1d99bbe1ad074b5a9459961e00f"} Nov 28 07:19:31 crc kubenswrapper[4922]: I1128 07:19:31.335997 4922 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-xqzrg" Nov 28 07:19:31 crc kubenswrapper[4922]: I1128 07:19:31.343974 4922 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-0" Nov 28 07:19:31 crc kubenswrapper[4922]: I1128 07:19:31.344099 4922 generic.go:334] "Generic (PLEG): container finished" podID="7dfc2e52-b959-4718-8f85-5bcec1a8ad10" containerID="e49fd2c5ff7e7c88e4e82dd56690d77b902a8ad478e8bdadcc87b5a3e2af26de" exitCode=0 Nov 28 07:19:31 crc kubenswrapper[4922]: I1128 07:19:31.344156 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-0" event={"ID":"7dfc2e52-b959-4718-8f85-5bcec1a8ad10","Type":"ContainerDied","Data":"e49fd2c5ff7e7c88e4e82dd56690d77b902a8ad478e8bdadcc87b5a3e2af26de"} Nov 28 07:19:31 crc kubenswrapper[4922]: I1128 07:19:31.344180 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-0" event={"ID":"7dfc2e52-b959-4718-8f85-5bcec1a8ad10","Type":"ContainerDied","Data":"46ede5c07bbe0eb6357b27c8a8cafeb5d767760d0bf355ecdd0cb6a1b85fbe13"} Nov 28 07:19:31 crc kubenswrapper[4922]: I1128 07:19:31.349644 4922 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Nov 28 07:19:31 crc kubenswrapper[4922]: I1128 07:19:31.350766 4922 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/novacell032a6-account-delete-tcwgc" podUID="5e4e7296-ad39-41c1-9399-b3c9072c9158" containerName="mariadb-account-delete" containerID="cri-o://1b294e250b4e8005476292722797df8e5418fe5de28dbe00f75e79efc8faf395" gracePeriod=30 Nov 28 07:19:31 crc kubenswrapper[4922]: I1128 07:19:31.350876 4922 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance75df-account-delete-wdw2x" podUID="1f339784-df58-44f7-947d-9d80559c1c0c" containerName="mariadb-account-delete" containerID="cri-o://e61f1fa7822b5f2b3605c61ec01fc8d12a428bfdcca76852ff14c1bcc915a5cd" gracePeriod=30 Nov 28 07:19:31 crc kubenswrapper[4922]: I1128 07:19:31.364915 4922 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-keystone-listener-84495d76c8-mkvcb"] Nov 28 07:19:31 crc kubenswrapper[4922]: I1128 07:19:31.368728 4922 kubelet.go:2431] "SyncLoop 
REMOVE" source="api" pods=["openstack/barbican-keystone-listener-84495d76c8-mkvcb"] Nov 28 07:19:31 crc kubenswrapper[4922]: I1128 07:19:31.413316 4922 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="02d26a97-d447-4f76-90ed-9357e343cd91" path="/var/lib/kubelet/pods/02d26a97-d447-4f76-90ed-9357e343cd91/volumes" Nov 28 07:19:31 crc kubenswrapper[4922]: I1128 07:19:31.414181 4922 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0cd494ee-6c17-4d94-96c2-9e2fcf02b2bc" path="/var/lib/kubelet/pods/0cd494ee-6c17-4d94-96c2-9e2fcf02b2bc/volumes" Nov 28 07:19:31 crc kubenswrapper[4922]: I1128 07:19:31.415789 4922 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1018f07c-38b9-440f-b126-26e59293e757" path="/var/lib/kubelet/pods/1018f07c-38b9-440f-b126-26e59293e757/volumes" Nov 28 07:19:31 crc kubenswrapper[4922]: I1128 07:19:31.417388 4922 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="182970fb-401f-404c-81c1-db0294b02167" path="/var/lib/kubelet/pods/182970fb-401f-404c-81c1-db0294b02167/volumes" Nov 28 07:19:31 crc kubenswrapper[4922]: I1128 07:19:31.418761 4922 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2d0a1633-2540-4354-93e9-8e963e8245f0" path="/var/lib/kubelet/pods/2d0a1633-2540-4354-93e9-8e963e8245f0/volumes" Nov 28 07:19:31 crc kubenswrapper[4922]: I1128 07:19:31.419388 4922 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2f1294e8-adc5-47fa-a186-4ef79c093a3a" path="/var/lib/kubelet/pods/2f1294e8-adc5-47fa-a186-4ef79c093a3a/volumes" Nov 28 07:19:31 crc kubenswrapper[4922]: I1128 07:19:31.420521 4922 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="30134dd7-1b96-4057-8065-c3e59e70baff" path="/var/lib/kubelet/pods/30134dd7-1b96-4057-8065-c3e59e70baff/volumes" Nov 28 07:19:31 crc kubenswrapper[4922]: I1128 07:19:31.421121 4922 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="36015a39-56bc-437e-9cdb-93f8ad059c45" path="/var/lib/kubelet/pods/36015a39-56bc-437e-9cdb-93f8ad059c45/volumes" Nov 28 07:19:31 crc kubenswrapper[4922]: I1128 07:19:31.421965 4922 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4cf25acc-0d60-4b0a-a9c9-adc7ddce7458" path="/var/lib/kubelet/pods/4cf25acc-0d60-4b0a-a9c9-adc7ddce7458/volumes" Nov 28 07:19:31 crc kubenswrapper[4922]: I1128 07:19:31.423199 4922 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5ece1333-c457-4099-bf00-1daa969a14dc" path="/var/lib/kubelet/pods/5ece1333-c457-4099-bf00-1daa969a14dc/volumes" Nov 28 07:19:31 crc kubenswrapper[4922]: I1128 07:19:31.423902 4922 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="674d4d02-4734-4cb5-a316-b12a9dcb974c" path="/var/lib/kubelet/pods/674d4d02-4734-4cb5-a316-b12a9dcb974c/volumes" Nov 28 07:19:31 crc kubenswrapper[4922]: I1128 07:19:31.424516 4922 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6a3388c9-ec6c-400b-bfcd-59374499fd42" path="/var/lib/kubelet/pods/6a3388c9-ec6c-400b-bfcd-59374499fd42/volumes" Nov 28 07:19:31 crc kubenswrapper[4922]: I1128 07:19:31.425784 4922 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="711e933a-0d19-4c60-90fe-c1c3ed0174ed" path="/var/lib/kubelet/pods/711e933a-0d19-4c60-90fe-c1c3ed0174ed/volumes" Nov 28 07:19:31 crc kubenswrapper[4922]: I1128 07:19:31.426521 4922 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7c0f0857-2ca9-49c8-90ac-1351b2ee2f11" 
path="/var/lib/kubelet/pods/7c0f0857-2ca9-49c8-90ac-1351b2ee2f11/volumes" Nov 28 07:19:31 crc kubenswrapper[4922]: I1128 07:19:31.427203 4922 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7e1382f2-6597-4c09-a171-8709e4b9f5f7" path="/var/lib/kubelet/pods/7e1382f2-6597-4c09-a171-8709e4b9f5f7/volumes" Nov 28 07:19:31 crc kubenswrapper[4922]: I1128 07:19:31.428627 4922 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="813e18d9-b3c1-48eb-aff7-197d425910ef" path="/var/lib/kubelet/pods/813e18d9-b3c1-48eb-aff7-197d425910ef/volumes" Nov 28 07:19:31 crc kubenswrapper[4922]: I1128 07:19:31.429303 4922 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8d12d285-16c4-4e64-98d8-cff0f581aee4" path="/var/lib/kubelet/pods/8d12d285-16c4-4e64-98d8-cff0f581aee4/volumes" Nov 28 07:19:31 crc kubenswrapper[4922]: I1128 07:19:31.429859 4922 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="92eb4ce5-fb24-4b33-8e79-6f4e7ba96372" path="/var/lib/kubelet/pods/92eb4ce5-fb24-4b33-8e79-6f4e7ba96372/volumes" Nov 28 07:19:31 crc kubenswrapper[4922]: I1128 07:19:31.430965 4922 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="932af8b2-8cfa-4807-a6a4-c2cb82e2bf82" path="/var/lib/kubelet/pods/932af8b2-8cfa-4807-a6a4-c2cb82e2bf82/volumes" Nov 28 07:19:31 crc kubenswrapper[4922]: I1128 07:19:31.431769 4922 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="99708a5d-57d5-4479-8e09-94428bb13fa3" path="/var/lib/kubelet/pods/99708a5d-57d5-4479-8e09-94428bb13fa3/volumes" Nov 28 07:19:31 crc kubenswrapper[4922]: I1128 07:19:31.432494 4922 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b1e1fd22-8937-4a32-bf98-06e3655deb07" path="/var/lib/kubelet/pods/b1e1fd22-8937-4a32-bf98-06e3655deb07/volumes" Nov 28 07:19:31 crc kubenswrapper[4922]: I1128 07:19:31.433808 4922 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b2273c58-3ea7-4979-a39d-6e9ba47b15aa" path="/var/lib/kubelet/pods/b2273c58-3ea7-4979-a39d-6e9ba47b15aa/volumes" Nov 28 07:19:31 crc kubenswrapper[4922]: I1128 07:19:31.434487 4922 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c038b865-4b32-4be3-9e0a-8c40dc140a68" path="/var/lib/kubelet/pods/c038b865-4b32-4be3-9e0a-8c40dc140a68/volumes" Nov 28 07:19:31 crc kubenswrapper[4922]: I1128 07:19:31.435467 4922 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c39356f2-8f5d-45d3-8188-7d9428c4d8bf" path="/var/lib/kubelet/pods/c39356f2-8f5d-45d3-8188-7d9428c4d8bf/volumes" Nov 28 07:19:31 crc kubenswrapper[4922]: I1128 07:19:31.436685 4922 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c5b2a607-b6c1-4e95-b722-8b150c25f371" path="/var/lib/kubelet/pods/c5b2a607-b6c1-4e95-b722-8b150c25f371/volumes" Nov 28 07:19:31 crc kubenswrapper[4922]: I1128 07:19:31.437491 4922 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c77ee72a-1c8d-4994-8d07-a0338a55489a" path="/var/lib/kubelet/pods/c77ee72a-1c8d-4994-8d07-a0338a55489a/volumes" Nov 28 07:19:31 crc kubenswrapper[4922]: I1128 07:19:31.438595 4922 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c8fb5604-6627-4b59-9407-fe88464ba214" path="/var/lib/kubelet/pods/c8fb5604-6627-4b59-9407-fe88464ba214/volumes" Nov 28 07:19:31 crc kubenswrapper[4922]: I1128 07:19:31.439246 4922 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ce9e0923-85c6-48cc-bfa7-9b51637a188d" 
path="/var/lib/kubelet/pods/ce9e0923-85c6-48cc-bfa7-9b51637a188d/volumes" Nov 28 07:19:31 crc kubenswrapper[4922]: I1128 07:19:31.439870 4922 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ed30601b-1e7c-4aa6-8469-8ff61cd93253" path="/var/lib/kubelet/pods/ed30601b-1e7c-4aa6-8469-8ff61cd93253/volumes" Nov 28 07:19:31 crc kubenswrapper[4922]: I1128 07:19:31.441005 4922 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f8685dc8-7577-4076-8a5a-beba52e9bae7" path="/var/lib/kubelet/pods/f8685dc8-7577-4076-8a5a-beba52e9bae7/volumes" Nov 28 07:19:31 crc kubenswrapper[4922]: I1128 07:19:31.517148 4922 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-c6c69b978-txpld"] Nov 28 07:19:31 crc kubenswrapper[4922]: I1128 07:19:31.518418 4922 scope.go:117] "RemoveContainer" containerID="6c09aa779d052923b62c13eee209268276067fcf2d60f0ab88d8d7db5fd25ca4" Nov 28 07:19:31 crc kubenswrapper[4922]: I1128 07:19:31.522842 4922 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-northd-0_e070595b-ded5-4ba1-8e5d-10dee3f64439/ovn-northd/0.log" Nov 28 07:19:31 crc kubenswrapper[4922]: I1128 07:19:31.522903 4922 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-northd-0" Nov 28 07:19:31 crc kubenswrapper[4922]: I1128 07:19:31.529645 4922 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-c6c69b978-txpld"] Nov 28 07:19:31 crc kubenswrapper[4922]: I1128 07:19:31.549778 4922 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovn-controller-xqzrg"] Nov 28 07:19:31 crc kubenswrapper[4922]: I1128 07:19:31.565690 4922 scope.go:117] "RemoveContainer" containerID="3004dce57f11a2ee90d32c564f6d0b320053fb2f43c4069e423362623aae9bfa" Nov 28 07:19:31 crc kubenswrapper[4922]: I1128 07:19:31.566143 4922 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ovn-controller-xqzrg"] Nov 28 07:19:31 crc kubenswrapper[4922]: I1128 07:19:31.587576 4922 scope.go:117] "RemoveContainer" containerID="46994e6ac2bb018930a78ad0cd1fe4646ebb6039110f4ddf448db9a5fa5fd689" Nov 28 07:19:31 crc kubenswrapper[4922]: I1128 07:19:31.603950 4922 scope.go:117] "RemoveContainer" containerID="1e15529dfacc098964836d9b402c0c6f8dfa68f13f57273de3bff5e93b296db5" Nov 28 07:19:31 crc kubenswrapper[4922]: I1128 07:19:31.625583 4922 scope.go:117] "RemoveContainer" containerID="3f6adb9e4600f443d95c7ef3d113f08146c9cb6aad10859be6aa10d59ce782f0" Nov 28 07:19:31 crc kubenswrapper[4922]: I1128 07:19:31.638732 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e070595b-ded5-4ba1-8e5d-10dee3f64439-combined-ca-bundle\") pod \"e070595b-ded5-4ba1-8e5d-10dee3f64439\" (UID: \"e070595b-ded5-4ba1-8e5d-10dee3f64439\") " Nov 28 07:19:31 crc kubenswrapper[4922]: I1128 07:19:31.638795 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-htpqg\" (UniqueName: \"kubernetes.io/projected/e070595b-ded5-4ba1-8e5d-10dee3f64439-kube-api-access-htpqg\") pod \"e070595b-ded5-4ba1-8e5d-10dee3f64439\" (UID: \"e070595b-ded5-4ba1-8e5d-10dee3f64439\") " Nov 28 07:19:31 crc kubenswrapper[4922]: I1128 07:19:31.638844 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/empty-dir/e070595b-ded5-4ba1-8e5d-10dee3f64439-ovn-rundir\") pod \"e070595b-ded5-4ba1-8e5d-10dee3f64439\" (UID: \"e070595b-ded5-4ba1-8e5d-10dee3f64439\") " 
Nov 28 07:19:31 crc kubenswrapper[4922]: I1128 07:19:31.638862 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-northd-tls-certs\" (UniqueName: \"kubernetes.io/secret/e070595b-ded5-4ba1-8e5d-10dee3f64439-ovn-northd-tls-certs\") pod \"e070595b-ded5-4ba1-8e5d-10dee3f64439\" (UID: \"e070595b-ded5-4ba1-8e5d-10dee3f64439\") "
Nov 28 07:19:31 crc kubenswrapper[4922]: I1128 07:19:31.638916 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e070595b-ded5-4ba1-8e5d-10dee3f64439-config\") pod \"e070595b-ded5-4ba1-8e5d-10dee3f64439\" (UID: \"e070595b-ded5-4ba1-8e5d-10dee3f64439\") "
Nov 28 07:19:31 crc kubenswrapper[4922]: I1128 07:19:31.638962 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/e070595b-ded5-4ba1-8e5d-10dee3f64439-metrics-certs-tls-certs\") pod \"e070595b-ded5-4ba1-8e5d-10dee3f64439\" (UID: \"e070595b-ded5-4ba1-8e5d-10dee3f64439\") "
Nov 28 07:19:31 crc kubenswrapper[4922]: I1128 07:19:31.639011 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/e070595b-ded5-4ba1-8e5d-10dee3f64439-scripts\") pod \"e070595b-ded5-4ba1-8e5d-10dee3f64439\" (UID: \"e070595b-ded5-4ba1-8e5d-10dee3f64439\") "
Nov 28 07:19:31 crc kubenswrapper[4922]: E1128 07:19:31.639378 4922 configmap.go:193] Couldn't get configMap openstack/openstack-scripts: configmap "openstack-scripts" not found
Nov 28 07:19:31 crc kubenswrapper[4922]: E1128 07:19:31.639424 4922 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/2425a44a-c3c8-4533-9aa6-deb657556efb-operator-scripts podName:2425a44a-c3c8-4533-9aa6-deb657556efb nodeName:}" failed. No retries permitted until 2025-11-28 07:19:35.639409761 +0000 UTC m=+1620.559805333 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "operator-scripts" (UniqueName: "kubernetes.io/configmap/2425a44a-c3c8-4533-9aa6-deb657556efb-operator-scripts") pod "novaapi78f1-account-delete-mwv4p" (UID: "2425a44a-c3c8-4533-9aa6-deb657556efb") : configmap "openstack-scripts" not found
Nov 28 07:19:31 crc kubenswrapper[4922]: E1128 07:19:31.643176 4922 configmap.go:193] Couldn't get configMap openstack/openstack-scripts: configmap "openstack-scripts" not found
Nov 28 07:19:31 crc kubenswrapper[4922]: E1128 07:19:31.643377 4922 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/ca6d488f-6085-4e22-a325-1b749d8c154c-operator-scripts podName:ca6d488f-6085-4e22-a325-1b749d8c154c nodeName:}" failed. No retries permitted until 2025-11-28 07:19:35.643352785 +0000 UTC m=+1620.563748457 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "operator-scripts" (UniqueName: "kubernetes.io/configmap/ca6d488f-6085-4e22-a325-1b749d8c154c-operator-scripts") pod "placementdbc9-account-delete-68tbl" (UID: "ca6d488f-6085-4e22-a325-1b749d8c154c") : configmap "openstack-scripts" not found
Nov 28 07:19:31 crc kubenswrapper[4922]: I1128 07:19:31.643505 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e070595b-ded5-4ba1-8e5d-10dee3f64439-ovn-rundir" (OuterVolumeSpecName: "ovn-rundir") pod "e070595b-ded5-4ba1-8e5d-10dee3f64439" (UID: "e070595b-ded5-4ba1-8e5d-10dee3f64439"). InnerVolumeSpecName "ovn-rundir". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 28 07:19:31 crc kubenswrapper[4922]: I1128 07:19:31.644509 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e070595b-ded5-4ba1-8e5d-10dee3f64439-scripts" (OuterVolumeSpecName: "scripts") pod "e070595b-ded5-4ba1-8e5d-10dee3f64439" (UID: "e070595b-ded5-4ba1-8e5d-10dee3f64439"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 28 07:19:31 crc kubenswrapper[4922]: E1128 07:19:31.645120 4922 configmap.go:193] Couldn't get configMap openstack/openstack-scripts: configmap "openstack-scripts" not found
Nov 28 07:19:31 crc kubenswrapper[4922]: E1128 07:19:31.645267 4922 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/3e442fd0-cd46-4c04-afb3-96892d39c0f4-operator-scripts podName:3e442fd0-cd46-4c04-afb3-96892d39c0f4 nodeName:}" failed. No retries permitted until 2025-11-28 07:19:35.645254526 +0000 UTC m=+1620.565650218 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "operator-scripts" (UniqueName: "kubernetes.io/configmap/3e442fd0-cd46-4c04-afb3-96892d39c0f4-operator-scripts") pod "barbican5229-account-delete-qq87v" (UID: "3e442fd0-cd46-4c04-afb3-96892d39c0f4") : configmap "openstack-scripts" not found
Nov 28 07:19:31 crc kubenswrapper[4922]: I1128 07:19:31.652175 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e070595b-ded5-4ba1-8e5d-10dee3f64439-config" (OuterVolumeSpecName: "config") pod "e070595b-ded5-4ba1-8e5d-10dee3f64439" (UID: "e070595b-ded5-4ba1-8e5d-10dee3f64439"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 28 07:19:31 crc kubenswrapper[4922]: I1128 07:19:31.659854 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e070595b-ded5-4ba1-8e5d-10dee3f64439-kube-api-access-htpqg" (OuterVolumeSpecName: "kube-api-access-htpqg") pod "e070595b-ded5-4ba1-8e5d-10dee3f64439" (UID: "e070595b-ded5-4ba1-8e5d-10dee3f64439"). InnerVolumeSpecName "kube-api-access-htpqg". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 28 07:19:31 crc kubenswrapper[4922]: I1128 07:19:31.740678 4922 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-htpqg\" (UniqueName: \"kubernetes.io/projected/e070595b-ded5-4ba1-8e5d-10dee3f64439-kube-api-access-htpqg\") on node \"crc\" DevicePath \"\""
Nov 28 07:19:31 crc kubenswrapper[4922]: I1128 07:19:31.740739 4922 reconciler_common.go:293] "Volume detached for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/empty-dir/e070595b-ded5-4ba1-8e5d-10dee3f64439-ovn-rundir\") on node \"crc\" DevicePath \"\""
Nov 28 07:19:31 crc kubenswrapper[4922]: I1128 07:19:31.740753 4922 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e070595b-ded5-4ba1-8e5d-10dee3f64439-config\") on node \"crc\" DevicePath \"\""
Nov 28 07:19:31 crc kubenswrapper[4922]: I1128 07:19:31.740767 4922 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/e070595b-ded5-4ba1-8e5d-10dee3f64439-scripts\") on node \"crc\" DevicePath \"\""
Nov 28 07:19:31 crc kubenswrapper[4922]: I1128 07:19:31.775063 4922 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/barbican-api-7947bcd956-482dv" podUID="1d97532b-e9ff-4031-a82c-3db5e943bfd9" containerName="barbican-api" probeResult="failure" output="Get \"https://10.217.0.163:9311/healthcheck\": context deadline exceeded"
Nov 28 07:19:31 crc kubenswrapper[4922]: I1128 07:19:31.775191 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e070595b-ded5-4ba1-8e5d-10dee3f64439-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "e070595b-ded5-4ba1-8e5d-10dee3f64439" (UID: "e070595b-ded5-4ba1-8e5d-10dee3f64439"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 07:19:31 crc kubenswrapper[4922]: I1128 07:19:31.775063 4922 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/barbican-api-7947bcd956-482dv" podUID="1d97532b-e9ff-4031-a82c-3db5e943bfd9" containerName="barbican-api-log" probeResult="failure" output="Get \"https://10.217.0.163:9311/healthcheck\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)"
Nov 28 07:19:31 crc kubenswrapper[4922]: I1128 07:19:31.789357 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e070595b-ded5-4ba1-8e5d-10dee3f64439-metrics-certs-tls-certs" (OuterVolumeSpecName: "metrics-certs-tls-certs") pod "e070595b-ded5-4ba1-8e5d-10dee3f64439" (UID: "e070595b-ded5-4ba1-8e5d-10dee3f64439"). InnerVolumeSpecName "metrics-certs-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 07:19:31 crc kubenswrapper[4922]: I1128 07:19:31.809117 4922 scope.go:117] "RemoveContainer" containerID="76e1e7d5729fdebb0173eee23985b2e33e4bffc2543f1da0f92592472e21c4ec"
Nov 28 07:19:31 crc kubenswrapper[4922]: I1128 07:19:31.827685 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e070595b-ded5-4ba1-8e5d-10dee3f64439-ovn-northd-tls-certs" (OuterVolumeSpecName: "ovn-northd-tls-certs") pod "e070595b-ded5-4ba1-8e5d-10dee3f64439" (UID: "e070595b-ded5-4ba1-8e5d-10dee3f64439"). InnerVolumeSpecName "ovn-northd-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 07:19:31 crc kubenswrapper[4922]: I1128 07:19:31.842393 4922 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e070595b-ded5-4ba1-8e5d-10dee3f64439-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Nov 28 07:19:31 crc kubenswrapper[4922]: I1128 07:19:31.842427 4922 reconciler_common.go:293] "Volume detached for volume \"ovn-northd-tls-certs\" (UniqueName: \"kubernetes.io/secret/e070595b-ded5-4ba1-8e5d-10dee3f64439-ovn-northd-tls-certs\") on node \"crc\" DevicePath \"\""
Nov 28 07:19:31 crc kubenswrapper[4922]: I1128 07:19:31.842440 4922 reconciler_common.go:293] "Volume detached for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/e070595b-ded5-4ba1-8e5d-10dee3f64439-metrics-certs-tls-certs\") on node \"crc\" DevicePath \"\""
Nov 28 07:19:31 crc kubenswrapper[4922]: I1128 07:19:31.887315 4922 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/cinder-api-0" podUID="c038b865-4b32-4be3-9e0a-8c40dc140a68" containerName="cinder-api" probeResult="failure" output="Get \"https://10.217.0.164:8776/healthcheck\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)"
Nov 28 07:19:31 crc kubenswrapper[4922]: I1128 07:19:31.899419 4922 scope.go:117] "RemoveContainer" containerID="e14162c5f538b87ed64ebb17f2d96cca074bc89ff83ab9d46b7b609216c66fb3"
Nov 28 07:19:31 crc kubenswrapper[4922]: E1128 07:19:31.952507 4922 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 7588b9fec893bcd865ba591436037cd645135e83e041b11ecddc7dce8fa1ace5 is running failed: container process not found" containerID="7588b9fec893bcd865ba591436037cd645135e83e041b11ecddc7dce8fa1ace5" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"]
Nov 28 07:19:31 crc kubenswrapper[4922]: E1128 07:19:31.952865 4922 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 7588b9fec893bcd865ba591436037cd645135e83e041b11ecddc7dce8fa1ace5 is running failed: container process not found" containerID="7588b9fec893bcd865ba591436037cd645135e83e041b11ecddc7dce8fa1ace5" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"]
Nov 28 07:19:31 crc kubenswrapper[4922]: E1128 07:19:31.954348 4922 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="35985199ebc15c8748b4f22bfafda9ad3d4b2a2643cfa7d6df842376383f82a2" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"]
Nov 28 07:19:31 crc kubenswrapper[4922]: E1128 07:19:31.954403 4922 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 7588b9fec893bcd865ba591436037cd645135e83e041b11ecddc7dce8fa1ace5 is running failed: container process not found" containerID="7588b9fec893bcd865ba591436037cd645135e83e041b11ecddc7dce8fa1ace5" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"]
Nov 28 07:19:31 crc kubenswrapper[4922]: E1128 07:19:31.954421 4922 prober.go:104] "Probe errored" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 7588b9fec893bcd865ba591436037cd645135e83e041b11ecddc7dce8fa1ace5 is running failed: container process not found" probeType="Readiness" pod="openstack/ovn-controller-ovs-m9xpz" podUID="ec882eb7-01fb-4f7f-bad8-812346e5880e" containerName="ovsdb-server"
Nov 28 07:19:31 crc kubenswrapper[4922]: E1128 07:19:31.956647 4922 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="35985199ebc15c8748b4f22bfafda9ad3d4b2a2643cfa7d6df842376383f82a2" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"]
Nov 28 07:19:31 crc kubenswrapper[4922]: E1128 07:19:31.969280 4922 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="35985199ebc15c8748b4f22bfafda9ad3d4b2a2643cfa7d6df842376383f82a2" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"]
Nov 28 07:19:31 crc kubenswrapper[4922]: E1128 07:19:31.969347 4922 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/ovn-controller-ovs-m9xpz" podUID="ec882eb7-01fb-4f7f-bad8-812346e5880e" containerName="ovs-vswitchd"
Nov 28 07:19:31 crc kubenswrapper[4922]: I1128 07:19:31.987831 4922 scope.go:117] "RemoveContainer" containerID="ce9c4ff1ad6613830ac06a1b349aa3a3f88a3726cd3576c4fc3e803b5a7ea1c6"
Nov 28 07:19:32 crc kubenswrapper[4922]: I1128 07:19:32.015378 4922 scope.go:117] "RemoveContainer" containerID="ce9c4ff1ad6613830ac06a1b349aa3a3f88a3726cd3576c4fc3e803b5a7ea1c6"
Nov 28 07:19:32 crc kubenswrapper[4922]: E1128 07:19:32.015943 4922 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ce9c4ff1ad6613830ac06a1b349aa3a3f88a3726cd3576c4fc3e803b5a7ea1c6\": container with ID starting with ce9c4ff1ad6613830ac06a1b349aa3a3f88a3726cd3576c4fc3e803b5a7ea1c6 not found: ID does not exist" containerID="ce9c4ff1ad6613830ac06a1b349aa3a3f88a3726cd3576c4fc3e803b5a7ea1c6"
Nov 28 07:19:32 crc kubenswrapper[4922]: I1128 07:19:32.015981 4922 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ce9c4ff1ad6613830ac06a1b349aa3a3f88a3726cd3576c4fc3e803b5a7ea1c6"} err="failed to get container status \"ce9c4ff1ad6613830ac06a1b349aa3a3f88a3726cd3576c4fc3e803b5a7ea1c6\": rpc error: code = NotFound desc = could not find container \"ce9c4ff1ad6613830ac06a1b349aa3a3f88a3726cd3576c4fc3e803b5a7ea1c6\": container with ID starting with ce9c4ff1ad6613830ac06a1b349aa3a3f88a3726cd3576c4fc3e803b5a7ea1c6 not found: ID does not exist"
Nov 28 07:19:32 crc kubenswrapper[4922]: I1128 07:19:32.016006 4922 scope.go:117] "RemoveContainer" containerID="e49fd2c5ff7e7c88e4e82dd56690d77b902a8ad478e8bdadcc87b5a3e2af26de"
Nov 28 07:19:32 crc kubenswrapper[4922]: I1128 07:19:32.037529 4922 scope.go:117] "RemoveContainer" containerID="e49fd2c5ff7e7c88e4e82dd56690d77b902a8ad478e8bdadcc87b5a3e2af26de"
Nov 28 07:19:32 crc kubenswrapper[4922]: E1128 07:19:32.037965 4922 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e49fd2c5ff7e7c88e4e82dd56690d77b902a8ad478e8bdadcc87b5a3e2af26de\": container with ID starting with e49fd2c5ff7e7c88e4e82dd56690d77b902a8ad478e8bdadcc87b5a3e2af26de not found: ID does not exist" containerID="e49fd2c5ff7e7c88e4e82dd56690d77b902a8ad478e8bdadcc87b5a3e2af26de"
Nov 28 07:19:32 crc kubenswrapper[4922]: I1128 07:19:32.038004 4922 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e49fd2c5ff7e7c88e4e82dd56690d77b902a8ad478e8bdadcc87b5a3e2af26de"} err="failed to get container status \"e49fd2c5ff7e7c88e4e82dd56690d77b902a8ad478e8bdadcc87b5a3e2af26de\": rpc error: code = NotFound desc = could not find container \"e49fd2c5ff7e7c88e4e82dd56690d77b902a8ad478e8bdadcc87b5a3e2af26de\": container with ID starting with e49fd2c5ff7e7c88e4e82dd56690d77b902a8ad478e8bdadcc87b5a3e2af26de not found: ID does not exist"
Nov 28 07:19:32 crc kubenswrapper[4922]: I1128 07:19:32.365683 4922 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-northd-0_e070595b-ded5-4ba1-8e5d-10dee3f64439/ovn-northd/0.log"
Nov 28 07:19:32 crc kubenswrapper[4922]: I1128 07:19:32.365764 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-northd-0" event={"ID":"e070595b-ded5-4ba1-8e5d-10dee3f64439","Type":"ContainerDied","Data":"820b9b4afab004f353730beeb209fca5f5cabbd4b28c95731bb81073037f35cb"}
Nov 28 07:19:32 crc kubenswrapper[4922]: I1128 07:19:32.365819 4922 scope.go:117] "RemoveContainer" containerID="080f55e8e51ff6a214b0fe9fe62cc38adee207ae9bc0a4e40e78d515f29e447e"
Nov 28 07:19:32 crc kubenswrapper[4922]: I1128 07:19:32.365933 4922 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-northd-0"
Nov 28 07:19:32 crc kubenswrapper[4922]: I1128 07:19:32.452016 4922 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovn-northd-0"]
Nov 28 07:19:32 crc kubenswrapper[4922]: I1128 07:19:32.458127 4922 scope.go:117] "RemoveContainer" containerID="ab03f8552f326c9c76a50463baa6a28a8cfaa27ea7ce5e6c3db040730b019068"
Nov 28 07:19:32 crc kubenswrapper[4922]: I1128 07:19:32.458955 4922 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ovn-northd-0"]
Nov 28 07:19:32 crc kubenswrapper[4922]: E1128 07:19:32.756643 4922 configmap.go:193] Couldn't get configMap openstack/openstack-scripts: configmap "openstack-scripts" not found
Nov 28 07:19:32 crc kubenswrapper[4922]: E1128 07:19:32.756705 4922 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5e4e7296-ad39-41c1-9399-b3c9072c9158-operator-scripts podName:5e4e7296-ad39-41c1-9399-b3c9072c9158 nodeName:}" failed. No retries permitted until 2025-11-28 07:19:36.756691527 +0000 UTC m=+1621.677087109 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "operator-scripts" (UniqueName: "kubernetes.io/configmap/5e4e7296-ad39-41c1-9399-b3c9072c9158-operator-scripts") pod "novacell032a6-account-delete-tcwgc" (UID: "5e4e7296-ad39-41c1-9399-b3c9072c9158") : configmap "openstack-scripts" not found
Nov 28 07:19:32 crc kubenswrapper[4922]: E1128 07:19:32.756789 4922 configmap.go:193] Couldn't get configMap openstack/openstack-scripts: configmap "openstack-scripts" not found
Nov 28 07:19:32 crc kubenswrapper[4922]: E1128 07:19:32.756849 4922 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/1f339784-df58-44f7-947d-9d80559c1c0c-operator-scripts podName:1f339784-df58-44f7-947d-9d80559c1c0c nodeName:}" failed. No retries permitted until 2025-11-28 07:19:36.756835461 +0000 UTC m=+1621.677231043 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "operator-scripts" (UniqueName: "kubernetes.io/configmap/1f339784-df58-44f7-947d-9d80559c1c0c-operator-scripts") pod "glance75df-account-delete-wdw2x" (UID: "1f339784-df58-44f7-947d-9d80559c1c0c") : configmap "openstack-scripts" not found
Nov 28 07:19:33 crc kubenswrapper[4922]: I1128 07:19:33.413457 4922 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="81469087-f8d4-4499-a1e3-9fe103758289" path="/var/lib/kubelet/pods/81469087-f8d4-4499-a1e3-9fe103758289/volumes"
Nov 28 07:19:33 crc kubenswrapper[4922]: I1128 07:19:33.415070 4922 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e070595b-ded5-4ba1-8e5d-10dee3f64439" path="/var/lib/kubelet/pods/e070595b-ded5-4ba1-8e5d-10dee3f64439/volumes"
Nov 28 07:19:33 crc kubenswrapper[4922]: I1128 07:19:33.416150 4922 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e5e01f31-28bd-46a2-b5cc-695c485deaf6" path="/var/lib/kubelet/pods/e5e01f31-28bd-46a2-b5cc-695c485deaf6/volumes"
Nov 28 07:19:33 crc kubenswrapper[4922]: I1128 07:19:33.920520 4922 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/rabbitmq-cell1-server-0" podUID="99708a5d-57d5-4479-8e09-94428bb13fa3" containerName="rabbitmq" probeResult="failure" output="dial tcp 10.217.0.102:5671: i/o timeout"
Nov 28 07:19:35 crc kubenswrapper[4922]: I1128 07:19:35.360510 4922 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/barbican-api-757bbb5fbd-lx4kn" podUID="f1e0e318-5b90-4c18-ba95-fc261ffb519d" containerName="barbican-api-log" probeResult="failure" output="Get \"https://10.217.0.206:9311/healthcheck\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)"
Nov 28 07:19:35 crc kubenswrapper[4922]: E1128 07:19:35.730175 4922 configmap.go:193] Couldn't get configMap openstack/openstack-scripts: configmap "openstack-scripts" not found
Nov 28 07:19:35 crc kubenswrapper[4922]: E1128 07:19:35.730744 4922 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/ca6d488f-6085-4e22-a325-1b749d8c154c-operator-scripts podName:ca6d488f-6085-4e22-a325-1b749d8c154c nodeName:}" failed. No retries permitted until 2025-11-28 07:19:43.730729285 +0000 UTC m=+1628.651124857 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "operator-scripts" (UniqueName: "kubernetes.io/configmap/ca6d488f-6085-4e22-a325-1b749d8c154c-operator-scripts") pod "placementdbc9-account-delete-68tbl" (UID: "ca6d488f-6085-4e22-a325-1b749d8c154c") : configmap "openstack-scripts" not found
Nov 28 07:19:35 crc kubenswrapper[4922]: E1128 07:19:35.730289 4922 configmap.go:193] Couldn't get configMap openstack/openstack-scripts: configmap "openstack-scripts" not found
Nov 28 07:19:35 crc kubenswrapper[4922]: E1128 07:19:35.730898 4922 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/3e442fd0-cd46-4c04-afb3-96892d39c0f4-operator-scripts podName:3e442fd0-cd46-4c04-afb3-96892d39c0f4 nodeName:}" failed. No retries permitted until 2025-11-28 07:19:43.730890329 +0000 UTC m=+1628.651285911 (durationBeforeRetry 8s).
Error: MountVolume.SetUp failed for volume "operator-scripts" (UniqueName: "kubernetes.io/configmap/3e442fd0-cd46-4c04-afb3-96892d39c0f4-operator-scripts") pod "barbican5229-account-delete-qq87v" (UID: "3e442fd0-cd46-4c04-afb3-96892d39c0f4") : configmap "openstack-scripts" not found Nov 28 07:19:35 crc kubenswrapper[4922]: E1128 07:19:35.730294 4922 configmap.go:193] Couldn't get configMap openstack/openstack-scripts: configmap "openstack-scripts" not found Nov 28 07:19:35 crc kubenswrapper[4922]: E1128 07:19:35.731060 4922 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/2425a44a-c3c8-4533-9aa6-deb657556efb-operator-scripts podName:2425a44a-c3c8-4533-9aa6-deb657556efb nodeName:}" failed. No retries permitted until 2025-11-28 07:19:43.731029683 +0000 UTC m=+1628.651425265 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "operator-scripts" (UniqueName: "kubernetes.io/configmap/2425a44a-c3c8-4533-9aa6-deb657556efb-operator-scripts") pod "novaapi78f1-account-delete-mwv4p" (UID: "2425a44a-c3c8-4533-9aa6-deb657556efb") : configmap "openstack-scripts" not found Nov 28 07:19:36 crc kubenswrapper[4922]: E1128 07:19:36.850036 4922 configmap.go:193] Couldn't get configMap openstack/openstack-scripts: configmap "openstack-scripts" not found Nov 28 07:19:36 crc kubenswrapper[4922]: E1128 07:19:36.850116 4922 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5e4e7296-ad39-41c1-9399-b3c9072c9158-operator-scripts podName:5e4e7296-ad39-41c1-9399-b3c9072c9158 nodeName:}" failed. No retries permitted until 2025-11-28 07:19:44.85010123 +0000 UTC m=+1629.770496812 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "operator-scripts" (UniqueName: "kubernetes.io/configmap/5e4e7296-ad39-41c1-9399-b3c9072c9158-operator-scripts") pod "novacell032a6-account-delete-tcwgc" (UID: "5e4e7296-ad39-41c1-9399-b3c9072c9158") : configmap "openstack-scripts" not found Nov 28 07:19:36 crc kubenswrapper[4922]: E1128 07:19:36.850051 4922 configmap.go:193] Couldn't get configMap openstack/openstack-scripts: configmap "openstack-scripts" not found Nov 28 07:19:36 crc kubenswrapper[4922]: E1128 07:19:36.850291 4922 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/1f339784-df58-44f7-947d-9d80559c1c0c-operator-scripts podName:1f339784-df58-44f7-947d-9d80559c1c0c nodeName:}" failed. No retries permitted until 2025-11-28 07:19:44.850254274 +0000 UTC m=+1629.770649916 (durationBeforeRetry 8s). 
Error: MountVolume.SetUp failed for volume "operator-scripts" (UniqueName: "kubernetes.io/configmap/1f339784-df58-44f7-947d-9d80559c1c0c-operator-scripts") pod "glance75df-account-delete-wdw2x" (UID: "1f339784-df58-44f7-947d-9d80559c1c0c") : configmap "openstack-scripts" not found Nov 28 07:19:37 crc kubenswrapper[4922]: E1128 07:19:37.153708 4922 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="35985199ebc15c8748b4f22bfafda9ad3d4b2a2643cfa7d6df842376383f82a2" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"] Nov 28 07:19:37 crc kubenswrapper[4922]: E1128 07:19:37.153851 4922 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 7588b9fec893bcd865ba591436037cd645135e83e041b11ecddc7dce8fa1ace5 is running failed: container process not found" containerID="7588b9fec893bcd865ba591436037cd645135e83e041b11ecddc7dce8fa1ace5" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"] Nov 28 07:19:37 crc kubenswrapper[4922]: I1128 07:19:37.153980 4922 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-lpw65" Nov 28 07:19:37 crc kubenswrapper[4922]: E1128 07:19:37.156014 4922 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 7588b9fec893bcd865ba591436037cd645135e83e041b11ecddc7dce8fa1ace5 is running failed: container process not found" containerID="7588b9fec893bcd865ba591436037cd645135e83e041b11ecddc7dce8fa1ace5" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"] Nov 28 07:19:37 crc kubenswrapper[4922]: E1128 07:19:37.157682 4922 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="35985199ebc15c8748b4f22bfafda9ad3d4b2a2643cfa7d6df842376383f82a2" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"] Nov 28 07:19:37 crc kubenswrapper[4922]: E1128 07:19:37.158696 4922 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 7588b9fec893bcd865ba591436037cd645135e83e041b11ecddc7dce8fa1ace5 is running failed: container process not found" containerID="7588b9fec893bcd865ba591436037cd645135e83e041b11ecddc7dce8fa1ace5" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"] Nov 28 07:19:37 crc kubenswrapper[4922]: E1128 07:19:37.158736 4922 prober.go:104] "Probe errored" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 7588b9fec893bcd865ba591436037cd645135e83e041b11ecddc7dce8fa1ace5 is running failed: container process not found" probeType="Readiness" pod="openstack/ovn-controller-ovs-m9xpz" podUID="ec882eb7-01fb-4f7f-bad8-812346e5880e" containerName="ovsdb-server" Nov 28 07:19:37 crc kubenswrapper[4922]: E1128 07:19:37.164035 4922 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="35985199ebc15c8748b4f22bfafda9ad3d4b2a2643cfa7d6df842376383f82a2" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"] 
Nov 28 07:19:37 crc kubenswrapper[4922]: E1128 07:19:37.164112 4922 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/ovn-controller-ovs-m9xpz" podUID="ec882eb7-01fb-4f7f-bad8-812346e5880e" containerName="ovs-vswitchd" Nov 28 07:19:37 crc kubenswrapper[4922]: I1128 07:19:37.207256 4922 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-lpw65" Nov 28 07:19:37 crc kubenswrapper[4922]: I1128 07:19:37.410987 4922 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-lpw65"] Nov 28 07:19:39 crc kubenswrapper[4922]: I1128 07:19:39.199822 4922 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-lpw65" podUID="21fdc98b-667f-44b1-9fae-87f96ba4b514" containerName="registry-server" containerID="cri-o://cb3eeaf50ddcbe2019ae022ab8a865cfca1a5222a14f1846ee522f8f346f3390" gracePeriod=2 Nov 28 07:19:39 crc kubenswrapper[4922]: I1128 07:19:39.697630 4922 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-lpw65" Nov 28 07:19:39 crc kubenswrapper[4922]: I1128 07:19:39.802456 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/21fdc98b-667f-44b1-9fae-87f96ba4b514-utilities\") pod \"21fdc98b-667f-44b1-9fae-87f96ba4b514\" (UID: \"21fdc98b-667f-44b1-9fae-87f96ba4b514\") " Nov 28 07:19:39 crc kubenswrapper[4922]: I1128 07:19:39.802655 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/21fdc98b-667f-44b1-9fae-87f96ba4b514-catalog-content\") pod \"21fdc98b-667f-44b1-9fae-87f96ba4b514\" (UID: \"21fdc98b-667f-44b1-9fae-87f96ba4b514\") " Nov 28 07:19:39 crc kubenswrapper[4922]: I1128 07:19:39.802757 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-l926r\" (UniqueName: \"kubernetes.io/projected/21fdc98b-667f-44b1-9fae-87f96ba4b514-kube-api-access-l926r\") pod \"21fdc98b-667f-44b1-9fae-87f96ba4b514\" (UID: \"21fdc98b-667f-44b1-9fae-87f96ba4b514\") " Nov 28 07:19:39 crc kubenswrapper[4922]: I1128 07:19:39.803428 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/21fdc98b-667f-44b1-9fae-87f96ba4b514-utilities" (OuterVolumeSpecName: "utilities") pod "21fdc98b-667f-44b1-9fae-87f96ba4b514" (UID: "21fdc98b-667f-44b1-9fae-87f96ba4b514"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 07:19:39 crc kubenswrapper[4922]: I1128 07:19:39.824405 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/21fdc98b-667f-44b1-9fae-87f96ba4b514-kube-api-access-l926r" (OuterVolumeSpecName: "kube-api-access-l926r") pod "21fdc98b-667f-44b1-9fae-87f96ba4b514" (UID: "21fdc98b-667f-44b1-9fae-87f96ba4b514"). InnerVolumeSpecName "kube-api-access-l926r". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 07:19:39 crc kubenswrapper[4922]: I1128 07:19:39.869158 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/21fdc98b-667f-44b1-9fae-87f96ba4b514-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "21fdc98b-667f-44b1-9fae-87f96ba4b514" (UID: "21fdc98b-667f-44b1-9fae-87f96ba4b514"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 07:19:39 crc kubenswrapper[4922]: I1128 07:19:39.904798 4922 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-l926r\" (UniqueName: \"kubernetes.io/projected/21fdc98b-667f-44b1-9fae-87f96ba4b514-kube-api-access-l926r\") on node \"crc\" DevicePath \"\"" Nov 28 07:19:39 crc kubenswrapper[4922]: I1128 07:19:39.905033 4922 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/21fdc98b-667f-44b1-9fae-87f96ba4b514-utilities\") on node \"crc\" DevicePath \"\"" Nov 28 07:19:39 crc kubenswrapper[4922]: I1128 07:19:39.905148 4922 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/21fdc98b-667f-44b1-9fae-87f96ba4b514-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 28 07:19:40 crc kubenswrapper[4922]: I1128 07:19:40.211273 4922 generic.go:334] "Generic (PLEG): container finished" podID="21fdc98b-667f-44b1-9fae-87f96ba4b514" containerID="cb3eeaf50ddcbe2019ae022ab8a865cfca1a5222a14f1846ee522f8f346f3390" exitCode=0 Nov 28 07:19:40 crc kubenswrapper[4922]: I1128 07:19:40.211320 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-lpw65" event={"ID":"21fdc98b-667f-44b1-9fae-87f96ba4b514","Type":"ContainerDied","Data":"cb3eeaf50ddcbe2019ae022ab8a865cfca1a5222a14f1846ee522f8f346f3390"} Nov 28 07:19:40 crc kubenswrapper[4922]: I1128 07:19:40.211349 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-lpw65" event={"ID":"21fdc98b-667f-44b1-9fae-87f96ba4b514","Type":"ContainerDied","Data":"c307381d7222c0990ec28be82766db01bec35a83e3c6400b127d7aab77071ee7"} Nov 28 07:19:40 crc kubenswrapper[4922]: I1128 07:19:40.211350 4922 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-lpw65" Nov 28 07:19:40 crc kubenswrapper[4922]: I1128 07:19:40.211365 4922 scope.go:117] "RemoveContainer" containerID="cb3eeaf50ddcbe2019ae022ab8a865cfca1a5222a14f1846ee522f8f346f3390" Nov 28 07:19:40 crc kubenswrapper[4922]: I1128 07:19:40.235111 4922 scope.go:117] "RemoveContainer" containerID="828100784a957c933fd986d8e524d4ea965617a13f9b745a0e7d98c118f37089" Nov 28 07:19:40 crc kubenswrapper[4922]: I1128 07:19:40.260790 4922 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-lpw65"] Nov 28 07:19:40 crc kubenswrapper[4922]: I1128 07:19:40.271395 4922 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-lpw65"] Nov 28 07:19:40 crc kubenswrapper[4922]: I1128 07:19:40.278614 4922 scope.go:117] "RemoveContainer" containerID="48c0dde58a57e730bc64b51812695bd28ab94b9805b8a4e14e572c7611b1890e" Nov 28 07:19:40 crc kubenswrapper[4922]: I1128 07:19:40.299387 4922 scope.go:117] "RemoveContainer" containerID="cb3eeaf50ddcbe2019ae022ab8a865cfca1a5222a14f1846ee522f8f346f3390" Nov 28 07:19:40 crc kubenswrapper[4922]: E1128 07:19:40.299998 4922 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"cb3eeaf50ddcbe2019ae022ab8a865cfca1a5222a14f1846ee522f8f346f3390\": container with ID starting with cb3eeaf50ddcbe2019ae022ab8a865cfca1a5222a14f1846ee522f8f346f3390 not found: ID does not exist" containerID="cb3eeaf50ddcbe2019ae022ab8a865cfca1a5222a14f1846ee522f8f346f3390" Nov 28 07:19:40 crc kubenswrapper[4922]: I1128 07:19:40.300077 4922 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"cb3eeaf50ddcbe2019ae022ab8a865cfca1a5222a14f1846ee522f8f346f3390"} err="failed to get container status \"cb3eeaf50ddcbe2019ae022ab8a865cfca1a5222a14f1846ee522f8f346f3390\": rpc error: code = NotFound desc = could not find container \"cb3eeaf50ddcbe2019ae022ab8a865cfca1a5222a14f1846ee522f8f346f3390\": container with ID starting with cb3eeaf50ddcbe2019ae022ab8a865cfca1a5222a14f1846ee522f8f346f3390 not found: ID does not exist" Nov 28 07:19:40 crc kubenswrapper[4922]: I1128 07:19:40.300127 4922 scope.go:117] "RemoveContainer" containerID="828100784a957c933fd986d8e524d4ea965617a13f9b745a0e7d98c118f37089" Nov 28 07:19:40 crc kubenswrapper[4922]: E1128 07:19:40.300732 4922 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"828100784a957c933fd986d8e524d4ea965617a13f9b745a0e7d98c118f37089\": container with ID starting with 828100784a957c933fd986d8e524d4ea965617a13f9b745a0e7d98c118f37089 not found: ID does not exist" containerID="828100784a957c933fd986d8e524d4ea965617a13f9b745a0e7d98c118f37089" Nov 28 07:19:40 crc kubenswrapper[4922]: I1128 07:19:40.300773 4922 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"828100784a957c933fd986d8e524d4ea965617a13f9b745a0e7d98c118f37089"} err="failed to get container status \"828100784a957c933fd986d8e524d4ea965617a13f9b745a0e7d98c118f37089\": rpc error: code = NotFound desc = could not find container \"828100784a957c933fd986d8e524d4ea965617a13f9b745a0e7d98c118f37089\": container with ID starting with 828100784a957c933fd986d8e524d4ea965617a13f9b745a0e7d98c118f37089 not found: ID does not exist" Nov 28 07:19:40 crc kubenswrapper[4922]: I1128 07:19:40.300809 4922 scope.go:117] "RemoveContainer" 
containerID="48c0dde58a57e730bc64b51812695bd28ab94b9805b8a4e14e572c7611b1890e" Nov 28 07:19:40 crc kubenswrapper[4922]: E1128 07:19:40.301141 4922 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"48c0dde58a57e730bc64b51812695bd28ab94b9805b8a4e14e572c7611b1890e\": container with ID starting with 48c0dde58a57e730bc64b51812695bd28ab94b9805b8a4e14e572c7611b1890e not found: ID does not exist" containerID="48c0dde58a57e730bc64b51812695bd28ab94b9805b8a4e14e572c7611b1890e" Nov 28 07:19:40 crc kubenswrapper[4922]: I1128 07:19:40.301206 4922 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"48c0dde58a57e730bc64b51812695bd28ab94b9805b8a4e14e572c7611b1890e"} err="failed to get container status \"48c0dde58a57e730bc64b51812695bd28ab94b9805b8a4e14e572c7611b1890e\": rpc error: code = NotFound desc = could not find container \"48c0dde58a57e730bc64b51812695bd28ab94b9805b8a4e14e572c7611b1890e\": container with ID starting with 48c0dde58a57e730bc64b51812695bd28ab94b9805b8a4e14e572c7611b1890e not found: ID does not exist" Nov 28 07:19:40 crc kubenswrapper[4922]: I1128 07:19:40.348429 4922 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/barbican-api-757bbb5fbd-lx4kn" podUID="f1e0e318-5b90-4c18-ba95-fc261ffb519d" containerName="barbican-api" probeResult="failure" output="Get \"https://10.217.0.206:9311/healthcheck\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Nov 28 07:19:40 crc kubenswrapper[4922]: I1128 07:19:40.364397 4922 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/barbican-api-757bbb5fbd-lx4kn" podUID="f1e0e318-5b90-4c18-ba95-fc261ffb519d" containerName="barbican-api-log" probeResult="failure" output="Get \"https://10.217.0.206:9311/healthcheck\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Nov 28 07:19:41 crc kubenswrapper[4922]: I1128 07:19:41.422501 4922 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="21fdc98b-667f-44b1-9fae-87f96ba4b514" path="/var/lib/kubelet/pods/21fdc98b-667f-44b1-9fae-87f96ba4b514/volumes" Nov 28 07:19:41 crc kubenswrapper[4922]: E1128 07:19:41.950108 4922 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 7588b9fec893bcd865ba591436037cd645135e83e041b11ecddc7dce8fa1ace5 is running failed: container process not found" containerID="7588b9fec893bcd865ba591436037cd645135e83e041b11ecddc7dce8fa1ace5" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"] Nov 28 07:19:41 crc kubenswrapper[4922]: E1128 07:19:41.950515 4922 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 7588b9fec893bcd865ba591436037cd645135e83e041b11ecddc7dce8fa1ace5 is running failed: container process not found" containerID="7588b9fec893bcd865ba591436037cd645135e83e041b11ecddc7dce8fa1ace5" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"] Nov 28 07:19:41 crc kubenswrapper[4922]: E1128 07:19:41.950933 4922 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 7588b9fec893bcd865ba591436037cd645135e83e041b11ecddc7dce8fa1ace5 is running failed: container process not found" containerID="7588b9fec893bcd865ba591436037cd645135e83e041b11ecddc7dce8fa1ace5" 
cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"] Nov 28 07:19:41 crc kubenswrapper[4922]: E1128 07:19:41.950973 4922 prober.go:104] "Probe errored" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 7588b9fec893bcd865ba591436037cd645135e83e041b11ecddc7dce8fa1ace5 is running failed: container process not found" probeType="Readiness" pod="openstack/ovn-controller-ovs-m9xpz" podUID="ec882eb7-01fb-4f7f-bad8-812346e5880e" containerName="ovsdb-server" Nov 28 07:19:41 crc kubenswrapper[4922]: E1128 07:19:41.952901 4922 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="35985199ebc15c8748b4f22bfafda9ad3d4b2a2643cfa7d6df842376383f82a2" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"] Nov 28 07:19:41 crc kubenswrapper[4922]: E1128 07:19:41.954140 4922 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="35985199ebc15c8748b4f22bfafda9ad3d4b2a2643cfa7d6df842376383f82a2" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"] Nov 28 07:19:41 crc kubenswrapper[4922]: E1128 07:19:41.955510 4922 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="35985199ebc15c8748b4f22bfafda9ad3d4b2a2643cfa7d6df842376383f82a2" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"] Nov 28 07:19:41 crc kubenswrapper[4922]: E1128 07:19:41.955581 4922 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/ovn-controller-ovs-m9xpz" podUID="ec882eb7-01fb-4f7f-bad8-812346e5880e" containerName="ovs-vswitchd" Nov 28 07:19:42 crc kubenswrapper[4922]: I1128 07:19:42.398948 4922 scope.go:117] "RemoveContainer" containerID="50188513677cb6b14ed255820648795c8e06f68b16a46dc87e268985d568770f" Nov 28 07:19:42 crc kubenswrapper[4922]: E1128 07:19:42.399458 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-h8wk6_openshift-machine-config-operator(0498340a-5e95-42bf-a0a6-8ac89a6b8858)\"" pod="openshift-machine-config-operator/machine-config-daemon-h8wk6" podUID="0498340a-5e95-42bf-a0a6-8ac89a6b8858" Nov 28 07:19:43 crc kubenswrapper[4922]: E1128 07:19:43.792837 4922 configmap.go:193] Couldn't get configMap openstack/openstack-scripts: configmap "openstack-scripts" not found Nov 28 07:19:43 crc kubenswrapper[4922]: E1128 07:19:43.793373 4922 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/ca6d488f-6085-4e22-a325-1b749d8c154c-operator-scripts podName:ca6d488f-6085-4e22-a325-1b749d8c154c nodeName:}" failed. No retries permitted until 2025-11-28 07:19:59.79335401 +0000 UTC m=+1644.713749592 (durationBeforeRetry 16s). 
Error: MountVolume.SetUp failed for volume "operator-scripts" (UniqueName: "kubernetes.io/configmap/ca6d488f-6085-4e22-a325-1b749d8c154c-operator-scripts") pod "placementdbc9-account-delete-68tbl" (UID: "ca6d488f-6085-4e22-a325-1b749d8c154c") : configmap "openstack-scripts" not found Nov 28 07:19:43 crc kubenswrapper[4922]: E1128 07:19:43.793015 4922 configmap.go:193] Couldn't get configMap openstack/openstack-scripts: configmap "openstack-scripts" not found Nov 28 07:19:43 crc kubenswrapper[4922]: E1128 07:19:43.793758 4922 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/3e442fd0-cd46-4c04-afb3-96892d39c0f4-operator-scripts podName:3e442fd0-cd46-4c04-afb3-96892d39c0f4 nodeName:}" failed. No retries permitted until 2025-11-28 07:19:59.793746571 +0000 UTC m=+1644.714142153 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "operator-scripts" (UniqueName: "kubernetes.io/configmap/3e442fd0-cd46-4c04-afb3-96892d39c0f4-operator-scripts") pod "barbican5229-account-delete-qq87v" (UID: "3e442fd0-cd46-4c04-afb3-96892d39c0f4") : configmap "openstack-scripts" not found Nov 28 07:19:43 crc kubenswrapper[4922]: E1128 07:19:43.793054 4922 configmap.go:193] Couldn't get configMap openstack/openstack-scripts: configmap "openstack-scripts" not found Nov 28 07:19:43 crc kubenswrapper[4922]: E1128 07:19:43.793794 4922 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/2425a44a-c3c8-4533-9aa6-deb657556efb-operator-scripts podName:2425a44a-c3c8-4533-9aa6-deb657556efb nodeName:}" failed. No retries permitted until 2025-11-28 07:19:59.793787052 +0000 UTC m=+1644.714182634 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "operator-scripts" (UniqueName: "kubernetes.io/configmap/2425a44a-c3c8-4533-9aa6-deb657556efb-operator-scripts") pod "novaapi78f1-account-delete-mwv4p" (UID: "2425a44a-c3c8-4533-9aa6-deb657556efb") : configmap "openstack-scripts" not found Nov 28 07:19:43 crc kubenswrapper[4922]: I1128 07:19:43.913439 4922 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-85487d674f-dfq9s" Nov 28 07:19:44 crc kubenswrapper[4922]: I1128 07:19:44.095716 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cf191164-20d6-4d60-b111-6373616d9622-combined-ca-bundle\") pod \"cf191164-20d6-4d60-b111-6373616d9622\" (UID: \"cf191164-20d6-4d60-b111-6373616d9622\") " Nov 28 07:19:44 crc kubenswrapper[4922]: I1128 07:19:44.095795 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/cf191164-20d6-4d60-b111-6373616d9622-ovndb-tls-certs\") pod \"cf191164-20d6-4d60-b111-6373616d9622\" (UID: \"cf191164-20d6-4d60-b111-6373616d9622\") " Nov 28 07:19:44 crc kubenswrapper[4922]: I1128 07:19:44.095835 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/cf191164-20d6-4d60-b111-6373616d9622-internal-tls-certs\") pod \"cf191164-20d6-4d60-b111-6373616d9622\" (UID: \"cf191164-20d6-4d60-b111-6373616d9622\") " Nov 28 07:19:44 crc kubenswrapper[4922]: I1128 07:19:44.095921 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/cf191164-20d6-4d60-b111-6373616d9622-httpd-config\") pod \"cf191164-20d6-4d60-b111-6373616d9622\" (UID: \"cf191164-20d6-4d60-b111-6373616d9622\") " Nov 28 07:19:44 crc kubenswrapper[4922]: I1128 07:19:44.095943 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/cf191164-20d6-4d60-b111-6373616d9622-public-tls-certs\") pod \"cf191164-20d6-4d60-b111-6373616d9622\" (UID: \"cf191164-20d6-4d60-b111-6373616d9622\") " Nov 28 07:19:44 crc kubenswrapper[4922]: I1128 07:19:44.095960 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2pp9n\" (UniqueName: \"kubernetes.io/projected/cf191164-20d6-4d60-b111-6373616d9622-kube-api-access-2pp9n\") pod \"cf191164-20d6-4d60-b111-6373616d9622\" (UID: \"cf191164-20d6-4d60-b111-6373616d9622\") " Nov 28 07:19:44 crc kubenswrapper[4922]: I1128 07:19:44.095993 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/cf191164-20d6-4d60-b111-6373616d9622-config\") pod \"cf191164-20d6-4d60-b111-6373616d9622\" (UID: \"cf191164-20d6-4d60-b111-6373616d9622\") " Nov 28 07:19:44 crc kubenswrapper[4922]: I1128 07:19:44.121465 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cf191164-20d6-4d60-b111-6373616d9622-httpd-config" (OuterVolumeSpecName: "httpd-config") pod "cf191164-20d6-4d60-b111-6373616d9622" (UID: "cf191164-20d6-4d60-b111-6373616d9622"). InnerVolumeSpecName "httpd-config". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 07:19:44 crc kubenswrapper[4922]: I1128 07:19:44.121534 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cf191164-20d6-4d60-b111-6373616d9622-kube-api-access-2pp9n" (OuterVolumeSpecName: "kube-api-access-2pp9n") pod "cf191164-20d6-4d60-b111-6373616d9622" (UID: "cf191164-20d6-4d60-b111-6373616d9622"). InnerVolumeSpecName "kube-api-access-2pp9n". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 07:19:44 crc kubenswrapper[4922]: I1128 07:19:44.173780 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cf191164-20d6-4d60-b111-6373616d9622-config" (OuterVolumeSpecName: "config") pod "cf191164-20d6-4d60-b111-6373616d9622" (UID: "cf191164-20d6-4d60-b111-6373616d9622"). InnerVolumeSpecName "config". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 07:19:44 crc kubenswrapper[4922]: I1128 07:19:44.173926 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cf191164-20d6-4d60-b111-6373616d9622-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "cf191164-20d6-4d60-b111-6373616d9622" (UID: "cf191164-20d6-4d60-b111-6373616d9622"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 07:19:44 crc kubenswrapper[4922]: I1128 07:19:44.177825 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cf191164-20d6-4d60-b111-6373616d9622-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "cf191164-20d6-4d60-b111-6373616d9622" (UID: "cf191164-20d6-4d60-b111-6373616d9622"). InnerVolumeSpecName "public-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 07:19:44 crc kubenswrapper[4922]: I1128 07:19:44.186634 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cf191164-20d6-4d60-b111-6373616d9622-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "cf191164-20d6-4d60-b111-6373616d9622" (UID: "cf191164-20d6-4d60-b111-6373616d9622"). InnerVolumeSpecName "internal-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 07:19:44 crc kubenswrapper[4922]: I1128 07:19:44.195687 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cf191164-20d6-4d60-b111-6373616d9622-ovndb-tls-certs" (OuterVolumeSpecName: "ovndb-tls-certs") pod "cf191164-20d6-4d60-b111-6373616d9622" (UID: "cf191164-20d6-4d60-b111-6373616d9622"). InnerVolumeSpecName "ovndb-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 07:19:44 crc kubenswrapper[4922]: I1128 07:19:44.198443 4922 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cf191164-20d6-4d60-b111-6373616d9622-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 07:19:44 crc kubenswrapper[4922]: I1128 07:19:44.198490 4922 reconciler_common.go:293] "Volume detached for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/cf191164-20d6-4d60-b111-6373616d9622-ovndb-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 28 07:19:44 crc kubenswrapper[4922]: I1128 07:19:44.198514 4922 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/cf191164-20d6-4d60-b111-6373616d9622-internal-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 28 07:19:44 crc kubenswrapper[4922]: I1128 07:19:44.198534 4922 reconciler_common.go:293] "Volume detached for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/cf191164-20d6-4d60-b111-6373616d9622-httpd-config\") on node \"crc\" DevicePath \"\"" Nov 28 07:19:44 crc kubenswrapper[4922]: I1128 07:19:44.198555 4922 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/cf191164-20d6-4d60-b111-6373616d9622-public-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 28 07:19:44 crc kubenswrapper[4922]: I1128 07:19:44.198574 4922 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2pp9n\" (UniqueName: \"kubernetes.io/projected/cf191164-20d6-4d60-b111-6373616d9622-kube-api-access-2pp9n\") on node \"crc\" DevicePath \"\"" Nov 28 07:19:44 crc kubenswrapper[4922]: I1128 07:19:44.198594 4922 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/secret/cf191164-20d6-4d60-b111-6373616d9622-config\") on node \"crc\" DevicePath \"\"" Nov 28 07:19:44 crc kubenswrapper[4922]: I1128 07:19:44.281527 4922 generic.go:334] "Generic (PLEG): container finished" podID="cf191164-20d6-4d60-b111-6373616d9622" containerID="e8df143a346e32c48d17729c45b8767a2ed24995c7a3feb7c92a535d6b69654e" exitCode=0 Nov 28 07:19:44 crc kubenswrapper[4922]: I1128 07:19:44.281571 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-85487d674f-dfq9s" event={"ID":"cf191164-20d6-4d60-b111-6373616d9622","Type":"ContainerDied","Data":"e8df143a346e32c48d17729c45b8767a2ed24995c7a3feb7c92a535d6b69654e"} Nov 28 07:19:44 crc kubenswrapper[4922]: I1128 07:19:44.281584 4922 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-85487d674f-dfq9s" Nov 28 07:19:44 crc kubenswrapper[4922]: I1128 07:19:44.281598 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-85487d674f-dfq9s" event={"ID":"cf191164-20d6-4d60-b111-6373616d9622","Type":"ContainerDied","Data":"3f27de830fdbd6a4998b2e5549f7f0e951fce01690637c616637dc454dfa92fa"} Nov 28 07:19:44 crc kubenswrapper[4922]: I1128 07:19:44.281616 4922 scope.go:117] "RemoveContainer" containerID="f58fb8e38c1acd1bb650c86ad30a27f2c7340fa1dc53bc9b6f0f13802001dd49" Nov 28 07:19:44 crc kubenswrapper[4922]: I1128 07:19:44.316498 4922 scope.go:117] "RemoveContainer" containerID="e8df143a346e32c48d17729c45b8767a2ed24995c7a3feb7c92a535d6b69654e" Nov 28 07:19:44 crc kubenswrapper[4922]: I1128 07:19:44.351303 4922 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-85487d674f-dfq9s"] Nov 28 07:19:44 crc kubenswrapper[4922]: I1128 07:19:44.371812 4922 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/neutron-85487d674f-dfq9s"] Nov 28 07:19:44 crc kubenswrapper[4922]: I1128 07:19:44.394326 4922 scope.go:117] "RemoveContainer" containerID="f58fb8e38c1acd1bb650c86ad30a27f2c7340fa1dc53bc9b6f0f13802001dd49" Nov 28 07:19:44 crc kubenswrapper[4922]: E1128 07:19:44.398637 4922 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f58fb8e38c1acd1bb650c86ad30a27f2c7340fa1dc53bc9b6f0f13802001dd49\": container with ID starting with f58fb8e38c1acd1bb650c86ad30a27f2c7340fa1dc53bc9b6f0f13802001dd49 not found: ID does not exist" containerID="f58fb8e38c1acd1bb650c86ad30a27f2c7340fa1dc53bc9b6f0f13802001dd49" Nov 28 07:19:44 crc kubenswrapper[4922]: I1128 07:19:44.398728 4922 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f58fb8e38c1acd1bb650c86ad30a27f2c7340fa1dc53bc9b6f0f13802001dd49"} err="failed to get container status \"f58fb8e38c1acd1bb650c86ad30a27f2c7340fa1dc53bc9b6f0f13802001dd49\": rpc error: code = NotFound desc = could not find container \"f58fb8e38c1acd1bb650c86ad30a27f2c7340fa1dc53bc9b6f0f13802001dd49\": container with ID starting with f58fb8e38c1acd1bb650c86ad30a27f2c7340fa1dc53bc9b6f0f13802001dd49 not found: ID does not exist" Nov 28 07:19:44 crc kubenswrapper[4922]: I1128 07:19:44.398757 4922 scope.go:117] "RemoveContainer" containerID="e8df143a346e32c48d17729c45b8767a2ed24995c7a3feb7c92a535d6b69654e" Nov 28 07:19:44 crc kubenswrapper[4922]: E1128 07:19:44.406393 4922 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e8df143a346e32c48d17729c45b8767a2ed24995c7a3feb7c92a535d6b69654e\": container with ID starting with e8df143a346e32c48d17729c45b8767a2ed24995c7a3feb7c92a535d6b69654e not found: ID does not exist" containerID="e8df143a346e32c48d17729c45b8767a2ed24995c7a3feb7c92a535d6b69654e" Nov 28 07:19:44 crc kubenswrapper[4922]: I1128 07:19:44.406450 4922 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e8df143a346e32c48d17729c45b8767a2ed24995c7a3feb7c92a535d6b69654e"} err="failed to get container status \"e8df143a346e32c48d17729c45b8767a2ed24995c7a3feb7c92a535d6b69654e\": rpc error: code = NotFound desc = could not find container \"e8df143a346e32c48d17729c45b8767a2ed24995c7a3feb7c92a535d6b69654e\": container with ID starting with e8df143a346e32c48d17729c45b8767a2ed24995c7a3feb7c92a535d6b69654e not found: ID does not exist" Nov 28 07:19:44 crc 
kubenswrapper[4922]: E1128 07:19:44.910350 4922 configmap.go:193] Couldn't get configMap openstack/openstack-scripts: configmap "openstack-scripts" not found Nov 28 07:19:44 crc kubenswrapper[4922]: E1128 07:19:44.910638 4922 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/1f339784-df58-44f7-947d-9d80559c1c0c-operator-scripts podName:1f339784-df58-44f7-947d-9d80559c1c0c nodeName:}" failed. No retries permitted until 2025-11-28 07:20:00.910593657 +0000 UTC m=+1645.830989249 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "operator-scripts" (UniqueName: "kubernetes.io/configmap/1f339784-df58-44f7-947d-9d80559c1c0c-operator-scripts") pod "glance75df-account-delete-wdw2x" (UID: "1f339784-df58-44f7-947d-9d80559c1c0c") : configmap "openstack-scripts" not found Nov 28 07:19:44 crc kubenswrapper[4922]: E1128 07:19:44.910850 4922 configmap.go:193] Couldn't get configMap openstack/openstack-scripts: configmap "openstack-scripts" not found Nov 28 07:19:44 crc kubenswrapper[4922]: E1128 07:19:44.910914 4922 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5e4e7296-ad39-41c1-9399-b3c9072c9158-operator-scripts podName:5e4e7296-ad39-41c1-9399-b3c9072c9158 nodeName:}" failed. No retries permitted until 2025-11-28 07:20:00.910891936 +0000 UTC m=+1645.831287528 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "operator-scripts" (UniqueName: "kubernetes.io/configmap/5e4e7296-ad39-41c1-9399-b3c9072c9158-operator-scripts") pod "novacell032a6-account-delete-tcwgc" (UID: "5e4e7296-ad39-41c1-9399-b3c9072c9158") : configmap "openstack-scripts" not found Nov 28 07:19:45 crc kubenswrapper[4922]: I1128 07:19:45.353557 4922 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/barbican-api-757bbb5fbd-lx4kn" podUID="f1e0e318-5b90-4c18-ba95-fc261ffb519d" containerName="barbican-api" probeResult="failure" output="Get \"https://10.217.0.206:9311/healthcheck\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Nov 28 07:19:45 crc kubenswrapper[4922]: I1128 07:19:45.368451 4922 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/barbican-api-757bbb5fbd-lx4kn" podUID="f1e0e318-5b90-4c18-ba95-fc261ffb519d" containerName="barbican-api-log" probeResult="failure" output="Get \"https://10.217.0.206:9311/healthcheck\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 28 07:19:45 crc kubenswrapper[4922]: I1128 07:19:45.421417 4922 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="cf191164-20d6-4d60-b111-6373616d9622" path="/var/lib/kubelet/pods/cf191164-20d6-4d60-b111-6373616d9622/volumes" Nov 28 07:19:46 crc kubenswrapper[4922]: E1128 07:19:46.950152 4922 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 7588b9fec893bcd865ba591436037cd645135e83e041b11ecddc7dce8fa1ace5 is running failed: container process not found" containerID="7588b9fec893bcd865ba591436037cd645135e83e041b11ecddc7dce8fa1ace5" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"] Nov 28 07:19:46 crc kubenswrapper[4922]: E1128 07:19:46.951466 4922 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 7588b9fec893bcd865ba591436037cd645135e83e041b11ecddc7dce8fa1ace5 is running failed: container process not found" 
containerID="7588b9fec893bcd865ba591436037cd645135e83e041b11ecddc7dce8fa1ace5" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"] Nov 28 07:19:46 crc kubenswrapper[4922]: E1128 07:19:46.951868 4922 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="35985199ebc15c8748b4f22bfafda9ad3d4b2a2643cfa7d6df842376383f82a2" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"] Nov 28 07:19:46 crc kubenswrapper[4922]: E1128 07:19:46.952946 4922 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 7588b9fec893bcd865ba591436037cd645135e83e041b11ecddc7dce8fa1ace5 is running failed: container process not found" containerID="7588b9fec893bcd865ba591436037cd645135e83e041b11ecddc7dce8fa1ace5" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"] Nov 28 07:19:46 crc kubenswrapper[4922]: E1128 07:19:46.953034 4922 prober.go:104] "Probe errored" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 7588b9fec893bcd865ba591436037cd645135e83e041b11ecddc7dce8fa1ace5 is running failed: container process not found" probeType="Readiness" pod="openstack/ovn-controller-ovs-m9xpz" podUID="ec882eb7-01fb-4f7f-bad8-812346e5880e" containerName="ovsdb-server" Nov 28 07:19:46 crc kubenswrapper[4922]: E1128 07:19:46.956867 4922 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="35985199ebc15c8748b4f22bfafda9ad3d4b2a2643cfa7d6df842376383f82a2" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"] Nov 28 07:19:46 crc kubenswrapper[4922]: E1128 07:19:46.960037 4922 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="35985199ebc15c8748b4f22bfafda9ad3d4b2a2643cfa7d6df842376383f82a2" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"] Nov 28 07:19:46 crc kubenswrapper[4922]: E1128 07:19:46.960132 4922 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/ovn-controller-ovs-m9xpz" podUID="ec882eb7-01fb-4f7f-bad8-812346e5880e" containerName="ovs-vswitchd" Nov 28 07:19:50 crc kubenswrapper[4922]: I1128 07:19:50.358368 4922 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/barbican-api-757bbb5fbd-lx4kn" podUID="f1e0e318-5b90-4c18-ba95-fc261ffb519d" containerName="barbican-api" probeResult="failure" output="Get \"https://10.217.0.206:9311/healthcheck\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Nov 28 07:19:50 crc kubenswrapper[4922]: I1128 07:19:50.373397 4922 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/barbican-api-757bbb5fbd-lx4kn" podUID="f1e0e318-5b90-4c18-ba95-fc261ffb519d" containerName="barbican-api-log" probeResult="failure" output="Get \"https://10.217.0.206:9311/healthcheck\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Nov 28 07:19:51 crc kubenswrapper[4922]: I1128 07:19:51.352300 4922 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openstack_ovn-controller-ovs-m9xpz_ec882eb7-01fb-4f7f-bad8-812346e5880e/ovs-vswitchd/0.log" Nov 28 07:19:51 crc kubenswrapper[4922]: I1128 07:19:51.353901 4922 generic.go:334] "Generic (PLEG): container finished" podID="ec882eb7-01fb-4f7f-bad8-812346e5880e" containerID="35985199ebc15c8748b4f22bfafda9ad3d4b2a2643cfa7d6df842376383f82a2" exitCode=137 Nov 28 07:19:51 crc kubenswrapper[4922]: I1128 07:19:51.353954 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-m9xpz" event={"ID":"ec882eb7-01fb-4f7f-bad8-812346e5880e","Type":"ContainerDied","Data":"35985199ebc15c8748b4f22bfafda9ad3d4b2a2643cfa7d6df842376383f82a2"} Nov 28 07:19:51 crc kubenswrapper[4922]: I1128 07:19:51.752018 4922 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-ovs-m9xpz_ec882eb7-01fb-4f7f-bad8-812346e5880e/ovs-vswitchd/0.log" Nov 28 07:19:51 crc kubenswrapper[4922]: I1128 07:19:51.753450 4922 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-ovs-m9xpz" Nov 28 07:19:51 crc kubenswrapper[4922]: I1128 07:19:51.936302 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/ec882eb7-01fb-4f7f-bad8-812346e5880e-scripts\") pod \"ec882eb7-01fb-4f7f-bad8-812346e5880e\" (UID: \"ec882eb7-01fb-4f7f-bad8-812346e5880e\") " Nov 28 07:19:51 crc kubenswrapper[4922]: I1128 07:19:51.936475 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/ec882eb7-01fb-4f7f-bad8-812346e5880e-var-run\") pod \"ec882eb7-01fb-4f7f-bad8-812346e5880e\" (UID: \"ec882eb7-01fb-4f7f-bad8-812346e5880e\") " Nov 28 07:19:51 crc kubenswrapper[4922]: I1128 07:19:51.936535 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/ec882eb7-01fb-4f7f-bad8-812346e5880e-var-log\") pod \"ec882eb7-01fb-4f7f-bad8-812346e5880e\" (UID: \"ec882eb7-01fb-4f7f-bad8-812346e5880e\") " Nov 28 07:19:51 crc kubenswrapper[4922]: I1128 07:19:51.936617 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mqgwh\" (UniqueName: \"kubernetes.io/projected/ec882eb7-01fb-4f7f-bad8-812346e5880e-kube-api-access-mqgwh\") pod \"ec882eb7-01fb-4f7f-bad8-812346e5880e\" (UID: \"ec882eb7-01fb-4f7f-bad8-812346e5880e\") " Nov 28 07:19:51 crc kubenswrapper[4922]: I1128 07:19:51.936846 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-ovs\" (UniqueName: \"kubernetes.io/host-path/ec882eb7-01fb-4f7f-bad8-812346e5880e-etc-ovs\") pod \"ec882eb7-01fb-4f7f-bad8-812346e5880e\" (UID: \"ec882eb7-01fb-4f7f-bad8-812346e5880e\") " Nov 28 07:19:51 crc kubenswrapper[4922]: I1128 07:19:51.936909 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-lib\" (UniqueName: \"kubernetes.io/host-path/ec882eb7-01fb-4f7f-bad8-812346e5880e-var-lib\") pod \"ec882eb7-01fb-4f7f-bad8-812346e5880e\" (UID: \"ec882eb7-01fb-4f7f-bad8-812346e5880e\") " Nov 28 07:19:51 crc kubenswrapper[4922]: I1128 07:19:51.937168 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/ec882eb7-01fb-4f7f-bad8-812346e5880e-etc-ovs" (OuterVolumeSpecName: "etc-ovs") pod "ec882eb7-01fb-4f7f-bad8-812346e5880e" (UID: "ec882eb7-01fb-4f7f-bad8-812346e5880e"). InnerVolumeSpecName "etc-ovs". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 28 07:19:51 crc kubenswrapper[4922]: I1128 07:19:51.937168 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/ec882eb7-01fb-4f7f-bad8-812346e5880e-var-log" (OuterVolumeSpecName: "var-log") pod "ec882eb7-01fb-4f7f-bad8-812346e5880e" (UID: "ec882eb7-01fb-4f7f-bad8-812346e5880e"). InnerVolumeSpecName "var-log". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 28 07:19:51 crc kubenswrapper[4922]: I1128 07:19:51.937246 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/ec882eb7-01fb-4f7f-bad8-812346e5880e-var-lib" (OuterVolumeSpecName: "var-lib") pod "ec882eb7-01fb-4f7f-bad8-812346e5880e" (UID: "ec882eb7-01fb-4f7f-bad8-812346e5880e"). InnerVolumeSpecName "var-lib". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 28 07:19:51 crc kubenswrapper[4922]: I1128 07:19:51.937453 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/ec882eb7-01fb-4f7f-bad8-812346e5880e-var-run" (OuterVolumeSpecName: "var-run") pod "ec882eb7-01fb-4f7f-bad8-812346e5880e" (UID: "ec882eb7-01fb-4f7f-bad8-812346e5880e"). InnerVolumeSpecName "var-run". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 28 07:19:51 crc kubenswrapper[4922]: I1128 07:19:51.938505 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ec882eb7-01fb-4f7f-bad8-812346e5880e-scripts" (OuterVolumeSpecName: "scripts") pod "ec882eb7-01fb-4f7f-bad8-812346e5880e" (UID: "ec882eb7-01fb-4f7f-bad8-812346e5880e"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 07:19:51 crc kubenswrapper[4922]: I1128 07:19:51.937743 4922 reconciler_common.go:293] "Volume detached for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/ec882eb7-01fb-4f7f-bad8-812346e5880e-var-log\") on node \"crc\" DevicePath \"\"" Nov 28 07:19:51 crc kubenswrapper[4922]: I1128 07:19:51.938797 4922 reconciler_common.go:293] "Volume detached for volume \"etc-ovs\" (UniqueName: \"kubernetes.io/host-path/ec882eb7-01fb-4f7f-bad8-812346e5880e-etc-ovs\") on node \"crc\" DevicePath \"\"" Nov 28 07:19:51 crc kubenswrapper[4922]: I1128 07:19:51.938878 4922 reconciler_common.go:293] "Volume detached for volume \"var-lib\" (UniqueName: \"kubernetes.io/host-path/ec882eb7-01fb-4f7f-bad8-812346e5880e-var-lib\") on node \"crc\" DevicePath \"\"" Nov 28 07:19:51 crc kubenswrapper[4922]: I1128 07:19:51.943379 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ec882eb7-01fb-4f7f-bad8-812346e5880e-kube-api-access-mqgwh" (OuterVolumeSpecName: "kube-api-access-mqgwh") pod "ec882eb7-01fb-4f7f-bad8-812346e5880e" (UID: "ec882eb7-01fb-4f7f-bad8-812346e5880e"). InnerVolumeSpecName "kube-api-access-mqgwh". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 07:19:52 crc kubenswrapper[4922]: I1128 07:19:52.040563 4922 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/ec882eb7-01fb-4f7f-bad8-812346e5880e-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 07:19:52 crc kubenswrapper[4922]: I1128 07:19:52.040596 4922 reconciler_common.go:293] "Volume detached for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/ec882eb7-01fb-4f7f-bad8-812346e5880e-var-run\") on node \"crc\" DevicePath \"\"" Nov 28 07:19:52 crc kubenswrapper[4922]: I1128 07:19:52.040605 4922 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mqgwh\" (UniqueName: \"kubernetes.io/projected/ec882eb7-01fb-4f7f-bad8-812346e5880e-kube-api-access-mqgwh\") on node \"crc\" DevicePath \"\"" Nov 28 07:19:52 crc kubenswrapper[4922]: I1128 07:19:52.369472 4922 generic.go:334] "Generic (PLEG): container finished" podID="46c3d0a8-d9ed-419a-baf3-57aaaf0c56fe" containerID="ca8c40529da8875dd3000d01b8bd8b36258e6bc188214902c4efe8f876ef3f55" exitCode=137 Nov 28 07:19:52 crc kubenswrapper[4922]: I1128 07:19:52.369496 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"46c3d0a8-d9ed-419a-baf3-57aaaf0c56fe","Type":"ContainerDied","Data":"ca8c40529da8875dd3000d01b8bd8b36258e6bc188214902c4efe8f876ef3f55"} Nov 28 07:19:52 crc kubenswrapper[4922]: I1128 07:19:52.371954 4922 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-ovs-m9xpz_ec882eb7-01fb-4f7f-bad8-812346e5880e/ovs-vswitchd/0.log" Nov 28 07:19:52 crc kubenswrapper[4922]: I1128 07:19:52.372834 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-m9xpz" event={"ID":"ec882eb7-01fb-4f7f-bad8-812346e5880e","Type":"ContainerDied","Data":"e8931293e26c27ffc2711e53ca6ee67c9e2f3aab83ae99464d7ea58d035ba809"} Nov 28 07:19:52 crc kubenswrapper[4922]: I1128 07:19:52.372875 4922 scope.go:117] "RemoveContainer" containerID="35985199ebc15c8748b4f22bfafda9ad3d4b2a2643cfa7d6df842376383f82a2" Nov 28 07:19:52 crc kubenswrapper[4922]: I1128 07:19:52.372909 4922 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-ovs-m9xpz" Nov 28 07:19:52 crc kubenswrapper[4922]: I1128 07:19:52.409733 4922 scope.go:117] "RemoveContainer" containerID="7588b9fec893bcd865ba591436037cd645135e83e041b11ecddc7dce8fa1ace5" Nov 28 07:19:52 crc kubenswrapper[4922]: I1128 07:19:52.414389 4922 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovn-controller-ovs-m9xpz"] Nov 28 07:19:52 crc kubenswrapper[4922]: I1128 07:19:52.420658 4922 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ovn-controller-ovs-m9xpz"] Nov 28 07:19:52 crc kubenswrapper[4922]: I1128 07:19:52.435810 4922 scope.go:117] "RemoveContainer" containerID="be503fab67fd063d8f5a6fe862d508920bd019c3b2a011b44c4391dfea4c60e2" Nov 28 07:19:52 crc kubenswrapper[4922]: I1128 07:19:52.593960 4922 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/swift-storage-0" Nov 28 07:19:52 crc kubenswrapper[4922]: I1128 07:19:52.750676 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"lock\" (UniqueName: \"kubernetes.io/empty-dir/46c3d0a8-d9ed-419a-baf3-57aaaf0c56fe-lock\") pod \"46c3d0a8-d9ed-419a-baf3-57aaaf0c56fe\" (UID: \"46c3d0a8-d9ed-419a-baf3-57aaaf0c56fe\") " Nov 28 07:19:52 crc kubenswrapper[4922]: I1128 07:19:52.750761 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/46c3d0a8-d9ed-419a-baf3-57aaaf0c56fe-etc-swift\") pod \"46c3d0a8-d9ed-419a-baf3-57aaaf0c56fe\" (UID: \"46c3d0a8-d9ed-419a-baf3-57aaaf0c56fe\") " Nov 28 07:19:52 crc kubenswrapper[4922]: I1128 07:19:52.750846 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4jcg5\" (UniqueName: \"kubernetes.io/projected/46c3d0a8-d9ed-419a-baf3-57aaaf0c56fe-kube-api-access-4jcg5\") pod \"46c3d0a8-d9ed-419a-baf3-57aaaf0c56fe\" (UID: \"46c3d0a8-d9ed-419a-baf3-57aaaf0c56fe\") " Nov 28 07:19:52 crc kubenswrapper[4922]: I1128 07:19:52.750884 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"swift\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"46c3d0a8-d9ed-419a-baf3-57aaaf0c56fe\" (UID: \"46c3d0a8-d9ed-419a-baf3-57aaaf0c56fe\") " Nov 28 07:19:52 crc kubenswrapper[4922]: I1128 07:19:52.750979 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cache\" (UniqueName: \"kubernetes.io/empty-dir/46c3d0a8-d9ed-419a-baf3-57aaaf0c56fe-cache\") pod \"46c3d0a8-d9ed-419a-baf3-57aaaf0c56fe\" (UID: \"46c3d0a8-d9ed-419a-baf3-57aaaf0c56fe\") " Nov 28 07:19:52 crc kubenswrapper[4922]: I1128 07:19:52.751843 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/46c3d0a8-d9ed-419a-baf3-57aaaf0c56fe-lock" (OuterVolumeSpecName: "lock") pod "46c3d0a8-d9ed-419a-baf3-57aaaf0c56fe" (UID: "46c3d0a8-d9ed-419a-baf3-57aaaf0c56fe"). InnerVolumeSpecName "lock". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 07:19:52 crc kubenswrapper[4922]: I1128 07:19:52.752172 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/46c3d0a8-d9ed-419a-baf3-57aaaf0c56fe-cache" (OuterVolumeSpecName: "cache") pod "46c3d0a8-d9ed-419a-baf3-57aaaf0c56fe" (UID: "46c3d0a8-d9ed-419a-baf3-57aaaf0c56fe"). InnerVolumeSpecName "cache". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 07:19:52 crc kubenswrapper[4922]: I1128 07:19:52.756882 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage02-crc" (OuterVolumeSpecName: "swift") pod "46c3d0a8-d9ed-419a-baf3-57aaaf0c56fe" (UID: "46c3d0a8-d9ed-419a-baf3-57aaaf0c56fe"). InnerVolumeSpecName "local-storage02-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Nov 28 07:19:52 crc kubenswrapper[4922]: I1128 07:19:52.758094 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/46c3d0a8-d9ed-419a-baf3-57aaaf0c56fe-etc-swift" (OuterVolumeSpecName: "etc-swift") pod "46c3d0a8-d9ed-419a-baf3-57aaaf0c56fe" (UID: "46c3d0a8-d9ed-419a-baf3-57aaaf0c56fe"). InnerVolumeSpecName "etc-swift". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 07:19:52 crc kubenswrapper[4922]: I1128 07:19:52.763406 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/46c3d0a8-d9ed-419a-baf3-57aaaf0c56fe-kube-api-access-4jcg5" (OuterVolumeSpecName: "kube-api-access-4jcg5") pod "46c3d0a8-d9ed-419a-baf3-57aaaf0c56fe" (UID: "46c3d0a8-d9ed-419a-baf3-57aaaf0c56fe"). InnerVolumeSpecName "kube-api-access-4jcg5". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 07:19:52 crc kubenswrapper[4922]: I1128 07:19:52.852793 4922 reconciler_common.go:293] "Volume detached for volume \"cache\" (UniqueName: \"kubernetes.io/empty-dir/46c3d0a8-d9ed-419a-baf3-57aaaf0c56fe-cache\") on node \"crc\" DevicePath \"\"" Nov 28 07:19:52 crc kubenswrapper[4922]: I1128 07:19:52.852828 4922 reconciler_common.go:293] "Volume detached for volume \"lock\" (UniqueName: \"kubernetes.io/empty-dir/46c3d0a8-d9ed-419a-baf3-57aaaf0c56fe-lock\") on node \"crc\" DevicePath \"\"" Nov 28 07:19:52 crc kubenswrapper[4922]: I1128 07:19:52.852837 4922 reconciler_common.go:293] "Volume detached for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/46c3d0a8-d9ed-419a-baf3-57aaaf0c56fe-etc-swift\") on node \"crc\" DevicePath \"\"" Nov 28 07:19:52 crc kubenswrapper[4922]: I1128 07:19:52.852846 4922 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4jcg5\" (UniqueName: \"kubernetes.io/projected/46c3d0a8-d9ed-419a-baf3-57aaaf0c56fe-kube-api-access-4jcg5\") on node \"crc\" DevicePath \"\"" Nov 28 07:19:52 crc kubenswrapper[4922]: I1128 07:19:52.852875 4922 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") on node \"crc\" " Nov 28 07:19:52 crc kubenswrapper[4922]: I1128 07:19:52.868310 4922 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage02-crc" (UniqueName: "kubernetes.io/local-volume/local-storage02-crc") on node "crc" Nov 28 07:19:52 crc kubenswrapper[4922]: I1128 07:19:52.954713 4922 reconciler_common.go:293] "Volume detached for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") on node \"crc\" DevicePath \"\"" Nov 28 07:19:53 crc kubenswrapper[4922]: E1128 07:19:53.054006 4922 fsHandler.go:119] failed to collect filesystem stats - rootDiskErr: could not stat "/var/lib/containers/storage/overlay/68d3f0355797a1aa758614c6a686bf1ebc84fda23b20f147305fd85f09137066/diff" to get inode usage: stat /var/lib/containers/storage/overlay/68d3f0355797a1aa758614c6a686bf1ebc84fda23b20f147305fd85f09137066/diff: no such file or directory, extraDiskErr: Nov 28 07:19:53 crc kubenswrapper[4922]: E1128 07:19:53.370175 4922 fsHandler.go:119] failed to collect filesystem stats - rootDiskErr: could not stat "/var/lib/containers/storage/overlay/19813b81facdbd1ca56b65cf1b12df15b3342976d5244d4c32addcf8de880036/diff" to get inode usage: stat /var/lib/containers/storage/overlay/19813b81facdbd1ca56b65cf1b12df15b3342976d5244d4c32addcf8de880036/diff: no such file or directory, extraDiskErr: could not stat "/var/log/pods/openstack_neutron-85487d674f-dfq9s_cf191164-20d6-4d60-b111-6373616d9622/neutron-api/0.log" to get inode usage: stat /var/log/pods/openstack_neutron-85487d674f-dfq9s_cf191164-20d6-4d60-b111-6373616d9622/neutron-api/0.log: no such file or directory Nov 28 07:19:53 crc kubenswrapper[4922]: I1128 07:19:53.423282 4922 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" 
podUID="ec882eb7-01fb-4f7f-bad8-812346e5880e" path="/var/lib/kubelet/pods/ec882eb7-01fb-4f7f-bad8-812346e5880e/volumes" Nov 28 07:19:53 crc kubenswrapper[4922]: I1128 07:19:53.447663 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"46c3d0a8-d9ed-419a-baf3-57aaaf0c56fe","Type":"ContainerDied","Data":"faa40d67094345d06ccb5f1f49589a811c96247ba3b8169fb17210e5a0cd654a"} Nov 28 07:19:53 crc kubenswrapper[4922]: I1128 07:19:53.447713 4922 scope.go:117] "RemoveContainer" containerID="ca8c40529da8875dd3000d01b8bd8b36258e6bc188214902c4efe8f876ef3f55" Nov 28 07:19:53 crc kubenswrapper[4922]: I1128 07:19:53.447754 4922 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/swift-storage-0" Nov 28 07:19:53 crc kubenswrapper[4922]: I1128 07:19:53.475421 4922 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/swift-storage-0"] Nov 28 07:19:53 crc kubenswrapper[4922]: I1128 07:19:53.486585 4922 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/swift-storage-0"] Nov 28 07:19:53 crc kubenswrapper[4922]: I1128 07:19:53.497753 4922 scope.go:117] "RemoveContainer" containerID="be7c02ef3f8f4787f48ed58720143cde50c0717059e0ee9cfb0efd2e23816536" Nov 28 07:19:53 crc kubenswrapper[4922]: I1128 07:19:53.531163 4922 scope.go:117] "RemoveContainer" containerID="cdce219795ca597bf90a3ef5a37914a0508699345238c0f783326c7603848cb7" Nov 28 07:19:53 crc kubenswrapper[4922]: I1128 07:19:53.557582 4922 scope.go:117] "RemoveContainer" containerID="f0b2a682fe557d959bf583e5ecb3012d332cc749a2c216c33f7c5ba7ffe503cb" Nov 28 07:19:53 crc kubenswrapper[4922]: I1128 07:19:53.576979 4922 scope.go:117] "RemoveContainer" containerID="a015be7baa9e4f6ad1e91446da7a6a6130d283d562293232f646fbaf0306cb39" Nov 28 07:19:53 crc kubenswrapper[4922]: I1128 07:19:53.590548 4922 scope.go:117] "RemoveContainer" containerID="b375cb1d189915e8086ce1f9c17697360db2e2a53cd535ab4f5f9cf1df90a46c" Nov 28 07:19:53 crc kubenswrapper[4922]: I1128 07:19:53.606067 4922 scope.go:117] "RemoveContainer" containerID="e4501a3ca18be2fca539c740e8acc3816b850c674385c2c3d353d186ec5bbec5" Nov 28 07:19:53 crc kubenswrapper[4922]: I1128 07:19:53.624076 4922 scope.go:117] "RemoveContainer" containerID="12b16430a89a5b1e56a5f6468f519f0cc78f9332249e9e70b835663f8c8cf7f6" Nov 28 07:19:53 crc kubenswrapper[4922]: I1128 07:19:53.638023 4922 scope.go:117] "RemoveContainer" containerID="853f98691c2c9eea9fa65f3f60694178106dd650a20abb2f93f5e169033283dc" Nov 28 07:19:53 crc kubenswrapper[4922]: I1128 07:19:53.655855 4922 scope.go:117] "RemoveContainer" containerID="23b4034aad6d0adc0dd28dbcdf65c0e6bd65ce23fd1a27f18dd635918d96a2c8" Nov 28 07:19:53 crc kubenswrapper[4922]: I1128 07:19:53.678682 4922 scope.go:117] "RemoveContainer" containerID="7c24d63778c887d25e502fb8e85199d831af25e203c2a011764b487e1c2c78a1" Nov 28 07:19:53 crc kubenswrapper[4922]: I1128 07:19:53.694849 4922 scope.go:117] "RemoveContainer" containerID="ee22fa27f603d2ddf49e9b07c51a65a98b3a7d08d855a70a1f3939851f7f60c5" Nov 28 07:19:53 crc kubenswrapper[4922]: I1128 07:19:53.716256 4922 scope.go:117] "RemoveContainer" containerID="26dd3a6e06dd6158dbd36b9a5fc4871c38c4a4f2e97a3e98df0085998f6374ae" Nov 28 07:19:53 crc kubenswrapper[4922]: I1128 07:19:53.743958 4922 scope.go:117] "RemoveContainer" containerID="67652f0b906ad1c1976e1f6ddc6a979b5ca575b0328d4c60af513fc48df7cb8c" Nov 28 07:19:53 crc kubenswrapper[4922]: I1128 07:19:53.763569 4922 scope.go:117] "RemoveContainer" 
containerID="d3b90768a305d76f6bbe78d7fd4d3b39f50f3a28bba75d44bc70674b4aca8f70" Nov 28 07:19:54 crc kubenswrapper[4922]: E1128 07:19:54.173627 4922 fsHandler.go:119] failed to collect filesystem stats - rootDiskErr: could not stat "/var/lib/containers/storage/overlay/b34061793bccacc79a4fefb220761a63c57f3027f700f61fb205c085a6442f7f/diff" to get inode usage: stat /var/lib/containers/storage/overlay/b34061793bccacc79a4fefb220761a63c57f3027f700f61fb205c085a6442f7f/diff: no such file or directory, extraDiskErr: could not stat "/var/log/pods/openstack_ovn-controller-ovs-m9xpz_ec882eb7-01fb-4f7f-bad8-812346e5880e/ovs-vswitchd/0.log" to get inode usage: stat /var/log/pods/openstack_ovn-controller-ovs-m9xpz_ec882eb7-01fb-4f7f-bad8-812346e5880e/ovs-vswitchd/0.log: no such file or directory Nov 28 07:19:55 crc kubenswrapper[4922]: I1128 07:19:55.364510 4922 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/barbican-api-757bbb5fbd-lx4kn" podUID="f1e0e318-5b90-4c18-ba95-fc261ffb519d" containerName="barbican-api" probeResult="failure" output="Get \"https://10.217.0.206:9311/healthcheck\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Nov 28 07:19:55 crc kubenswrapper[4922]: I1128 07:19:55.379533 4922 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/barbican-api-757bbb5fbd-lx4kn" podUID="f1e0e318-5b90-4c18-ba95-fc261ffb519d" containerName="barbican-api-log" probeResult="failure" output="Get \"https://10.217.0.206:9311/healthcheck\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Nov 28 07:19:55 crc kubenswrapper[4922]: I1128 07:19:55.418977 4922 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="46c3d0a8-d9ed-419a-baf3-57aaaf0c56fe" path="/var/lib/kubelet/pods/46c3d0a8-d9ed-419a-baf3-57aaaf0c56fe/volumes" Nov 28 07:19:55 crc kubenswrapper[4922]: I1128 07:19:55.883774 4922 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-keystone-listener-7d9b9667cd-5cmld" Nov 28 07:19:55 crc kubenswrapper[4922]: I1128 07:19:55.988789 4922 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-worker-b597c9f45-g422b" Nov 28 07:19:56 crc kubenswrapper[4922]: I1128 07:19:56.016514 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/454a2683-850f-4ce0-8ebe-7758105dd255-config-data\") pod \"454a2683-850f-4ce0-8ebe-7758105dd255\" (UID: \"454a2683-850f-4ce0-8ebe-7758105dd255\") " Nov 28 07:19:56 crc kubenswrapper[4922]: I1128 07:19:56.016697 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/454a2683-850f-4ce0-8ebe-7758105dd255-config-data-custom\") pod \"454a2683-850f-4ce0-8ebe-7758105dd255\" (UID: \"454a2683-850f-4ce0-8ebe-7758105dd255\") " Nov 28 07:19:56 crc kubenswrapper[4922]: I1128 07:19:56.016763 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-b7864\" (UniqueName: \"kubernetes.io/projected/454a2683-850f-4ce0-8ebe-7758105dd255-kube-api-access-b7864\") pod \"454a2683-850f-4ce0-8ebe-7758105dd255\" (UID: \"454a2683-850f-4ce0-8ebe-7758105dd255\") " Nov 28 07:19:56 crc kubenswrapper[4922]: I1128 07:19:56.016803 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/454a2683-850f-4ce0-8ebe-7758105dd255-logs\") pod \"454a2683-850f-4ce0-8ebe-7758105dd255\" (UID: \"454a2683-850f-4ce0-8ebe-7758105dd255\") " Nov 28 07:19:56 crc kubenswrapper[4922]: I1128 07:19:56.017028 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/454a2683-850f-4ce0-8ebe-7758105dd255-combined-ca-bundle\") pod \"454a2683-850f-4ce0-8ebe-7758105dd255\" (UID: \"454a2683-850f-4ce0-8ebe-7758105dd255\") " Nov 28 07:19:56 crc kubenswrapper[4922]: I1128 07:19:56.017668 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/454a2683-850f-4ce0-8ebe-7758105dd255-logs" (OuterVolumeSpecName: "logs") pod "454a2683-850f-4ce0-8ebe-7758105dd255" (UID: "454a2683-850f-4ce0-8ebe-7758105dd255"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 07:19:56 crc kubenswrapper[4922]: I1128 07:19:56.022355 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/454a2683-850f-4ce0-8ebe-7758105dd255-kube-api-access-b7864" (OuterVolumeSpecName: "kube-api-access-b7864") pod "454a2683-850f-4ce0-8ebe-7758105dd255" (UID: "454a2683-850f-4ce0-8ebe-7758105dd255"). InnerVolumeSpecName "kube-api-access-b7864". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 07:19:56 crc kubenswrapper[4922]: I1128 07:19:56.023358 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/454a2683-850f-4ce0-8ebe-7758105dd255-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "454a2683-850f-4ce0-8ebe-7758105dd255" (UID: "454a2683-850f-4ce0-8ebe-7758105dd255"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 07:19:56 crc kubenswrapper[4922]: I1128 07:19:56.041323 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/454a2683-850f-4ce0-8ebe-7758105dd255-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "454a2683-850f-4ce0-8ebe-7758105dd255" (UID: "454a2683-850f-4ce0-8ebe-7758105dd255"). 
InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 07:19:56 crc kubenswrapper[4922]: I1128 07:19:56.053583 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/454a2683-850f-4ce0-8ebe-7758105dd255-config-data" (OuterVolumeSpecName: "config-data") pod "454a2683-850f-4ce0-8ebe-7758105dd255" (UID: "454a2683-850f-4ce0-8ebe-7758105dd255"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 07:19:56 crc kubenswrapper[4922]: I1128 07:19:56.117833 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-szv69\" (UniqueName: \"kubernetes.io/projected/6ef73d39-2ed2-4168-8598-e0749aa0a26b-kube-api-access-szv69\") pod \"6ef73d39-2ed2-4168-8598-e0749aa0a26b\" (UID: \"6ef73d39-2ed2-4168-8598-e0749aa0a26b\") " Nov 28 07:19:56 crc kubenswrapper[4922]: I1128 07:19:56.117922 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/6ef73d39-2ed2-4168-8598-e0749aa0a26b-logs\") pod \"6ef73d39-2ed2-4168-8598-e0749aa0a26b\" (UID: \"6ef73d39-2ed2-4168-8598-e0749aa0a26b\") " Nov 28 07:19:56 crc kubenswrapper[4922]: I1128 07:19:56.118116 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6ef73d39-2ed2-4168-8598-e0749aa0a26b-combined-ca-bundle\") pod \"6ef73d39-2ed2-4168-8598-e0749aa0a26b\" (UID: \"6ef73d39-2ed2-4168-8598-e0749aa0a26b\") " Nov 28 07:19:56 crc kubenswrapper[4922]: I1128 07:19:56.118150 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/6ef73d39-2ed2-4168-8598-e0749aa0a26b-config-data-custom\") pod \"6ef73d39-2ed2-4168-8598-e0749aa0a26b\" (UID: \"6ef73d39-2ed2-4168-8598-e0749aa0a26b\") " Nov 28 07:19:56 crc kubenswrapper[4922]: I1128 07:19:56.118185 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6ef73d39-2ed2-4168-8598-e0749aa0a26b-config-data\") pod \"6ef73d39-2ed2-4168-8598-e0749aa0a26b\" (UID: \"6ef73d39-2ed2-4168-8598-e0749aa0a26b\") " Nov 28 07:19:56 crc kubenswrapper[4922]: I1128 07:19:56.118498 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6ef73d39-2ed2-4168-8598-e0749aa0a26b-logs" (OuterVolumeSpecName: "logs") pod "6ef73d39-2ed2-4168-8598-e0749aa0a26b" (UID: "6ef73d39-2ed2-4168-8598-e0749aa0a26b"). InnerVolumeSpecName "logs". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 07:19:56 crc kubenswrapper[4922]: I1128 07:19:56.118699 4922 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/454a2683-850f-4ce0-8ebe-7758105dd255-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 07:19:56 crc kubenswrapper[4922]: I1128 07:19:56.118731 4922 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/454a2683-850f-4ce0-8ebe-7758105dd255-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 07:19:56 crc kubenswrapper[4922]: I1128 07:19:56.118748 4922 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/6ef73d39-2ed2-4168-8598-e0749aa0a26b-logs\") on node \"crc\" DevicePath \"\"" Nov 28 07:19:56 crc kubenswrapper[4922]: I1128 07:19:56.118767 4922 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/454a2683-850f-4ce0-8ebe-7758105dd255-config-data-custom\") on node \"crc\" DevicePath \"\"" Nov 28 07:19:56 crc kubenswrapper[4922]: I1128 07:19:56.118784 4922 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-b7864\" (UniqueName: \"kubernetes.io/projected/454a2683-850f-4ce0-8ebe-7758105dd255-kube-api-access-b7864\") on node \"crc\" DevicePath \"\"" Nov 28 07:19:56 crc kubenswrapper[4922]: I1128 07:19:56.118803 4922 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/454a2683-850f-4ce0-8ebe-7758105dd255-logs\") on node \"crc\" DevicePath \"\"" Nov 28 07:19:56 crc kubenswrapper[4922]: I1128 07:19:56.121150 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6ef73d39-2ed2-4168-8598-e0749aa0a26b-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "6ef73d39-2ed2-4168-8598-e0749aa0a26b" (UID: "6ef73d39-2ed2-4168-8598-e0749aa0a26b"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 07:19:56 crc kubenswrapper[4922]: I1128 07:19:56.122662 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6ef73d39-2ed2-4168-8598-e0749aa0a26b-kube-api-access-szv69" (OuterVolumeSpecName: "kube-api-access-szv69") pod "6ef73d39-2ed2-4168-8598-e0749aa0a26b" (UID: "6ef73d39-2ed2-4168-8598-e0749aa0a26b"). InnerVolumeSpecName "kube-api-access-szv69". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 07:19:56 crc kubenswrapper[4922]: I1128 07:19:56.137674 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6ef73d39-2ed2-4168-8598-e0749aa0a26b-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "6ef73d39-2ed2-4168-8598-e0749aa0a26b" (UID: "6ef73d39-2ed2-4168-8598-e0749aa0a26b"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 07:19:56 crc kubenswrapper[4922]: I1128 07:19:56.151817 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6ef73d39-2ed2-4168-8598-e0749aa0a26b-config-data" (OuterVolumeSpecName: "config-data") pod "6ef73d39-2ed2-4168-8598-e0749aa0a26b" (UID: "6ef73d39-2ed2-4168-8598-e0749aa0a26b"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 07:19:56 crc kubenswrapper[4922]: I1128 07:19:56.219925 4922 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-szv69\" (UniqueName: \"kubernetes.io/projected/6ef73d39-2ed2-4168-8598-e0749aa0a26b-kube-api-access-szv69\") on node \"crc\" DevicePath \"\"" Nov 28 07:19:56 crc kubenswrapper[4922]: I1128 07:19:56.219967 4922 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6ef73d39-2ed2-4168-8598-e0749aa0a26b-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 07:19:56 crc kubenswrapper[4922]: I1128 07:19:56.219978 4922 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/6ef73d39-2ed2-4168-8598-e0749aa0a26b-config-data-custom\") on node \"crc\" DevicePath \"\"" Nov 28 07:19:56 crc kubenswrapper[4922]: I1128 07:19:56.219989 4922 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6ef73d39-2ed2-4168-8598-e0749aa0a26b-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 07:19:56 crc kubenswrapper[4922]: I1128 07:19:56.496490 4922 generic.go:334] "Generic (PLEG): container finished" podID="6ef73d39-2ed2-4168-8598-e0749aa0a26b" containerID="10a36e42981000e949b616fd12cf1f58388e831215f4f330d4cdebd8733d1f6a" exitCode=137 Nov 28 07:19:56 crc kubenswrapper[4922]: I1128 07:19:56.496563 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-worker-b597c9f45-g422b" event={"ID":"6ef73d39-2ed2-4168-8598-e0749aa0a26b","Type":"ContainerDied","Data":"10a36e42981000e949b616fd12cf1f58388e831215f4f330d4cdebd8733d1f6a"} Nov 28 07:19:56 crc kubenswrapper[4922]: I1128 07:19:56.496617 4922 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-worker-b597c9f45-g422b" Nov 28 07:19:56 crc kubenswrapper[4922]: I1128 07:19:56.496646 4922 scope.go:117] "RemoveContainer" containerID="10a36e42981000e949b616fd12cf1f58388e831215f4f330d4cdebd8733d1f6a" Nov 28 07:19:56 crc kubenswrapper[4922]: I1128 07:19:56.496631 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-worker-b597c9f45-g422b" event={"ID":"6ef73d39-2ed2-4168-8598-e0749aa0a26b","Type":"ContainerDied","Data":"31b1b75d807a8cc6d66f615108f0120c087831e1386533fe825bfcb303db49b4"} Nov 28 07:19:56 crc kubenswrapper[4922]: I1128 07:19:56.500685 4922 generic.go:334] "Generic (PLEG): container finished" podID="454a2683-850f-4ce0-8ebe-7758105dd255" containerID="5a82b37419ba4b4e6e4d1a23689e9c832d484d7b2a65dc11f8dd0cb4e950f0bb" exitCode=137 Nov 28 07:19:56 crc kubenswrapper[4922]: I1128 07:19:56.500754 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-keystone-listener-7d9b9667cd-5cmld" event={"ID":"454a2683-850f-4ce0-8ebe-7758105dd255","Type":"ContainerDied","Data":"5a82b37419ba4b4e6e4d1a23689e9c832d484d7b2a65dc11f8dd0cb4e950f0bb"} Nov 28 07:19:56 crc kubenswrapper[4922]: I1128 07:19:56.500797 4922 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-keystone-listener-7d9b9667cd-5cmld" Nov 28 07:19:56 crc kubenswrapper[4922]: I1128 07:19:56.500810 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-keystone-listener-7d9b9667cd-5cmld" event={"ID":"454a2683-850f-4ce0-8ebe-7758105dd255","Type":"ContainerDied","Data":"82d9e543274e4be8f4826c69d7ecc1131e9e082d0a1ba62067cf8480c2e27a01"} Nov 28 07:19:56 crc kubenswrapper[4922]: I1128 07:19:56.532809 4922 scope.go:117] "RemoveContainer" containerID="5fac949f1f96664181265a651447466463b94e2a2793695fa4197d9252bd2acd" Nov 28 07:19:56 crc kubenswrapper[4922]: I1128 07:19:56.576932 4922 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-worker-b597c9f45-g422b"] Nov 28 07:19:56 crc kubenswrapper[4922]: I1128 07:19:56.587359 4922 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-worker-b597c9f45-g422b"] Nov 28 07:19:56 crc kubenswrapper[4922]: I1128 07:19:56.587592 4922 scope.go:117] "RemoveContainer" containerID="10a36e42981000e949b616fd12cf1f58388e831215f4f330d4cdebd8733d1f6a" Nov 28 07:19:56 crc kubenswrapper[4922]: E1128 07:19:56.588343 4922 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"10a36e42981000e949b616fd12cf1f58388e831215f4f330d4cdebd8733d1f6a\": container with ID starting with 10a36e42981000e949b616fd12cf1f58388e831215f4f330d4cdebd8733d1f6a not found: ID does not exist" containerID="10a36e42981000e949b616fd12cf1f58388e831215f4f330d4cdebd8733d1f6a" Nov 28 07:19:56 crc kubenswrapper[4922]: I1128 07:19:56.588371 4922 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"10a36e42981000e949b616fd12cf1f58388e831215f4f330d4cdebd8733d1f6a"} err="failed to get container status \"10a36e42981000e949b616fd12cf1f58388e831215f4f330d4cdebd8733d1f6a\": rpc error: code = NotFound desc = could not find container \"10a36e42981000e949b616fd12cf1f58388e831215f4f330d4cdebd8733d1f6a\": container with ID starting with 10a36e42981000e949b616fd12cf1f58388e831215f4f330d4cdebd8733d1f6a not found: ID does not exist" Nov 28 07:19:56 crc kubenswrapper[4922]: I1128 07:19:56.588391 4922 scope.go:117] "RemoveContainer" containerID="5fac949f1f96664181265a651447466463b94e2a2793695fa4197d9252bd2acd" Nov 28 07:19:56 crc kubenswrapper[4922]: E1128 07:19:56.589128 4922 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"5fac949f1f96664181265a651447466463b94e2a2793695fa4197d9252bd2acd\": container with ID starting with 5fac949f1f96664181265a651447466463b94e2a2793695fa4197d9252bd2acd not found: ID does not exist" containerID="5fac949f1f96664181265a651447466463b94e2a2793695fa4197d9252bd2acd" Nov 28 07:19:56 crc kubenswrapper[4922]: I1128 07:19:56.589148 4922 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5fac949f1f96664181265a651447466463b94e2a2793695fa4197d9252bd2acd"} err="failed to get container status \"5fac949f1f96664181265a651447466463b94e2a2793695fa4197d9252bd2acd\": rpc error: code = NotFound desc = could not find container \"5fac949f1f96664181265a651447466463b94e2a2793695fa4197d9252bd2acd\": container with ID starting with 5fac949f1f96664181265a651447466463b94e2a2793695fa4197d9252bd2acd not found: ID does not exist" Nov 28 07:19:56 crc kubenswrapper[4922]: I1128 07:19:56.589161 4922 scope.go:117] "RemoveContainer" 
containerID="5a82b37419ba4b4e6e4d1a23689e9c832d484d7b2a65dc11f8dd0cb4e950f0bb" Nov 28 07:19:56 crc kubenswrapper[4922]: I1128 07:19:56.595623 4922 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-keystone-listener-7d9b9667cd-5cmld"] Nov 28 07:19:56 crc kubenswrapper[4922]: I1128 07:19:56.601278 4922 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-keystone-listener-7d9b9667cd-5cmld"] Nov 28 07:19:56 crc kubenswrapper[4922]: I1128 07:19:56.650976 4922 scope.go:117] "RemoveContainer" containerID="44df4a4a257621f216f61d08c9cd358b5d8198f41a5d916577451fe0af7bb74c" Nov 28 07:19:56 crc kubenswrapper[4922]: I1128 07:19:56.680099 4922 scope.go:117] "RemoveContainer" containerID="5a82b37419ba4b4e6e4d1a23689e9c832d484d7b2a65dc11f8dd0cb4e950f0bb" Nov 28 07:19:56 crc kubenswrapper[4922]: E1128 07:19:56.680658 4922 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"5a82b37419ba4b4e6e4d1a23689e9c832d484d7b2a65dc11f8dd0cb4e950f0bb\": container with ID starting with 5a82b37419ba4b4e6e4d1a23689e9c832d484d7b2a65dc11f8dd0cb4e950f0bb not found: ID does not exist" containerID="5a82b37419ba4b4e6e4d1a23689e9c832d484d7b2a65dc11f8dd0cb4e950f0bb" Nov 28 07:19:56 crc kubenswrapper[4922]: I1128 07:19:56.680712 4922 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5a82b37419ba4b4e6e4d1a23689e9c832d484d7b2a65dc11f8dd0cb4e950f0bb"} err="failed to get container status \"5a82b37419ba4b4e6e4d1a23689e9c832d484d7b2a65dc11f8dd0cb4e950f0bb\": rpc error: code = NotFound desc = could not find container \"5a82b37419ba4b4e6e4d1a23689e9c832d484d7b2a65dc11f8dd0cb4e950f0bb\": container with ID starting with 5a82b37419ba4b4e6e4d1a23689e9c832d484d7b2a65dc11f8dd0cb4e950f0bb not found: ID does not exist" Nov 28 07:19:56 crc kubenswrapper[4922]: I1128 07:19:56.680746 4922 scope.go:117] "RemoveContainer" containerID="44df4a4a257621f216f61d08c9cd358b5d8198f41a5d916577451fe0af7bb74c" Nov 28 07:19:56 crc kubenswrapper[4922]: E1128 07:19:56.681076 4922 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"44df4a4a257621f216f61d08c9cd358b5d8198f41a5d916577451fe0af7bb74c\": container with ID starting with 44df4a4a257621f216f61d08c9cd358b5d8198f41a5d916577451fe0af7bb74c not found: ID does not exist" containerID="44df4a4a257621f216f61d08c9cd358b5d8198f41a5d916577451fe0af7bb74c" Nov 28 07:19:56 crc kubenswrapper[4922]: I1128 07:19:56.681134 4922 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"44df4a4a257621f216f61d08c9cd358b5d8198f41a5d916577451fe0af7bb74c"} err="failed to get container status \"44df4a4a257621f216f61d08c9cd358b5d8198f41a5d916577451fe0af7bb74c\": rpc error: code = NotFound desc = could not find container \"44df4a4a257621f216f61d08c9cd358b5d8198f41a5d916577451fe0af7bb74c\": container with ID starting with 44df4a4a257621f216f61d08c9cd358b5d8198f41a5d916577451fe0af7bb74c not found: ID does not exist" Nov 28 07:19:57 crc kubenswrapper[4922]: I1128 07:19:57.398409 4922 scope.go:117] "RemoveContainer" containerID="50188513677cb6b14ed255820648795c8e06f68b16a46dc87e268985d568770f" Nov 28 07:19:57 crc kubenswrapper[4922]: E1128 07:19:57.398613 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon 
pod=machine-config-daemon-h8wk6_openshift-machine-config-operator(0498340a-5e95-42bf-a0a6-8ac89a6b8858)\"" pod="openshift-machine-config-operator/machine-config-daemon-h8wk6" podUID="0498340a-5e95-42bf-a0a6-8ac89a6b8858" Nov 28 07:19:57 crc kubenswrapper[4922]: I1128 07:19:57.413667 4922 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="454a2683-850f-4ce0-8ebe-7758105dd255" path="/var/lib/kubelet/pods/454a2683-850f-4ce0-8ebe-7758105dd255/volumes" Nov 28 07:19:57 crc kubenswrapper[4922]: I1128 07:19:57.414760 4922 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6ef73d39-2ed2-4168-8598-e0749aa0a26b" path="/var/lib/kubelet/pods/6ef73d39-2ed2-4168-8598-e0749aa0a26b/volumes" Nov 28 07:19:57 crc kubenswrapper[4922]: I1128 07:19:57.912407 4922 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/barbican-api-757bbb5fbd-lx4kn" podUID="f1e0e318-5b90-4c18-ba95-fc261ffb519d" containerName="barbican-api-log" probeResult="failure" output="Get \"https://10.217.0.206:9311/healthcheck\": EOF" Nov 28 07:19:57 crc kubenswrapper[4922]: I1128 07:19:57.912433 4922 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/barbican-api-757bbb5fbd-lx4kn" podUID="f1e0e318-5b90-4c18-ba95-fc261ffb519d" containerName="barbican-api" probeResult="failure" output="Get \"https://10.217.0.206:9311/healthcheck\": EOF" Nov 28 07:19:58 crc kubenswrapper[4922]: I1128 07:19:58.535529 4922 generic.go:334] "Generic (PLEG): container finished" podID="f1e0e318-5b90-4c18-ba95-fc261ffb519d" containerID="076165d9817bbf196b798dd4cf1a2a4dea75e096874598893a033392c7a13f59" exitCode=137 Nov 28 07:19:58 crc kubenswrapper[4922]: I1128 07:19:58.535613 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-757bbb5fbd-lx4kn" event={"ID":"f1e0e318-5b90-4c18-ba95-fc261ffb519d","Type":"ContainerDied","Data":"076165d9817bbf196b798dd4cf1a2a4dea75e096874598893a033392c7a13f59"} Nov 28 07:19:58 crc kubenswrapper[4922]: I1128 07:19:58.934789 4922 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-api-757bbb5fbd-lx4kn" Nov 28 07:19:59 crc kubenswrapper[4922]: I1128 07:19:59.072555 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f1e0e318-5b90-4c18-ba95-fc261ffb519d-config-data\") pod \"f1e0e318-5b90-4c18-ba95-fc261ffb519d\" (UID: \"f1e0e318-5b90-4c18-ba95-fc261ffb519d\") " Nov 28 07:19:59 crc kubenswrapper[4922]: I1128 07:19:59.072693 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/f1e0e318-5b90-4c18-ba95-fc261ffb519d-logs\") pod \"f1e0e318-5b90-4c18-ba95-fc261ffb519d\" (UID: \"f1e0e318-5b90-4c18-ba95-fc261ffb519d\") " Nov 28 07:19:59 crc kubenswrapper[4922]: I1128 07:19:59.072785 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f1e0e318-5b90-4c18-ba95-fc261ffb519d-combined-ca-bundle\") pod \"f1e0e318-5b90-4c18-ba95-fc261ffb519d\" (UID: \"f1e0e318-5b90-4c18-ba95-fc261ffb519d\") " Nov 28 07:19:59 crc kubenswrapper[4922]: I1128 07:19:59.072849 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/f1e0e318-5b90-4c18-ba95-fc261ffb519d-config-data-custom\") pod \"f1e0e318-5b90-4c18-ba95-fc261ffb519d\" (UID: \"f1e0e318-5b90-4c18-ba95-fc261ffb519d\") " Nov 28 07:19:59 crc kubenswrapper[4922]: I1128 07:19:59.072979 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/f1e0e318-5b90-4c18-ba95-fc261ffb519d-internal-tls-certs\") pod \"f1e0e318-5b90-4c18-ba95-fc261ffb519d\" (UID: \"f1e0e318-5b90-4c18-ba95-fc261ffb519d\") " Nov 28 07:19:59 crc kubenswrapper[4922]: I1128 07:19:59.073030 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/f1e0e318-5b90-4c18-ba95-fc261ffb519d-public-tls-certs\") pod \"f1e0e318-5b90-4c18-ba95-fc261ffb519d\" (UID: \"f1e0e318-5b90-4c18-ba95-fc261ffb519d\") " Nov 28 07:19:59 crc kubenswrapper[4922]: I1128 07:19:59.073106 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-88wn4\" (UniqueName: \"kubernetes.io/projected/f1e0e318-5b90-4c18-ba95-fc261ffb519d-kube-api-access-88wn4\") pod \"f1e0e318-5b90-4c18-ba95-fc261ffb519d\" (UID: \"f1e0e318-5b90-4c18-ba95-fc261ffb519d\") " Nov 28 07:19:59 crc kubenswrapper[4922]: I1128 07:19:59.073367 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f1e0e318-5b90-4c18-ba95-fc261ffb519d-logs" (OuterVolumeSpecName: "logs") pod "f1e0e318-5b90-4c18-ba95-fc261ffb519d" (UID: "f1e0e318-5b90-4c18-ba95-fc261ffb519d"). InnerVolumeSpecName "logs". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 07:19:59 crc kubenswrapper[4922]: I1128 07:19:59.073858 4922 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/f1e0e318-5b90-4c18-ba95-fc261ffb519d-logs\") on node \"crc\" DevicePath \"\"" Nov 28 07:19:59 crc kubenswrapper[4922]: I1128 07:19:59.078195 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f1e0e318-5b90-4c18-ba95-fc261ffb519d-kube-api-access-88wn4" (OuterVolumeSpecName: "kube-api-access-88wn4") pod "f1e0e318-5b90-4c18-ba95-fc261ffb519d" (UID: "f1e0e318-5b90-4c18-ba95-fc261ffb519d"). InnerVolumeSpecName "kube-api-access-88wn4". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 07:19:59 crc kubenswrapper[4922]: I1128 07:19:59.080453 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f1e0e318-5b90-4c18-ba95-fc261ffb519d-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "f1e0e318-5b90-4c18-ba95-fc261ffb519d" (UID: "f1e0e318-5b90-4c18-ba95-fc261ffb519d"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 07:19:59 crc kubenswrapper[4922]: I1128 07:19:59.116595 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f1e0e318-5b90-4c18-ba95-fc261ffb519d-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "f1e0e318-5b90-4c18-ba95-fc261ffb519d" (UID: "f1e0e318-5b90-4c18-ba95-fc261ffb519d"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 07:19:59 crc kubenswrapper[4922]: I1128 07:19:59.122563 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f1e0e318-5b90-4c18-ba95-fc261ffb519d-config-data" (OuterVolumeSpecName: "config-data") pod "f1e0e318-5b90-4c18-ba95-fc261ffb519d" (UID: "f1e0e318-5b90-4c18-ba95-fc261ffb519d"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 07:19:59 crc kubenswrapper[4922]: I1128 07:19:59.132716 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f1e0e318-5b90-4c18-ba95-fc261ffb519d-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "f1e0e318-5b90-4c18-ba95-fc261ffb519d" (UID: "f1e0e318-5b90-4c18-ba95-fc261ffb519d"). InnerVolumeSpecName "public-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 07:19:59 crc kubenswrapper[4922]: I1128 07:19:59.140582 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f1e0e318-5b90-4c18-ba95-fc261ffb519d-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "f1e0e318-5b90-4c18-ba95-fc261ffb519d" (UID: "f1e0e318-5b90-4c18-ba95-fc261ffb519d"). InnerVolumeSpecName "internal-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 07:19:59 crc kubenswrapper[4922]: I1128 07:19:59.175423 4922 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f1e0e318-5b90-4c18-ba95-fc261ffb519d-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 07:19:59 crc kubenswrapper[4922]: I1128 07:19:59.175483 4922 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f1e0e318-5b90-4c18-ba95-fc261ffb519d-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 07:19:59 crc kubenswrapper[4922]: I1128 07:19:59.175504 4922 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/f1e0e318-5b90-4c18-ba95-fc261ffb519d-config-data-custom\") on node \"crc\" DevicePath \"\"" Nov 28 07:19:59 crc kubenswrapper[4922]: I1128 07:19:59.175522 4922 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/f1e0e318-5b90-4c18-ba95-fc261ffb519d-internal-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 28 07:19:59 crc kubenswrapper[4922]: I1128 07:19:59.175539 4922 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/f1e0e318-5b90-4c18-ba95-fc261ffb519d-public-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 28 07:19:59 crc kubenswrapper[4922]: I1128 07:19:59.175556 4922 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-88wn4\" (UniqueName: \"kubernetes.io/projected/f1e0e318-5b90-4c18-ba95-fc261ffb519d-kube-api-access-88wn4\") on node \"crc\" DevicePath \"\"" Nov 28 07:19:59 crc kubenswrapper[4922]: I1128 07:19:59.549292 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-757bbb5fbd-lx4kn" event={"ID":"f1e0e318-5b90-4c18-ba95-fc261ffb519d","Type":"ContainerDied","Data":"e63699a485817ccaccdf5b0db749f1a06973bbcadeed876b228797dbc9d18497"} Nov 28 07:19:59 crc kubenswrapper[4922]: I1128 07:19:59.549332 4922 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-api-757bbb5fbd-lx4kn" Nov 28 07:19:59 crc kubenswrapper[4922]: I1128 07:19:59.549392 4922 scope.go:117] "RemoveContainer" containerID="076165d9817bbf196b798dd4cf1a2a4dea75e096874598893a033392c7a13f59" Nov 28 07:19:59 crc kubenswrapper[4922]: I1128 07:19:59.585998 4922 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-api-757bbb5fbd-lx4kn"] Nov 28 07:19:59 crc kubenswrapper[4922]: I1128 07:19:59.588079 4922 scope.go:117] "RemoveContainer" containerID="e36de2182dd35fee67fc5ebf689f54d2adc2e37e33fa381890f944ad027f5c3a" Nov 28 07:19:59 crc kubenswrapper[4922]: I1128 07:19:59.592712 4922 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-api-757bbb5fbd-lx4kn"] Nov 28 07:19:59 crc kubenswrapper[4922]: E1128 07:19:59.885163 4922 configmap.go:193] Couldn't get configMap openstack/openstack-scripts: configmap "openstack-scripts" not found Nov 28 07:19:59 crc kubenswrapper[4922]: E1128 07:19:59.885204 4922 configmap.go:193] Couldn't get configMap openstack/openstack-scripts: configmap "openstack-scripts" not found Nov 28 07:19:59 crc kubenswrapper[4922]: E1128 07:19:59.885290 4922 configmap.go:193] Couldn't get configMap openstack/openstack-scripts: configmap "openstack-scripts" not found Nov 28 07:19:59 crc kubenswrapper[4922]: E1128 07:19:59.885308 4922 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/2425a44a-c3c8-4533-9aa6-deb657556efb-operator-scripts podName:2425a44a-c3c8-4533-9aa6-deb657556efb nodeName:}" failed. No retries permitted until 2025-11-28 07:20:31.885278045 +0000 UTC m=+1676.805673657 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "operator-scripts" (UniqueName: "kubernetes.io/configmap/2425a44a-c3c8-4533-9aa6-deb657556efb-operator-scripts") pod "novaapi78f1-account-delete-mwv4p" (UID: "2425a44a-c3c8-4533-9aa6-deb657556efb") : configmap "openstack-scripts" not found Nov 28 07:19:59 crc kubenswrapper[4922]: E1128 07:19:59.885393 4922 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/ca6d488f-6085-4e22-a325-1b749d8c154c-operator-scripts podName:ca6d488f-6085-4e22-a325-1b749d8c154c nodeName:}" failed. No retries permitted until 2025-11-28 07:20:31.885368448 +0000 UTC m=+1676.805764060 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "operator-scripts" (UniqueName: "kubernetes.io/configmap/ca6d488f-6085-4e22-a325-1b749d8c154c-operator-scripts") pod "placementdbc9-account-delete-68tbl" (UID: "ca6d488f-6085-4e22-a325-1b749d8c154c") : configmap "openstack-scripts" not found Nov 28 07:19:59 crc kubenswrapper[4922]: E1128 07:19:59.885414 4922 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/3e442fd0-cd46-4c04-afb3-96892d39c0f4-operator-scripts podName:3e442fd0-cd46-4c04-afb3-96892d39c0f4 nodeName:}" failed. No retries permitted until 2025-11-28 07:20:31.885402878 +0000 UTC m=+1676.805798500 (durationBeforeRetry 32s). 
Error: MountVolume.SetUp failed for volume "operator-scripts" (UniqueName: "kubernetes.io/configmap/3e442fd0-cd46-4c04-afb3-96892d39c0f4-operator-scripts") pod "barbican5229-account-delete-qq87v" (UID: "3e442fd0-cd46-4c04-afb3-96892d39c0f4") : configmap "openstack-scripts" not found Nov 28 07:20:00 crc kubenswrapper[4922]: I1128 07:20:00.581178 4922 generic.go:334] "Generic (PLEG): container finished" podID="ca6d488f-6085-4e22-a325-1b749d8c154c" containerID="bb3ce5985f855b9f6b316fd8b713d9eabb904b705a7cf10e089e680825de9d7f" exitCode=137 Nov 28 07:20:00 crc kubenswrapper[4922]: I1128 07:20:00.581283 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placementdbc9-account-delete-68tbl" event={"ID":"ca6d488f-6085-4e22-a325-1b749d8c154c","Type":"ContainerDied","Data":"bb3ce5985f855b9f6b316fd8b713d9eabb904b705a7cf10e089e680825de9d7f"} Nov 28 07:20:00 crc kubenswrapper[4922]: I1128 07:20:00.841695 4922 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placementdbc9-account-delete-68tbl" Nov 28 07:20:01 crc kubenswrapper[4922]: I1128 07:20:01.000128 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ca6d488f-6085-4e22-a325-1b749d8c154c-operator-scripts\") pod \"ca6d488f-6085-4e22-a325-1b749d8c154c\" (UID: \"ca6d488f-6085-4e22-a325-1b749d8c154c\") " Nov 28 07:20:01 crc kubenswrapper[4922]: I1128 07:20:01.000331 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dbd82\" (UniqueName: \"kubernetes.io/projected/ca6d488f-6085-4e22-a325-1b749d8c154c-kube-api-access-dbd82\") pod \"ca6d488f-6085-4e22-a325-1b749d8c154c\" (UID: \"ca6d488f-6085-4e22-a325-1b749d8c154c\") " Nov 28 07:20:01 crc kubenswrapper[4922]: I1128 07:20:01.001561 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ca6d488f-6085-4e22-a325-1b749d8c154c-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "ca6d488f-6085-4e22-a325-1b749d8c154c" (UID: "ca6d488f-6085-4e22-a325-1b749d8c154c"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 07:20:01 crc kubenswrapper[4922]: I1128 07:20:01.005595 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ca6d488f-6085-4e22-a325-1b749d8c154c-kube-api-access-dbd82" (OuterVolumeSpecName: "kube-api-access-dbd82") pod "ca6d488f-6085-4e22-a325-1b749d8c154c" (UID: "ca6d488f-6085-4e22-a325-1b749d8c154c"). InnerVolumeSpecName "kube-api-access-dbd82". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 07:20:01 crc kubenswrapper[4922]: E1128 07:20:01.005812 4922 configmap.go:193] Couldn't get configMap openstack/openstack-scripts: configmap "openstack-scripts" not found Nov 28 07:20:01 crc kubenswrapper[4922]: E1128 07:20:01.005921 4922 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/1f339784-df58-44f7-947d-9d80559c1c0c-operator-scripts podName:1f339784-df58-44f7-947d-9d80559c1c0c nodeName:}" failed. No retries permitted until 2025-11-28 07:20:33.005893324 +0000 UTC m=+1677.926288946 (durationBeforeRetry 32s). 
Error: MountVolume.SetUp failed for volume "operator-scripts" (UniqueName: "kubernetes.io/configmap/1f339784-df58-44f7-947d-9d80559c1c0c-operator-scripts") pod "glance75df-account-delete-wdw2x" (UID: "1f339784-df58-44f7-947d-9d80559c1c0c") : configmap "openstack-scripts" not found Nov 28 07:20:01 crc kubenswrapper[4922]: E1128 07:20:01.009431 4922 configmap.go:193] Couldn't get configMap openstack/openstack-scripts: configmap "openstack-scripts" not found Nov 28 07:20:01 crc kubenswrapper[4922]: E1128 07:20:01.009507 4922 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5e4e7296-ad39-41c1-9399-b3c9072c9158-operator-scripts podName:5e4e7296-ad39-41c1-9399-b3c9072c9158 nodeName:}" failed. No retries permitted until 2025-11-28 07:20:33.009489451 +0000 UTC m=+1677.929885043 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "operator-scripts" (UniqueName: "kubernetes.io/configmap/5e4e7296-ad39-41c1-9399-b3c9072c9158-operator-scripts") pod "novacell032a6-account-delete-tcwgc" (UID: "5e4e7296-ad39-41c1-9399-b3c9072c9158") : configmap "openstack-scripts" not found Nov 28 07:20:01 crc kubenswrapper[4922]: I1128 07:20:01.009563 4922 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dbd82\" (UniqueName: \"kubernetes.io/projected/ca6d488f-6085-4e22-a325-1b749d8c154c-kube-api-access-dbd82\") on node \"crc\" DevicePath \"\"" Nov 28 07:20:01 crc kubenswrapper[4922]: I1128 07:20:01.009580 4922 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ca6d488f-6085-4e22-a325-1b749d8c154c-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 07:20:01 crc kubenswrapper[4922]: I1128 07:20:01.232160 4922 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican5229-account-delete-qq87v" Nov 28 07:20:01 crc kubenswrapper[4922]: I1128 07:20:01.313535 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/3e442fd0-cd46-4c04-afb3-96892d39c0f4-operator-scripts\") pod \"3e442fd0-cd46-4c04-afb3-96892d39c0f4\" (UID: \"3e442fd0-cd46-4c04-afb3-96892d39c0f4\") " Nov 28 07:20:01 crc kubenswrapper[4922]: I1128 07:20:01.313756 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8ph6f\" (UniqueName: \"kubernetes.io/projected/3e442fd0-cd46-4c04-afb3-96892d39c0f4-kube-api-access-8ph6f\") pod \"3e442fd0-cd46-4c04-afb3-96892d39c0f4\" (UID: \"3e442fd0-cd46-4c04-afb3-96892d39c0f4\") " Nov 28 07:20:01 crc kubenswrapper[4922]: I1128 07:20:01.314363 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3e442fd0-cd46-4c04-afb3-96892d39c0f4-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "3e442fd0-cd46-4c04-afb3-96892d39c0f4" (UID: "3e442fd0-cd46-4c04-afb3-96892d39c0f4"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 07:20:01 crc kubenswrapper[4922]: I1128 07:20:01.319745 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3e442fd0-cd46-4c04-afb3-96892d39c0f4-kube-api-access-8ph6f" (OuterVolumeSpecName: "kube-api-access-8ph6f") pod "3e442fd0-cd46-4c04-afb3-96892d39c0f4" (UID: "3e442fd0-cd46-4c04-afb3-96892d39c0f4"). InnerVolumeSpecName "kube-api-access-8ph6f". 
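The account-delete pods above are all wedged on a missing ConfigMap: every MountVolume.SetUp attempt fails with configmap "openstack-scripts" not found, and nestedpendingoperations schedules the next attempt with a growing backoff (durationBeforeRetry 32s at this point, next retry at the absolute timestamp in each entry). A sketch mapping each blocked pod to the object it is waiting for; the regex is assumed from the wording above:

    import re

    SETUP_FAIL = re.compile(
        r'MountVolume\.SetUp failed for volume "(?P<vol>[^"]+)" '
        r'\(UniqueName: "[^"]+"\) pod "(?P<pod>[^"]+)" \(UID: "[^"]+"\) : '
        r'configmap "(?P<cm>[^"]+)" not found')

    def blocked_pods(lines):
        """Map each pod stuck in MountVolume.SetUp to the ConfigMap it waits for."""
        return {m["pod"]: m["cm"] for line in lines if (m := SETUP_FAIL.search(line))}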
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 07:20:01 crc kubenswrapper[4922]: I1128 07:20:01.408357 4922 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f1e0e318-5b90-4c18-ba95-fc261ffb519d" path="/var/lib/kubelet/pods/f1e0e318-5b90-4c18-ba95-fc261ffb519d/volumes" Nov 28 07:20:01 crc kubenswrapper[4922]: I1128 07:20:01.415951 4922 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8ph6f\" (UniqueName: \"kubernetes.io/projected/3e442fd0-cd46-4c04-afb3-96892d39c0f4-kube-api-access-8ph6f\") on node \"crc\" DevicePath \"\"" Nov 28 07:20:01 crc kubenswrapper[4922]: I1128 07:20:01.415990 4922 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/3e442fd0-cd46-4c04-afb3-96892d39c0f4-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 07:20:01 crc kubenswrapper[4922]: I1128 07:20:01.505526 4922 pod_container_manager_linux.go:210] "Failed to delete cgroup paths" cgroupName=["kubepods","besteffort","pod7dfc2e52-b959-4718-8f85-5bcec1a8ad10"] err="unable to destroy cgroup paths for cgroup [kubepods besteffort pod7dfc2e52-b959-4718-8f85-5bcec1a8ad10] : Timed out while waiting for systemd to remove kubepods-besteffort-pod7dfc2e52_b959_4718_8f85_5bcec1a8ad10.slice" Nov 28 07:20:01 crc kubenswrapper[4922]: E1128 07:20:01.505574 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to delete cgroup paths for [kubepods besteffort pod7dfc2e52-b959-4718-8f85-5bcec1a8ad10] : unable to destroy cgroup paths for cgroup [kubepods besteffort pod7dfc2e52-b959-4718-8f85-5bcec1a8ad10] : Timed out while waiting for systemd to remove kubepods-besteffort-pod7dfc2e52_b959_4718_8f85_5bcec1a8ad10.slice" pod="openstack/nova-cell0-conductor-0" podUID="7dfc2e52-b959-4718-8f85-5bcec1a8ad10" Nov 28 07:20:01 crc kubenswrapper[4922]: I1128 07:20:01.576929 4922 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/novaapi78f1-account-delete-mwv4p" Nov 28 07:20:01 crc kubenswrapper[4922]: I1128 07:20:01.601643 4922 generic.go:334] "Generic (PLEG): container finished" podID="1f339784-df58-44f7-947d-9d80559c1c0c" containerID="e61f1fa7822b5f2b3605c61ec01fc8d12a428bfdcca76852ff14c1bcc915a5cd" exitCode=137 Nov 28 07:20:01 crc kubenswrapper[4922]: I1128 07:20:01.601695 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance75df-account-delete-wdw2x" event={"ID":"1f339784-df58-44f7-947d-9d80559c1c0c","Type":"ContainerDied","Data":"e61f1fa7822b5f2b3605c61ec01fc8d12a428bfdcca76852ff14c1bcc915a5cd"} Nov 28 07:20:01 crc kubenswrapper[4922]: I1128 07:20:01.603484 4922 generic.go:334] "Generic (PLEG): container finished" podID="2425a44a-c3c8-4533-9aa6-deb657556efb" containerID="d7bcecc803abbf48fe4458864ac3df0f1c2163fcc4bbd9b6d2dd3b5b90dbbe4e" exitCode=137 Nov 28 07:20:01 crc kubenswrapper[4922]: I1128 07:20:01.603558 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/novaapi78f1-account-delete-mwv4p" event={"ID":"2425a44a-c3c8-4533-9aa6-deb657556efb","Type":"ContainerDied","Data":"d7bcecc803abbf48fe4458864ac3df0f1c2163fcc4bbd9b6d2dd3b5b90dbbe4e"} Nov 28 07:20:01 crc kubenswrapper[4922]: I1128 07:20:01.603578 4922 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/novaapi78f1-account-delete-mwv4p" Nov 28 07:20:01 crc kubenswrapper[4922]: I1128 07:20:01.603599 4922 scope.go:117] "RemoveContainer" containerID="d7bcecc803abbf48fe4458864ac3df0f1c2163fcc4bbd9b6d2dd3b5b90dbbe4e" Nov 28 07:20:01 crc kubenswrapper[4922]: I1128 07:20:01.603588 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/novaapi78f1-account-delete-mwv4p" event={"ID":"2425a44a-c3c8-4533-9aa6-deb657556efb","Type":"ContainerDied","Data":"7bfa8c1248500fef224bcac683aacd82b8b175f254693177bb479a8cb36f835f"} Nov 28 07:20:01 crc kubenswrapper[4922]: I1128 07:20:01.606733 4922 generic.go:334] "Generic (PLEG): container finished" podID="3e442fd0-cd46-4c04-afb3-96892d39c0f4" containerID="48a3ef178ad02775387bcd01d0bac70332b409ebbeaf25296cb9c690b01503c9" exitCode=137 Nov 28 07:20:01 crc kubenswrapper[4922]: I1128 07:20:01.606784 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican5229-account-delete-qq87v" event={"ID":"3e442fd0-cd46-4c04-afb3-96892d39c0f4","Type":"ContainerDied","Data":"48a3ef178ad02775387bcd01d0bac70332b409ebbeaf25296cb9c690b01503c9"} Nov 28 07:20:01 crc kubenswrapper[4922]: I1128 07:20:01.606803 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican5229-account-delete-qq87v" event={"ID":"3e442fd0-cd46-4c04-afb3-96892d39c0f4","Type":"ContainerDied","Data":"5341f6059602566c33672ba2224e2aeacf76ae8c7da5acca05962074ca8a9a0d"} Nov 28 07:20:01 crc kubenswrapper[4922]: I1128 07:20:01.606847 4922 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican5229-account-delete-qq87v" Nov 28 07:20:01 crc kubenswrapper[4922]: I1128 07:20:01.611263 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placementdbc9-account-delete-68tbl" event={"ID":"ca6d488f-6085-4e22-a325-1b749d8c154c","Type":"ContainerDied","Data":"3c1ae0c005e1f6a82024e6b130e9743b9a7886d2c5ab43b54b49958beb409f77"} Nov 28 07:20:01 crc kubenswrapper[4922]: I1128 07:20:01.611456 4922 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placementdbc9-account-delete-68tbl" Nov 28 07:20:01 crc kubenswrapper[4922]: I1128 07:20:01.616659 4922 generic.go:334] "Generic (PLEG): container finished" podID="5e4e7296-ad39-41c1-9399-b3c9072c9158" containerID="1b294e250b4e8005476292722797df8e5418fe5de28dbe00f75e79efc8faf395" exitCode=137 Nov 28 07:20:01 crc kubenswrapper[4922]: I1128 07:20:01.616749 4922 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-conductor-0" Nov 28 07:20:01 crc kubenswrapper[4922]: I1128 07:20:01.617543 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/novacell032a6-account-delete-tcwgc" event={"ID":"5e4e7296-ad39-41c1-9399-b3c9072c9158","Type":"ContainerDied","Data":"1b294e250b4e8005476292722797df8e5418fe5de28dbe00f75e79efc8faf395"} Nov 28 07:20:01 crc kubenswrapper[4922]: I1128 07:20:01.617758 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hj2p7\" (UniqueName: \"kubernetes.io/projected/2425a44a-c3c8-4533-9aa6-deb657556efb-kube-api-access-hj2p7\") pod \"2425a44a-c3c8-4533-9aa6-deb657556efb\" (UID: \"2425a44a-c3c8-4533-9aa6-deb657556efb\") " Nov 28 07:20:01 crc kubenswrapper[4922]: I1128 07:20:01.617833 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/2425a44a-c3c8-4533-9aa6-deb657556efb-operator-scripts\") pod \"2425a44a-c3c8-4533-9aa6-deb657556efb\" (UID: \"2425a44a-c3c8-4533-9aa6-deb657556efb\") " Nov 28 07:20:01 crc kubenswrapper[4922]: I1128 07:20:01.618641 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2425a44a-c3c8-4533-9aa6-deb657556efb-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "2425a44a-c3c8-4533-9aa6-deb657556efb" (UID: "2425a44a-c3c8-4533-9aa6-deb657556efb"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 07:20:01 crc kubenswrapper[4922]: I1128 07:20:01.632423 4922 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican5229-account-delete-qq87v"] Nov 28 07:20:01 crc kubenswrapper[4922]: I1128 07:20:01.636947 4922 scope.go:117] "RemoveContainer" containerID="d7bcecc803abbf48fe4458864ac3df0f1c2163fcc4bbd9b6d2dd3b5b90dbbe4e" Nov 28 07:20:01 crc kubenswrapper[4922]: E1128 07:20:01.639949 4922 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d7bcecc803abbf48fe4458864ac3df0f1c2163fcc4bbd9b6d2dd3b5b90dbbe4e\": container with ID starting with d7bcecc803abbf48fe4458864ac3df0f1c2163fcc4bbd9b6d2dd3b5b90dbbe4e not found: ID does not exist" containerID="d7bcecc803abbf48fe4458864ac3df0f1c2163fcc4bbd9b6d2dd3b5b90dbbe4e" Nov 28 07:20:01 crc kubenswrapper[4922]: I1128 07:20:01.640031 4922 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d7bcecc803abbf48fe4458864ac3df0f1c2163fcc4bbd9b6d2dd3b5b90dbbe4e"} err="failed to get container status \"d7bcecc803abbf48fe4458864ac3df0f1c2163fcc4bbd9b6d2dd3b5b90dbbe4e\": rpc error: code = NotFound desc = could not find container \"d7bcecc803abbf48fe4458864ac3df0f1c2163fcc4bbd9b6d2dd3b5b90dbbe4e\": container with ID starting with d7bcecc803abbf48fe4458864ac3df0f1c2163fcc4bbd9b6d2dd3b5b90dbbe4e not found: ID does not exist" Nov 28 07:20:01 crc kubenswrapper[4922]: I1128 07:20:01.640055 4922 scope.go:117] "RemoveContainer" containerID="48a3ef178ad02775387bcd01d0bac70332b409ebbeaf25296cb9c690b01503c9" Nov 28 07:20:01 crc kubenswrapper[4922]: I1128 07:20:01.640345 4922 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican5229-account-delete-qq87v"] Nov 28 07:20:01 crc kubenswrapper[4922]: I1128 07:20:01.640371 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2425a44a-c3c8-4533-9aa6-deb657556efb-kube-api-access-hj2p7" 
(OuterVolumeSpecName: "kube-api-access-hj2p7") pod "2425a44a-c3c8-4533-9aa6-deb657556efb" (UID: "2425a44a-c3c8-4533-9aa6-deb657556efb"). InnerVolumeSpecName "kube-api-access-hj2p7". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 07:20:01 crc kubenswrapper[4922]: I1128 07:20:01.655629 4922 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/placementdbc9-account-delete-68tbl"] Nov 28 07:20:01 crc kubenswrapper[4922]: I1128 07:20:01.667651 4922 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/placementdbc9-account-delete-68tbl"] Nov 28 07:20:01 crc kubenswrapper[4922]: I1128 07:20:01.684763 4922 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-conductor-0"] Nov 28 07:20:01 crc kubenswrapper[4922]: I1128 07:20:01.685661 4922 scope.go:117] "RemoveContainer" containerID="48a3ef178ad02775387bcd01d0bac70332b409ebbeaf25296cb9c690b01503c9" Nov 28 07:20:01 crc kubenswrapper[4922]: E1128 07:20:01.686128 4922 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"48a3ef178ad02775387bcd01d0bac70332b409ebbeaf25296cb9c690b01503c9\": container with ID starting with 48a3ef178ad02775387bcd01d0bac70332b409ebbeaf25296cb9c690b01503c9 not found: ID does not exist" containerID="48a3ef178ad02775387bcd01d0bac70332b409ebbeaf25296cb9c690b01503c9" Nov 28 07:20:01 crc kubenswrapper[4922]: I1128 07:20:01.686171 4922 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"48a3ef178ad02775387bcd01d0bac70332b409ebbeaf25296cb9c690b01503c9"} err="failed to get container status \"48a3ef178ad02775387bcd01d0bac70332b409ebbeaf25296cb9c690b01503c9\": rpc error: code = NotFound desc = could not find container \"48a3ef178ad02775387bcd01d0bac70332b409ebbeaf25296cb9c690b01503c9\": container with ID starting with 48a3ef178ad02775387bcd01d0bac70332b409ebbeaf25296cb9c690b01503c9 not found: ID does not exist" Nov 28 07:20:01 crc kubenswrapper[4922]: I1128 07:20:01.686266 4922 scope.go:117] "RemoveContainer" containerID="bb3ce5985f855b9f6b316fd8b713d9eabb904b705a7cf10e089e680825de9d7f" Nov 28 07:20:01 crc kubenswrapper[4922]: I1128 07:20:01.694618 4922 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-conductor-0"] Nov 28 07:20:01 crc kubenswrapper[4922]: I1128 07:20:01.719845 4922 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/2425a44a-c3c8-4533-9aa6-deb657556efb-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 07:20:01 crc kubenswrapper[4922]: I1128 07:20:01.719890 4922 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hj2p7\" (UniqueName: \"kubernetes.io/projected/2425a44a-c3c8-4533-9aa6-deb657556efb-kube-api-access-hj2p7\") on node \"crc\" DevicePath \"\"" Nov 28 07:20:01 crc kubenswrapper[4922]: I1128 07:20:01.743914 4922 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/novacell032a6-account-delete-tcwgc" Nov 28 07:20:01 crc kubenswrapper[4922]: I1128 07:20:01.748462 4922 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance75df-account-delete-wdw2x" Nov 28 07:20:01 crc kubenswrapper[4922]: I1128 07:20:01.821022 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-c2gk9\" (UniqueName: \"kubernetes.io/projected/5e4e7296-ad39-41c1-9399-b3c9072c9158-kube-api-access-c2gk9\") pod \"5e4e7296-ad39-41c1-9399-b3c9072c9158\" (UID: \"5e4e7296-ad39-41c1-9399-b3c9072c9158\") " Nov 28 07:20:01 crc kubenswrapper[4922]: I1128 07:20:01.821126 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/5e4e7296-ad39-41c1-9399-b3c9072c9158-operator-scripts\") pod \"5e4e7296-ad39-41c1-9399-b3c9072c9158\" (UID: \"5e4e7296-ad39-41c1-9399-b3c9072c9158\") " Nov 28 07:20:01 crc kubenswrapper[4922]: I1128 07:20:01.821168 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ldx9v\" (UniqueName: \"kubernetes.io/projected/1f339784-df58-44f7-947d-9d80559c1c0c-kube-api-access-ldx9v\") pod \"1f339784-df58-44f7-947d-9d80559c1c0c\" (UID: \"1f339784-df58-44f7-947d-9d80559c1c0c\") " Nov 28 07:20:01 crc kubenswrapper[4922]: I1128 07:20:01.821367 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/1f339784-df58-44f7-947d-9d80559c1c0c-operator-scripts\") pod \"1f339784-df58-44f7-947d-9d80559c1c0c\" (UID: \"1f339784-df58-44f7-947d-9d80559c1c0c\") " Nov 28 07:20:01 crc kubenswrapper[4922]: I1128 07:20:01.821655 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5e4e7296-ad39-41c1-9399-b3c9072c9158-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "5e4e7296-ad39-41c1-9399-b3c9072c9158" (UID: "5e4e7296-ad39-41c1-9399-b3c9072c9158"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 07:20:01 crc kubenswrapper[4922]: I1128 07:20:01.821749 4922 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/5e4e7296-ad39-41c1-9399-b3c9072c9158-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 07:20:01 crc kubenswrapper[4922]: I1128 07:20:01.821786 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1f339784-df58-44f7-947d-9d80559c1c0c-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "1f339784-df58-44f7-947d-9d80559c1c0c" (UID: "1f339784-df58-44f7-947d-9d80559c1c0c"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 07:20:01 crc kubenswrapper[4922]: I1128 07:20:01.824370 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1f339784-df58-44f7-947d-9d80559c1c0c-kube-api-access-ldx9v" (OuterVolumeSpecName: "kube-api-access-ldx9v") pod "1f339784-df58-44f7-947d-9d80559c1c0c" (UID: "1f339784-df58-44f7-947d-9d80559c1c0c"). InnerVolumeSpecName "kube-api-access-ldx9v". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 07:20:01 crc kubenswrapper[4922]: I1128 07:20:01.825721 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5e4e7296-ad39-41c1-9399-b3c9072c9158-kube-api-access-c2gk9" (OuterVolumeSpecName: "kube-api-access-c2gk9") pod "5e4e7296-ad39-41c1-9399-b3c9072c9158" (UID: "5e4e7296-ad39-41c1-9399-b3c9072c9158"). 
InnerVolumeSpecName "kube-api-access-c2gk9". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 07:20:01 crc kubenswrapper[4922]: I1128 07:20:01.923177 4922 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/1f339784-df58-44f7-947d-9d80559c1c0c-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 07:20:01 crc kubenswrapper[4922]: I1128 07:20:01.923209 4922 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-c2gk9\" (UniqueName: \"kubernetes.io/projected/5e4e7296-ad39-41c1-9399-b3c9072c9158-kube-api-access-c2gk9\") on node \"crc\" DevicePath \"\"" Nov 28 07:20:01 crc kubenswrapper[4922]: I1128 07:20:01.923234 4922 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ldx9v\" (UniqueName: \"kubernetes.io/projected/1f339784-df58-44f7-947d-9d80559c1c0c-kube-api-access-ldx9v\") on node \"crc\" DevicePath \"\"" Nov 28 07:20:01 crc kubenswrapper[4922]: I1128 07:20:01.933451 4922 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/novaapi78f1-account-delete-mwv4p"] Nov 28 07:20:01 crc kubenswrapper[4922]: I1128 07:20:01.938316 4922 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/novaapi78f1-account-delete-mwv4p"] Nov 28 07:20:02 crc kubenswrapper[4922]: I1128 07:20:02.636445 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/novacell032a6-account-delete-tcwgc" event={"ID":"5e4e7296-ad39-41c1-9399-b3c9072c9158","Type":"ContainerDied","Data":"d4dab28ad08f4dcc5cb1f357a3e1dd6d6ac36c62db7101a62cbba27885bdda92"} Nov 28 07:20:02 crc kubenswrapper[4922]: I1128 07:20:02.637973 4922 scope.go:117] "RemoveContainer" containerID="1b294e250b4e8005476292722797df8e5418fe5de28dbe00f75e79efc8faf395" Nov 28 07:20:02 crc kubenswrapper[4922]: I1128 07:20:02.636464 4922 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/novacell032a6-account-delete-tcwgc" Nov 28 07:20:02 crc kubenswrapper[4922]: I1128 07:20:02.638961 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance75df-account-delete-wdw2x" event={"ID":"1f339784-df58-44f7-947d-9d80559c1c0c","Type":"ContainerDied","Data":"82e2aea8a8118d592fb635465fa1cabb0ff91e106bd61f291e2a196c1a5611e4"} Nov 28 07:20:02 crc kubenswrapper[4922]: I1128 07:20:02.639155 4922 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance75df-account-delete-wdw2x" Nov 28 07:20:02 crc kubenswrapper[4922]: I1128 07:20:02.669936 4922 scope.go:117] "RemoveContainer" containerID="e61f1fa7822b5f2b3605c61ec01fc8d12a428bfdcca76852ff14c1bcc915a5cd" Nov 28 07:20:02 crc kubenswrapper[4922]: I1128 07:20:02.711772 4922 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/novacell032a6-account-delete-tcwgc"] Nov 28 07:20:02 crc kubenswrapper[4922]: I1128 07:20:02.723862 4922 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/novacell032a6-account-delete-tcwgc"] Nov 28 07:20:02 crc kubenswrapper[4922]: I1128 07:20:02.735133 4922 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance75df-account-delete-wdw2x"] Nov 28 07:20:02 crc kubenswrapper[4922]: I1128 07:20:02.742324 4922 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance75df-account-delete-wdw2x"] Nov 28 07:20:03 crc kubenswrapper[4922]: I1128 07:20:03.416082 4922 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1f339784-df58-44f7-947d-9d80559c1c0c" path="/var/lib/kubelet/pods/1f339784-df58-44f7-947d-9d80559c1c0c/volumes" Nov 28 07:20:03 crc kubenswrapper[4922]: I1128 07:20:03.417136 4922 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2425a44a-c3c8-4533-9aa6-deb657556efb" path="/var/lib/kubelet/pods/2425a44a-c3c8-4533-9aa6-deb657556efb/volumes" Nov 28 07:20:03 crc kubenswrapper[4922]: I1128 07:20:03.418176 4922 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3e442fd0-cd46-4c04-afb3-96892d39c0f4" path="/var/lib/kubelet/pods/3e442fd0-cd46-4c04-afb3-96892d39c0f4/volumes" Nov 28 07:20:03 crc kubenswrapper[4922]: I1128 07:20:03.419306 4922 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5e4e7296-ad39-41c1-9399-b3c9072c9158" path="/var/lib/kubelet/pods/5e4e7296-ad39-41c1-9399-b3c9072c9158/volumes" Nov 28 07:20:03 crc kubenswrapper[4922]: I1128 07:20:03.421563 4922 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7dfc2e52-b959-4718-8f85-5bcec1a8ad10" path="/var/lib/kubelet/pods/7dfc2e52-b959-4718-8f85-5bcec1a8ad10/volumes" Nov 28 07:20:03 crc kubenswrapper[4922]: I1128 07:20:03.422754 4922 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ca6d488f-6085-4e22-a325-1b749d8c154c" path="/var/lib/kubelet/pods/ca6d488f-6085-4e22-a325-1b749d8c154c/volumes" Nov 28 07:20:11 crc kubenswrapper[4922]: I1128 07:20:11.398827 4922 scope.go:117] "RemoveContainer" containerID="50188513677cb6b14ed255820648795c8e06f68b16a46dc87e268985d568770f" Nov 28 07:20:11 crc kubenswrapper[4922]: E1128 07:20:11.399961 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-h8wk6_openshift-machine-config-operator(0498340a-5e95-42bf-a0a6-8ac89a6b8858)\"" pod="openshift-machine-config-operator/machine-config-daemon-h8wk6" podUID="0498340a-5e95-42bf-a0a6-8ac89a6b8858" Nov 28 07:20:24 crc kubenswrapper[4922]: I1128 07:20:24.399070 4922 scope.go:117] "RemoveContainer" containerID="50188513677cb6b14ed255820648795c8e06f68b16a46dc87e268985d568770f" Nov 28 07:20:24 crc kubenswrapper[4922]: E1128 07:20:24.400097 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon 
pod=machine-config-daemon-h8wk6_openshift-machine-config-operator(0498340a-5e95-42bf-a0a6-8ac89a6b8858)\"" pod="openshift-machine-config-operator/machine-config-daemon-h8wk6" podUID="0498340a-5e95-42bf-a0a6-8ac89a6b8858" Nov 28 07:20:39 crc kubenswrapper[4922]: I1128 07:20:39.399117 4922 scope.go:117] "RemoveContainer" containerID="50188513677cb6b14ed255820648795c8e06f68b16a46dc87e268985d568770f" Nov 28 07:20:39 crc kubenswrapper[4922]: E1128 07:20:39.400272 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-h8wk6_openshift-machine-config-operator(0498340a-5e95-42bf-a0a6-8ac89a6b8858)\"" pod="openshift-machine-config-operator/machine-config-daemon-h8wk6" podUID="0498340a-5e95-42bf-a0a6-8ac89a6b8858" Nov 28 07:20:48 crc kubenswrapper[4922]: I1128 07:20:48.643838 4922 scope.go:117] "RemoveContainer" containerID="6be1d97db640d5474aaa7b9a9a30f8a39026dbc0c5f30d3287779f0276df5d19" Nov 28 07:20:48 crc kubenswrapper[4922]: I1128 07:20:48.687713 4922 scope.go:117] "RemoveContainer" containerID="6327f387153de8972c997a6ac2a21401ab00a932a9990342cf61d1e499fcf45e" Nov 28 07:20:48 crc kubenswrapper[4922]: I1128 07:20:48.737448 4922 scope.go:117] "RemoveContainer" containerID="e402e349dd0eadea957795fc5040e837508e28009179cb1326770ba305c82e02" Nov 28 07:20:48 crc kubenswrapper[4922]: I1128 07:20:48.771095 4922 scope.go:117] "RemoveContainer" containerID="ad88894264657dce236a449d6c1a1670d012fb1c0d3ce440093a76099c5e14a9" Nov 28 07:20:48 crc kubenswrapper[4922]: I1128 07:20:48.802144 4922 scope.go:117] "RemoveContainer" containerID="1bf50c3bf71cc2b87dc27ccb1b161efb4927ad6c2b31e4d00ce05f6d04927aa0" Nov 28 07:20:48 crc kubenswrapper[4922]: I1128 07:20:48.855950 4922 scope.go:117] "RemoveContainer" containerID="60b74196fbe2ff1313cee7aed6bbe3eed676d5ed07bfc2f788f42483321a3e19" Nov 28 07:20:48 crc kubenswrapper[4922]: I1128 07:20:48.879177 4922 scope.go:117] "RemoveContainer" containerID="e47ad8cdb4ff4702350e09f95a7319059daf400eab8c1e5b6e95d48c4c065416" Nov 28 07:20:48 crc kubenswrapper[4922]: I1128 07:20:48.899849 4922 scope.go:117] "RemoveContainer" containerID="d63f5032ec7eec07123d8a155a004097f6b3f389d18cf7e99c5e839856a73538" Nov 28 07:20:48 crc kubenswrapper[4922]: I1128 07:20:48.916995 4922 scope.go:117] "RemoveContainer" containerID="4832975092faa8cc9fb4db742453510f007c3fc48070e2ad74419d9ba8a9f78f" Nov 28 07:20:48 crc kubenswrapper[4922]: I1128 07:20:48.939564 4922 scope.go:117] "RemoveContainer" containerID="5110efc5edb18c6fad94ea09c68d35b7915ac98812274e1ced945184faa6c733" Nov 28 07:20:48 crc kubenswrapper[4922]: I1128 07:20:48.963599 4922 scope.go:117] "RemoveContainer" containerID="8f71a24ebe84a952f8cb8a31ae36d0aff6eddbd2132b3fe335e8fbf512a8c407" Nov 28 07:20:48 crc kubenswrapper[4922]: I1128 07:20:48.985802 4922 scope.go:117] "RemoveContainer" containerID="dc917e5ac2177fcf0349991256f89d7daab4e05658a7642e65f2e84ec52e5091" Nov 28 07:20:49 crc kubenswrapper[4922]: I1128 07:20:49.008317 4922 scope.go:117] "RemoveContainer" containerID="7f03b582aec1162263159ea6da47f876369f0dc7f8a0f4cc453baf73b8acf408" Nov 28 07:20:49 crc kubenswrapper[4922]: I1128 07:20:49.033060 4922 scope.go:117] "RemoveContainer" containerID="015faea8c54059d2c396bd52bbb248295f40419e62f3e4d0f792f4a2770227e6" Nov 28 07:20:49 crc kubenswrapper[4922]: I1128 07:20:49.060536 4922 scope.go:117] "RemoveContainer" 
containerID="bb427b6ae33fbc331bbaba903c4b8ab50fb72e61639236cfc209beabc0998499" Nov 28 07:20:54 crc kubenswrapper[4922]: I1128 07:20:54.399115 4922 scope.go:117] "RemoveContainer" containerID="50188513677cb6b14ed255820648795c8e06f68b16a46dc87e268985d568770f" Nov 28 07:20:54 crc kubenswrapper[4922]: E1128 07:20:54.400017 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-h8wk6_openshift-machine-config-operator(0498340a-5e95-42bf-a0a6-8ac89a6b8858)\"" pod="openshift-machine-config-operator/machine-config-daemon-h8wk6" podUID="0498340a-5e95-42bf-a0a6-8ac89a6b8858" Nov 28 07:21:07 crc kubenswrapper[4922]: I1128 07:21:07.399129 4922 scope.go:117] "RemoveContainer" containerID="50188513677cb6b14ed255820648795c8e06f68b16a46dc87e268985d568770f" Nov 28 07:21:07 crc kubenswrapper[4922]: E1128 07:21:07.399914 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-h8wk6_openshift-machine-config-operator(0498340a-5e95-42bf-a0a6-8ac89a6b8858)\"" pod="openshift-machine-config-operator/machine-config-daemon-h8wk6" podUID="0498340a-5e95-42bf-a0a6-8ac89a6b8858" Nov 28 07:21:19 crc kubenswrapper[4922]: I1128 07:21:19.398766 4922 scope.go:117] "RemoveContainer" containerID="50188513677cb6b14ed255820648795c8e06f68b16a46dc87e268985d568770f" Nov 28 07:21:19 crc kubenswrapper[4922]: E1128 07:21:19.399611 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-h8wk6_openshift-machine-config-operator(0498340a-5e95-42bf-a0a6-8ac89a6b8858)\"" pod="openshift-machine-config-operator/machine-config-daemon-h8wk6" podUID="0498340a-5e95-42bf-a0a6-8ac89a6b8858" Nov 28 07:21:31 crc kubenswrapper[4922]: I1128 07:21:31.398483 4922 scope.go:117] "RemoveContainer" containerID="50188513677cb6b14ed255820648795c8e06f68b16a46dc87e268985d568770f" Nov 28 07:21:31 crc kubenswrapper[4922]: E1128 07:21:31.399256 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-h8wk6_openshift-machine-config-operator(0498340a-5e95-42bf-a0a6-8ac89a6b8858)\"" pod="openshift-machine-config-operator/machine-config-daemon-h8wk6" podUID="0498340a-5e95-42bf-a0a6-8ac89a6b8858" Nov 28 07:21:43 crc kubenswrapper[4922]: I1128 07:21:43.398367 4922 scope.go:117] "RemoveContainer" containerID="50188513677cb6b14ed255820648795c8e06f68b16a46dc87e268985d568770f" Nov 28 07:21:43 crc kubenswrapper[4922]: E1128 07:21:43.399044 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-h8wk6_openshift-machine-config-operator(0498340a-5e95-42bf-a0a6-8ac89a6b8858)\"" pod="openshift-machine-config-operator/machine-config-daemon-h8wk6" podUID="0498340a-5e95-42bf-a0a6-8ac89a6b8858" Nov 28 07:21:49 crc kubenswrapper[4922]: I1128 07:21:49.603631 4922 scope.go:117] "RemoveContainer" 
containerID="29b28ab9109f2b7a10e56ca5f88ae46e6843947c82e9c3c79ada80258c2f7af2" Nov 28 07:21:49 crc kubenswrapper[4922]: I1128 07:21:49.640946 4922 scope.go:117] "RemoveContainer" containerID="ef4aee34e3a95cc67545ce7f5bd5bb7c06099bf473d6fbe68964bb534c89632c" Nov 28 07:21:49 crc kubenswrapper[4922]: I1128 07:21:49.692453 4922 scope.go:117] "RemoveContainer" containerID="2fede77209e185216c36e681023a500b49193d28779dba1f035c16d758a707af" Nov 28 07:21:49 crc kubenswrapper[4922]: I1128 07:21:49.716757 4922 scope.go:117] "RemoveContainer" containerID="3a18b37121a05838619515c25b85cf8300c82f732967d64971b59cf884f40ac9" Nov 28 07:21:49 crc kubenswrapper[4922]: I1128 07:21:49.760317 4922 scope.go:117] "RemoveContainer" containerID="6272661054f8024c9e3c8eadc23e10a3e6ff363967adc44a0dd8e60f1e941455" Nov 28 07:21:49 crc kubenswrapper[4922]: I1128 07:21:49.786524 4922 scope.go:117] "RemoveContainer" containerID="a64d396721a8963cced703ead55e89ffd3e5ecf1115b154b1c9a98c9aa628afe" Nov 28 07:21:49 crc kubenswrapper[4922]: I1128 07:21:49.829070 4922 scope.go:117] "RemoveContainer" containerID="cefaa4cdaf9ad55f4c7cbb99f1e2ca32d61f84e80b290f17490a70169206b776" Nov 28 07:21:57 crc kubenswrapper[4922]: I1128 07:21:57.399925 4922 scope.go:117] "RemoveContainer" containerID="50188513677cb6b14ed255820648795c8e06f68b16a46dc87e268985d568770f" Nov 28 07:21:57 crc kubenswrapper[4922]: E1128 07:21:57.400716 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-h8wk6_openshift-machine-config-operator(0498340a-5e95-42bf-a0a6-8ac89a6b8858)\"" pod="openshift-machine-config-operator/machine-config-daemon-h8wk6" podUID="0498340a-5e95-42bf-a0a6-8ac89a6b8858" Nov 28 07:22:12 crc kubenswrapper[4922]: I1128 07:22:12.398605 4922 scope.go:117] "RemoveContainer" containerID="50188513677cb6b14ed255820648795c8e06f68b16a46dc87e268985d568770f" Nov 28 07:22:12 crc kubenswrapper[4922]: E1128 07:22:12.399440 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-h8wk6_openshift-machine-config-operator(0498340a-5e95-42bf-a0a6-8ac89a6b8858)\"" pod="openshift-machine-config-operator/machine-config-daemon-h8wk6" podUID="0498340a-5e95-42bf-a0a6-8ac89a6b8858" Nov 28 07:22:27 crc kubenswrapper[4922]: I1128 07:22:27.399074 4922 scope.go:117] "RemoveContainer" containerID="50188513677cb6b14ed255820648795c8e06f68b16a46dc87e268985d568770f" Nov 28 07:22:27 crc kubenswrapper[4922]: E1128 07:22:27.399800 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-h8wk6_openshift-machine-config-operator(0498340a-5e95-42bf-a0a6-8ac89a6b8858)\"" pod="openshift-machine-config-operator/machine-config-daemon-h8wk6" podUID="0498340a-5e95-42bf-a0a6-8ac89a6b8858" Nov 28 07:22:40 crc kubenswrapper[4922]: I1128 07:22:40.399731 4922 scope.go:117] "RemoveContainer" containerID="50188513677cb6b14ed255820648795c8e06f68b16a46dc87e268985d568770f" Nov 28 07:22:40 crc kubenswrapper[4922]: E1128 07:22:40.400828 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: 
\"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-h8wk6_openshift-machine-config-operator(0498340a-5e95-42bf-a0a6-8ac89a6b8858)\"" pod="openshift-machine-config-operator/machine-config-daemon-h8wk6" podUID="0498340a-5e95-42bf-a0a6-8ac89a6b8858" Nov 28 07:22:49 crc kubenswrapper[4922]: I1128 07:22:49.976593 4922 scope.go:117] "RemoveContainer" containerID="31b146f13fd3f77793153557e7b67237270c42c2a177ad53d0c2fee7a88ba3e1" Nov 28 07:22:50 crc kubenswrapper[4922]: I1128 07:22:50.002702 4922 scope.go:117] "RemoveContainer" containerID="f4286284a20c6edd0e0aa4a65c4e2c67716484e715938e31801e5192f3451cb4" Nov 28 07:22:50 crc kubenswrapper[4922]: I1128 07:22:50.029881 4922 scope.go:117] "RemoveContainer" containerID="602b2fe4aa584b9689b82571f4d574d08a11fdfc974904fc9c89c5481f00ee22" Nov 28 07:22:50 crc kubenswrapper[4922]: I1128 07:22:50.084696 4922 scope.go:117] "RemoveContainer" containerID="ca51681097b612d4dfe7b851461b5269b74ed8bfc135acb38f4eda642ba424ca" Nov 28 07:22:50 crc kubenswrapper[4922]: I1128 07:22:50.111828 4922 scope.go:117] "RemoveContainer" containerID="8d636352f04a5d6b017ec6d07127ee84924adb2187df00d394fe1e1c6dd31678" Nov 28 07:22:50 crc kubenswrapper[4922]: I1128 07:22:50.137522 4922 scope.go:117] "RemoveContainer" containerID="544b8ca487961ebba0e9ae2fe55cddd16f56331a6a8dc4a0178b8e21bcbd479b" Nov 28 07:22:50 crc kubenswrapper[4922]: I1128 07:22:50.164296 4922 scope.go:117] "RemoveContainer" containerID="565068b14a54fb0888fb374689df2d8105a821181b3cf92257d558b607b5df90" Nov 28 07:22:50 crc kubenswrapper[4922]: I1128 07:22:50.189569 4922 scope.go:117] "RemoveContainer" containerID="d454c9cde22b3235276b93c623bcd54e057f380c3dc05277d2f60b55df5ae160" Nov 28 07:22:50 crc kubenswrapper[4922]: I1128 07:22:50.214977 4922 scope.go:117] "RemoveContainer" containerID="f3d2d07c15c9482f58a3ac7590f85b4346618b0eeecb29109666f5bb7d97c88b" Nov 28 07:22:50 crc kubenswrapper[4922]: I1128 07:22:50.243373 4922 scope.go:117] "RemoveContainer" containerID="8f93d8f601f3f004d218215d678891da73bd9d1c7c1fafa0468e8c068a5d027f" Nov 28 07:22:50 crc kubenswrapper[4922]: I1128 07:22:50.278901 4922 scope.go:117] "RemoveContainer" containerID="4da5e05550131fabfd117c197bf055d5d73de16102d7399e3b3054b31ac41369" Nov 28 07:22:50 crc kubenswrapper[4922]: I1128 07:22:50.302368 4922 scope.go:117] "RemoveContainer" containerID="a9691659d3ff87f88c40b63c23edb6d642acab95cce0a8ee57f6220a5ee64034" Nov 28 07:22:50 crc kubenswrapper[4922]: I1128 07:22:50.321759 4922 scope.go:117] "RemoveContainer" containerID="59b955d5f5e239d9256f177532927e0d6ad988d595cf7e2a850f169518ab9d6f" Nov 28 07:22:50 crc kubenswrapper[4922]: I1128 07:22:50.343871 4922 scope.go:117] "RemoveContainer" containerID="b1de83b3a6f902b036e25483c19523a902ba3f397b59a81a79e88727c51fa4bb" Nov 28 07:22:50 crc kubenswrapper[4922]: I1128 07:22:50.366227 4922 scope.go:117] "RemoveContainer" containerID="0c46d19aa54b7ae1c585537a2f9d7f0d49af908f52150fea5f7f93185d9dc261" Nov 28 07:22:50 crc kubenswrapper[4922]: I1128 07:22:50.382788 4922 scope.go:117] "RemoveContainer" containerID="28c7a774a98a2323e452c6bd805a2593a12ee9e288be70f0ee93a486ccecf3c7" Nov 28 07:22:50 crc kubenswrapper[4922]: I1128 07:22:50.402907 4922 scope.go:117] "RemoveContainer" containerID="08b04a1731e257cb827e2fa7f859061cea2670c54000e2d9698e3c70be4917ea" Nov 28 07:22:50 crc kubenswrapper[4922]: I1128 07:22:50.423861 4922 scope.go:117] "RemoveContainer" containerID="2007239de0a02e5d422915e862a8620464730ddc29632ad08e99aaf25724d88b" Nov 28 07:22:50 crc kubenswrapper[4922]: 
I1128 07:22:50.443717 4922 scope.go:117] "RemoveContainer" containerID="fa47c7971f018e6f78ebe16ae788ea0ffef357beb5f780cf216345ce77143bae" Nov 28 07:22:50 crc kubenswrapper[4922]: I1128 07:22:50.468601 4922 scope.go:117] "RemoveContainer" containerID="2cd63c61c70658881bcd06ab76b9b69600a15655926ae05749a525c4458f85a1" Nov 28 07:22:50 crc kubenswrapper[4922]: I1128 07:22:50.499469 4922 scope.go:117] "RemoveContainer" containerID="24c474c965b970a0b3c56c3c7e70c4cae7c1ae26b1b3ea9a8673e89232f551af" Nov 28 07:22:50 crc kubenswrapper[4922]: I1128 07:22:50.525320 4922 scope.go:117] "RemoveContainer" containerID="4813e7caa96017317260102272646b375cfb0680074050a84464f8277f0d586f" Nov 28 07:22:50 crc kubenswrapper[4922]: I1128 07:22:50.550721 4922 scope.go:117] "RemoveContainer" containerID="3e9e34bfe759256fa864acd8b367abab7da733b2eac9f67bae9e4abbbf926f5e" Nov 28 07:22:51 crc kubenswrapper[4922]: I1128 07:22:51.399644 4922 scope.go:117] "RemoveContainer" containerID="50188513677cb6b14ed255820648795c8e06f68b16a46dc87e268985d568770f" Nov 28 07:22:51 crc kubenswrapper[4922]: E1128 07:22:51.400673 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-h8wk6_openshift-machine-config-operator(0498340a-5e95-42bf-a0a6-8ac89a6b8858)\"" pod="openshift-machine-config-operator/machine-config-daemon-h8wk6" podUID="0498340a-5e95-42bf-a0a6-8ac89a6b8858" Nov 28 07:23:02 crc kubenswrapper[4922]: I1128 07:23:02.398962 4922 scope.go:117] "RemoveContainer" containerID="50188513677cb6b14ed255820648795c8e06f68b16a46dc87e268985d568770f" Nov 28 07:23:02 crc kubenswrapper[4922]: E1128 07:23:02.400299 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-h8wk6_openshift-machine-config-operator(0498340a-5e95-42bf-a0a6-8ac89a6b8858)\"" pod="openshift-machine-config-operator/machine-config-daemon-h8wk6" podUID="0498340a-5e95-42bf-a0a6-8ac89a6b8858" Nov 28 07:23:17 crc kubenswrapper[4922]: I1128 07:23:17.398640 4922 scope.go:117] "RemoveContainer" containerID="50188513677cb6b14ed255820648795c8e06f68b16a46dc87e268985d568770f" Nov 28 07:23:17 crc kubenswrapper[4922]: E1128 07:23:17.399440 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-h8wk6_openshift-machine-config-operator(0498340a-5e95-42bf-a0a6-8ac89a6b8858)\"" pod="openshift-machine-config-operator/machine-config-daemon-h8wk6" podUID="0498340a-5e95-42bf-a0a6-8ac89a6b8858" Nov 28 07:23:28 crc kubenswrapper[4922]: I1128 07:23:28.398793 4922 scope.go:117] "RemoveContainer" containerID="50188513677cb6b14ed255820648795c8e06f68b16a46dc87e268985d568770f" Nov 28 07:23:28 crc kubenswrapper[4922]: E1128 07:23:28.400014 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-h8wk6_openshift-machine-config-operator(0498340a-5e95-42bf-a0a6-8ac89a6b8858)\"" pod="openshift-machine-config-operator/machine-config-daemon-h8wk6" podUID="0498340a-5e95-42bf-a0a6-8ac89a6b8858" Nov 28 
07:23:43 crc kubenswrapper[4922]: I1128 07:23:43.398829 4922 scope.go:117] "RemoveContainer" containerID="50188513677cb6b14ed255820648795c8e06f68b16a46dc87e268985d568770f" Nov 28 07:23:43 crc kubenswrapper[4922]: E1128 07:23:43.399516 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-h8wk6_openshift-machine-config-operator(0498340a-5e95-42bf-a0a6-8ac89a6b8858)\"" pod="openshift-machine-config-operator/machine-config-daemon-h8wk6" podUID="0498340a-5e95-42bf-a0a6-8ac89a6b8858" Nov 28 07:23:50 crc kubenswrapper[4922]: I1128 07:23:50.758875 4922 scope.go:117] "RemoveContainer" containerID="481401725c47439fb318ffd00b81bd3da87b04044342d823c60fbcdefada5137" Nov 28 07:23:50 crc kubenswrapper[4922]: I1128 07:23:50.778679 4922 scope.go:117] "RemoveContainer" containerID="7483003f20a270bbf5574501f5b8ee700ddb926515c72515339c39eeb42a40e0" Nov 28 07:23:50 crc kubenswrapper[4922]: I1128 07:23:50.796143 4922 scope.go:117] "RemoveContainer" containerID="2b64afb60c87216a1524a5214f7e0fc6872a12dbbbbe2a4ea9c5c09709f0b040" Nov 28 07:23:50 crc kubenswrapper[4922]: I1128 07:23:50.846910 4922 scope.go:117] "RemoveContainer" containerID="b462e55e4b1d1673313c6dfd787beceb0503ca800228d804b043cfabe37c1295" Nov 28 07:23:50 crc kubenswrapper[4922]: I1128 07:23:50.865905 4922 scope.go:117] "RemoveContainer" containerID="0c67e228e5c4031baab99deaaa17d2e3b356c3de5d17e6c1f5a5e4b5ad468777" Nov 28 07:23:50 crc kubenswrapper[4922]: I1128 07:23:50.906721 4922 scope.go:117] "RemoveContainer" containerID="9af11d45a9ec4a7c6386d4f9531d1d6d3337db4ee65cf969fcc2b73d76c44a51" Nov 28 07:23:50 crc kubenswrapper[4922]: I1128 07:23:50.956205 4922 scope.go:117] "RemoveContainer" containerID="c0b119f326caea92368ebddce52cea98327f9c2b47b9496a3050507d1378df2b" Nov 28 07:23:56 crc kubenswrapper[4922]: I1128 07:23:56.399354 4922 scope.go:117] "RemoveContainer" containerID="50188513677cb6b14ed255820648795c8e06f68b16a46dc87e268985d568770f" Nov 28 07:23:56 crc kubenswrapper[4922]: E1128 07:23:56.400250 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-h8wk6_openshift-machine-config-operator(0498340a-5e95-42bf-a0a6-8ac89a6b8858)\"" pod="openshift-machine-config-operator/machine-config-daemon-h8wk6" podUID="0498340a-5e95-42bf-a0a6-8ac89a6b8858" Nov 28 07:24:08 crc kubenswrapper[4922]: I1128 07:24:08.398850 4922 scope.go:117] "RemoveContainer" containerID="50188513677cb6b14ed255820648795c8e06f68b16a46dc87e268985d568770f" Nov 28 07:24:08 crc kubenswrapper[4922]: E1128 07:24:08.399593 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-h8wk6_openshift-machine-config-operator(0498340a-5e95-42bf-a0a6-8ac89a6b8858)\"" pod="openshift-machine-config-operator/machine-config-daemon-h8wk6" podUID="0498340a-5e95-42bf-a0a6-8ac89a6b8858" Nov 28 07:24:22 crc kubenswrapper[4922]: I1128 07:24:22.398952 4922 scope.go:117] "RemoveContainer" containerID="50188513677cb6b14ed255820648795c8e06f68b16a46dc87e268985d568770f" Nov 28 07:24:22 crc kubenswrapper[4922]: E1128 07:24:22.400274 4922 pod_workers.go:1301] "Error syncing pod, 
skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-h8wk6_openshift-machine-config-operator(0498340a-5e95-42bf-a0a6-8ac89a6b8858)\"" pod="openshift-machine-config-operator/machine-config-daemon-h8wk6" podUID="0498340a-5e95-42bf-a0a6-8ac89a6b8858" Nov 28 07:24:34 crc kubenswrapper[4922]: I1128 07:24:34.398944 4922 scope.go:117] "RemoveContainer" containerID="50188513677cb6b14ed255820648795c8e06f68b16a46dc87e268985d568770f" Nov 28 07:24:35 crc kubenswrapper[4922]: I1128 07:24:35.584869 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-h8wk6" event={"ID":"0498340a-5e95-42bf-a0a6-8ac89a6b8858","Type":"ContainerStarted","Data":"97cf555779e0029b2b84ed5757414d94cd98ef367047f53959a1e37429ebd992"} Nov 28 07:24:51 crc kubenswrapper[4922]: I1128 07:24:51.044646 4922 scope.go:117] "RemoveContainer" containerID="a81a1de4776394ff567145b64398d5b889f141b8c8414e1def8da4f9987ce3f1" Nov 28 07:24:51 crc kubenswrapper[4922]: I1128 07:24:51.095341 4922 scope.go:117] "RemoveContainer" containerID="795b36a26de43f1013175b895aa1a7b8536f7165adb0d077f3c2fe6338925633" Nov 28 07:24:51 crc kubenswrapper[4922]: I1128 07:24:51.148680 4922 scope.go:117] "RemoveContainer" containerID="15a728fdcdf56d07c7207624ae8d8276363300730509050cda619bc595b50b6d" Nov 28 07:25:51 crc kubenswrapper[4922]: I1128 07:25:51.230118 4922 scope.go:117] "RemoveContainer" containerID="0e1a10156d7aa395ea45da2312dbadfb3411b35bdd8119560d9c3758fe29d917" Nov 28 07:25:51 crc kubenswrapper[4922]: I1128 07:25:51.262504 4922 scope.go:117] "RemoveContainer" containerID="53e8546901e2e79532d9bcc222c2dcdbe253ab144bd12f21ece8f360f849d3e9" Nov 28 07:26:52 crc kubenswrapper[4922]: I1128 07:26:52.377188 4922 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-rnfv5"] Nov 28 07:26:52 crc kubenswrapper[4922]: E1128 07:26:52.391491 4922 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f672d6bb-97fc-4547-a14b-af27d631fe2a" containerName="kube-state-metrics" Nov 28 07:26:52 crc kubenswrapper[4922]: I1128 07:26:52.391533 4922 state_mem.go:107] "Deleted CPUSet assignment" podUID="f672d6bb-97fc-4547-a14b-af27d631fe2a" containerName="kube-state-metrics" Nov 28 07:26:52 crc kubenswrapper[4922]: E1128 07:26:52.391567 4922 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="21fdc98b-667f-44b1-9fae-87f96ba4b514" containerName="extract-content" Nov 28 07:26:52 crc kubenswrapper[4922]: I1128 07:26:52.391576 4922 state_mem.go:107] "Deleted CPUSet assignment" podUID="21fdc98b-667f-44b1-9fae-87f96ba4b514" containerName="extract-content" Nov 28 07:26:52 crc kubenswrapper[4922]: E1128 07:26:52.391593 4922 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ec882eb7-01fb-4f7f-bad8-812346e5880e" containerName="ovsdb-server" Nov 28 07:26:52 crc kubenswrapper[4922]: I1128 07:26:52.391601 4922 state_mem.go:107] "Deleted CPUSet assignment" podUID="ec882eb7-01fb-4f7f-bad8-812346e5880e" containerName="ovsdb-server" Nov 28 07:26:52 crc kubenswrapper[4922]: E1128 07:26:52.391611 4922 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="21fdc98b-667f-44b1-9fae-87f96ba4b514" containerName="registry-server" Nov 28 07:26:52 crc kubenswrapper[4922]: I1128 07:26:52.391622 4922 state_mem.go:107] "Deleted CPUSet assignment" podUID="21fdc98b-667f-44b1-9fae-87f96ba4b514" containerName="registry-server" 
Nov 28 07:26:52 crc kubenswrapper[4922]: E1128 07:26:52.391644 4922 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="39a4d24f-6b5b-48fc-ab66-1ad33462c477" containerName="openstack-network-exporter" Nov 28 07:26:52 crc kubenswrapper[4922]: I1128 07:26:52.391653 4922 state_mem.go:107] "Deleted CPUSet assignment" podUID="39a4d24f-6b5b-48fc-ab66-1ad33462c477" containerName="openstack-network-exporter" Nov 28 07:26:52 crc kubenswrapper[4922]: E1128 07:26:52.391674 4922 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4cf25acc-0d60-4b0a-a9c9-adc7ddce7458" containerName="rabbitmq" Nov 28 07:26:52 crc kubenswrapper[4922]: I1128 07:26:52.391682 4922 state_mem.go:107] "Deleted CPUSet assignment" podUID="4cf25acc-0d60-4b0a-a9c9-adc7ddce7458" containerName="rabbitmq" Nov 28 07:26:52 crc kubenswrapper[4922]: E1128 07:26:52.391706 4922 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="46c3d0a8-d9ed-419a-baf3-57aaaf0c56fe" containerName="object-server" Nov 28 07:26:52 crc kubenswrapper[4922]: I1128 07:26:52.391713 4922 state_mem.go:107] "Deleted CPUSet assignment" podUID="46c3d0a8-d9ed-419a-baf3-57aaaf0c56fe" containerName="object-server" Nov 28 07:26:52 crc kubenswrapper[4922]: E1128 07:26:52.391722 4922 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7e1382f2-6597-4c09-a171-8709e4b9f5f7" containerName="nova-metadata-log" Nov 28 07:26:52 crc kubenswrapper[4922]: I1128 07:26:52.391730 4922 state_mem.go:107] "Deleted CPUSet assignment" podUID="7e1382f2-6597-4c09-a171-8709e4b9f5f7" containerName="nova-metadata-log" Nov 28 07:26:52 crc kubenswrapper[4922]: E1128 07:26:52.391749 4922 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e070595b-ded5-4ba1-8e5d-10dee3f64439" containerName="ovn-northd" Nov 28 07:26:52 crc kubenswrapper[4922]: I1128 07:26:52.391762 4922 state_mem.go:107] "Deleted CPUSet assignment" podUID="e070595b-ded5-4ba1-8e5d-10dee3f64439" containerName="ovn-northd" Nov 28 07:26:52 crc kubenswrapper[4922]: E1128 07:26:52.391771 4922 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="349fc74f-b0ac-437d-89ab-7106192b8e9e" containerName="mysql-bootstrap" Nov 28 07:26:52 crc kubenswrapper[4922]: I1128 07:26:52.391780 4922 state_mem.go:107] "Deleted CPUSet assignment" podUID="349fc74f-b0ac-437d-89ab-7106192b8e9e" containerName="mysql-bootstrap" Nov 28 07:26:52 crc kubenswrapper[4922]: E1128 07:26:52.391805 4922 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cf191164-20d6-4d60-b111-6373616d9622" containerName="neutron-httpd" Nov 28 07:26:52 crc kubenswrapper[4922]: I1128 07:26:52.391816 4922 state_mem.go:107] "Deleted CPUSet assignment" podUID="cf191164-20d6-4d60-b111-6373616d9622" containerName="neutron-httpd" Nov 28 07:26:52 crc kubenswrapper[4922]: E1128 07:26:52.391835 4922 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="98e654e6-cf7b-469f-aa60-118fee0e3764" containerName="nova-cell1-novncproxy-novncproxy" Nov 28 07:26:52 crc kubenswrapper[4922]: I1128 07:26:52.391843 4922 state_mem.go:107] "Deleted CPUSet assignment" podUID="98e654e6-cf7b-469f-aa60-118fee0e3764" containerName="nova-cell1-novncproxy-novncproxy" Nov 28 07:26:52 crc kubenswrapper[4922]: E1128 07:26:52.391872 4922 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2a50cebf-c40b-425a-86a1-7813277f1b5a" containerName="init" Nov 28 07:26:52 crc kubenswrapper[4922]: I1128 07:26:52.391880 4922 state_mem.go:107] "Deleted CPUSet assignment" 
podUID="2a50cebf-c40b-425a-86a1-7813277f1b5a" containerName="init" Nov 28 07:26:52 crc kubenswrapper[4922]: E1128 07:26:52.391896 4922 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="46c3d0a8-d9ed-419a-baf3-57aaaf0c56fe" containerName="container-replicator" Nov 28 07:26:52 crc kubenswrapper[4922]: I1128 07:26:52.391904 4922 state_mem.go:107] "Deleted CPUSet assignment" podUID="46c3d0a8-d9ed-419a-baf3-57aaaf0c56fe" containerName="container-replicator" Nov 28 07:26:52 crc kubenswrapper[4922]: E1128 07:26:52.391927 4922 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e5e01f31-28bd-46a2-b5cc-695c485deaf6" containerName="ovn-controller" Nov 28 07:26:52 crc kubenswrapper[4922]: I1128 07:26:52.391936 4922 state_mem.go:107] "Deleted CPUSet assignment" podUID="e5e01f31-28bd-46a2-b5cc-695c485deaf6" containerName="ovn-controller" Nov 28 07:26:52 crc kubenswrapper[4922]: E1128 07:26:52.391972 4922 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5ece1333-c457-4099-bf00-1daa969a14dc" containerName="barbican-keystone-listener" Nov 28 07:26:52 crc kubenswrapper[4922]: I1128 07:26:52.391981 4922 state_mem.go:107] "Deleted CPUSet assignment" podUID="5ece1333-c457-4099-bf00-1daa969a14dc" containerName="barbican-keystone-listener" Nov 28 07:26:52 crc kubenswrapper[4922]: E1128 07:26:52.391993 4922 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7dfc2e52-b959-4718-8f85-5bcec1a8ad10" containerName="nova-cell0-conductor-conductor" Nov 28 07:26:52 crc kubenswrapper[4922]: I1128 07:26:52.392002 4922 state_mem.go:107] "Deleted CPUSet assignment" podUID="7dfc2e52-b959-4718-8f85-5bcec1a8ad10" containerName="nova-cell0-conductor-conductor" Nov 28 07:26:52 crc kubenswrapper[4922]: E1128 07:26:52.392025 4922 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="46c3d0a8-d9ed-419a-baf3-57aaaf0c56fe" containerName="swift-recon-cron" Nov 28 07:26:52 crc kubenswrapper[4922]: I1128 07:26:52.392033 4922 state_mem.go:107] "Deleted CPUSet assignment" podUID="46c3d0a8-d9ed-419a-baf3-57aaaf0c56fe" containerName="swift-recon-cron" Nov 28 07:26:52 crc kubenswrapper[4922]: E1128 07:26:52.392045 4922 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ca6d488f-6085-4e22-a325-1b749d8c154c" containerName="mariadb-account-delete" Nov 28 07:26:52 crc kubenswrapper[4922]: I1128 07:26:52.392053 4922 state_mem.go:107] "Deleted CPUSet assignment" podUID="ca6d488f-6085-4e22-a325-1b749d8c154c" containerName="mariadb-account-delete" Nov 28 07:26:52 crc kubenswrapper[4922]: E1128 07:26:52.392087 4922 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c5b2a607-b6c1-4e95-b722-8b150c25f371" containerName="sg-core" Nov 28 07:26:52 crc kubenswrapper[4922]: I1128 07:26:52.392096 4922 state_mem.go:107] "Deleted CPUSet assignment" podUID="c5b2a607-b6c1-4e95-b722-8b150c25f371" containerName="sg-core" Nov 28 07:26:52 crc kubenswrapper[4922]: E1128 07:26:52.392105 4922 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="454a2683-850f-4ce0-8ebe-7758105dd255" containerName="barbican-keystone-listener-log" Nov 28 07:26:52 crc kubenswrapper[4922]: I1128 07:26:52.392115 4922 state_mem.go:107] "Deleted CPUSet assignment" podUID="454a2683-850f-4ce0-8ebe-7758105dd255" containerName="barbican-keystone-listener-log" Nov 28 07:26:52 crc kubenswrapper[4922]: E1128 07:26:52.392144 4922 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2070fbd8-e847-4b99-ba55-4579804bbc57" containerName="openstack-network-exporter" Nov 
28 07:26:52 crc kubenswrapper[4922]: I1128 07:26:52.392153 4922 state_mem.go:107] "Deleted CPUSet assignment" podUID="2070fbd8-e847-4b99-ba55-4579804bbc57" containerName="openstack-network-exporter" Nov 28 07:26:52 crc kubenswrapper[4922]: E1128 07:26:52.392178 4922 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="182970fb-401f-404c-81c1-db0294b02167" containerName="glance-httpd" Nov 28 07:26:52 crc kubenswrapper[4922]: I1128 07:26:52.392186 4922 state_mem.go:107] "Deleted CPUSet assignment" podUID="182970fb-401f-404c-81c1-db0294b02167" containerName="glance-httpd" Nov 28 07:26:52 crc kubenswrapper[4922]: E1128 07:26:52.392204 4922 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0cd494ee-6c17-4d94-96c2-9e2fcf02b2bc" containerName="glance-httpd" Nov 28 07:26:52 crc kubenswrapper[4922]: I1128 07:26:52.392234 4922 state_mem.go:107] "Deleted CPUSet assignment" podUID="0cd494ee-6c17-4d94-96c2-9e2fcf02b2bc" containerName="glance-httpd" Nov 28 07:26:52 crc kubenswrapper[4922]: E1128 07:26:52.392251 4922 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="99708a5d-57d5-4479-8e09-94428bb13fa3" containerName="setup-container" Nov 28 07:26:52 crc kubenswrapper[4922]: I1128 07:26:52.392260 4922 state_mem.go:107] "Deleted CPUSet assignment" podUID="99708a5d-57d5-4479-8e09-94428bb13fa3" containerName="setup-container" Nov 28 07:26:52 crc kubenswrapper[4922]: E1128 07:26:52.392272 4922 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e070595b-ded5-4ba1-8e5d-10dee3f64439" containerName="openstack-network-exporter" Nov 28 07:26:52 crc kubenswrapper[4922]: I1128 07:26:52.392298 4922 state_mem.go:107] "Deleted CPUSet assignment" podUID="e070595b-ded5-4ba1-8e5d-10dee3f64439" containerName="openstack-network-exporter" Nov 28 07:26:52 crc kubenswrapper[4922]: E1128 07:26:52.392314 4922 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="349fc74f-b0ac-437d-89ab-7106192b8e9e" containerName="galera" Nov 28 07:26:52 crc kubenswrapper[4922]: I1128 07:26:52.392322 4922 state_mem.go:107] "Deleted CPUSet assignment" podUID="349fc74f-b0ac-437d-89ab-7106192b8e9e" containerName="galera" Nov 28 07:26:52 crc kubenswrapper[4922]: E1128 07:26:52.392352 4922 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ec882eb7-01fb-4f7f-bad8-812346e5880e" containerName="ovs-vswitchd" Nov 28 07:26:52 crc kubenswrapper[4922]: I1128 07:26:52.392364 4922 state_mem.go:107] "Deleted CPUSet assignment" podUID="ec882eb7-01fb-4f7f-bad8-812346e5880e" containerName="ovs-vswitchd" Nov 28 07:26:52 crc kubenswrapper[4922]: E1128 07:26:52.392381 4922 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c39356f2-8f5d-45d3-8188-7d9428c4d8bf" containerName="mysql-bootstrap" Nov 28 07:26:52 crc kubenswrapper[4922]: I1128 07:26:52.392389 4922 state_mem.go:107] "Deleted CPUSet assignment" podUID="c39356f2-8f5d-45d3-8188-7d9428c4d8bf" containerName="mysql-bootstrap" Nov 28 07:26:52 crc kubenswrapper[4922]: E1128 07:26:52.392406 4922 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c39356f2-8f5d-45d3-8188-7d9428c4d8bf" containerName="galera" Nov 28 07:26:52 crc kubenswrapper[4922]: I1128 07:26:52.392414 4922 state_mem.go:107] "Deleted CPUSet assignment" podUID="c39356f2-8f5d-45d3-8188-7d9428c4d8bf" containerName="galera" Nov 28 07:26:52 crc kubenswrapper[4922]: E1128 07:26:52.392452 4922 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="454a2683-850f-4ce0-8ebe-7758105dd255" 
containerName="barbican-keystone-listener" Nov 28 07:26:52 crc kubenswrapper[4922]: I1128 07:26:52.392462 4922 state_mem.go:107] "Deleted CPUSet assignment" podUID="454a2683-850f-4ce0-8ebe-7758105dd255" containerName="barbican-keystone-listener" Nov 28 07:26:52 crc kubenswrapper[4922]: E1128 07:26:52.392484 4922 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="678d1f5b-5ebc-4b9e-b5ab-316ec7dfda05" containerName="probe" Nov 28 07:26:52 crc kubenswrapper[4922]: I1128 07:26:52.392492 4922 state_mem.go:107] "Deleted CPUSet assignment" podUID="678d1f5b-5ebc-4b9e-b5ab-316ec7dfda05" containerName="probe" Nov 28 07:26:52 crc kubenswrapper[4922]: E1128 07:26:52.392506 4922 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="46c3d0a8-d9ed-419a-baf3-57aaaf0c56fe" containerName="rsync" Nov 28 07:26:52 crc kubenswrapper[4922]: I1128 07:26:52.392514 4922 state_mem.go:107] "Deleted CPUSet assignment" podUID="46c3d0a8-d9ed-419a-baf3-57aaaf0c56fe" containerName="rsync" Nov 28 07:26:52 crc kubenswrapper[4922]: E1128 07:26:52.392538 4922 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4cf25acc-0d60-4b0a-a9c9-adc7ddce7458" containerName="setup-container" Nov 28 07:26:52 crc kubenswrapper[4922]: I1128 07:26:52.392547 4922 state_mem.go:107] "Deleted CPUSet assignment" podUID="4cf25acc-0d60-4b0a-a9c9-adc7ddce7458" containerName="setup-container" Nov 28 07:26:52 crc kubenswrapper[4922]: E1128 07:26:52.392557 4922 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f8685dc8-7577-4076-8a5a-beba52e9bae7" containerName="barbican-worker" Nov 28 07:26:52 crc kubenswrapper[4922]: I1128 07:26:52.392564 4922 state_mem.go:107] "Deleted CPUSet assignment" podUID="f8685dc8-7577-4076-8a5a-beba52e9bae7" containerName="barbican-worker" Nov 28 07:26:52 crc kubenswrapper[4922]: E1128 07:26:52.392579 4922 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5ece1333-c457-4099-bf00-1daa969a14dc" containerName="barbican-keystone-listener-log" Nov 28 07:26:52 crc kubenswrapper[4922]: I1128 07:26:52.392587 4922 state_mem.go:107] "Deleted CPUSet assignment" podUID="5ece1333-c457-4099-bf00-1daa969a14dc" containerName="barbican-keystone-listener-log" Nov 28 07:26:52 crc kubenswrapper[4922]: E1128 07:26:52.392596 4922 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6ef73d39-2ed2-4168-8598-e0749aa0a26b" containerName="barbican-worker" Nov 28 07:26:52 crc kubenswrapper[4922]: I1128 07:26:52.392618 4922 state_mem.go:107] "Deleted CPUSet assignment" podUID="6ef73d39-2ed2-4168-8598-e0749aa0a26b" containerName="barbican-worker" Nov 28 07:26:52 crc kubenswrapper[4922]: E1128 07:26:52.392635 4922 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cf191164-20d6-4d60-b111-6373616d9622" containerName="neutron-api" Nov 28 07:26:52 crc kubenswrapper[4922]: I1128 07:26:52.392643 4922 state_mem.go:107] "Deleted CPUSet assignment" podUID="cf191164-20d6-4d60-b111-6373616d9622" containerName="neutron-api" Nov 28 07:26:52 crc kubenswrapper[4922]: E1128 07:26:52.392666 4922 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="46c3d0a8-d9ed-419a-baf3-57aaaf0c56fe" containerName="account-reaper" Nov 28 07:26:52 crc kubenswrapper[4922]: I1128 07:26:52.392678 4922 state_mem.go:107] "Deleted CPUSet assignment" podUID="46c3d0a8-d9ed-419a-baf3-57aaaf0c56fe" containerName="account-reaper" Nov 28 07:26:52 crc kubenswrapper[4922]: E1128 07:26:52.392690 4922 cpu_manager.go:410] "RemoveStaleState: removing container" 
podUID="46c3d0a8-d9ed-419a-baf3-57aaaf0c56fe" containerName="object-auditor" Nov 28 07:26:52 crc kubenswrapper[4922]: I1128 07:26:52.392701 4922 state_mem.go:107] "Deleted CPUSet assignment" podUID="46c3d0a8-d9ed-419a-baf3-57aaaf0c56fe" containerName="object-auditor" Nov 28 07:26:52 crc kubenswrapper[4922]: E1128 07:26:52.392720 4922 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3e442fd0-cd46-4c04-afb3-96892d39c0f4" containerName="mariadb-account-delete" Nov 28 07:26:52 crc kubenswrapper[4922]: I1128 07:26:52.392728 4922 state_mem.go:107] "Deleted CPUSet assignment" podUID="3e442fd0-cd46-4c04-afb3-96892d39c0f4" containerName="mariadb-account-delete" Nov 28 07:26:52 crc kubenswrapper[4922]: E1128 07:26:52.392748 4922 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="46c3d0a8-d9ed-419a-baf3-57aaaf0c56fe" containerName="container-server" Nov 28 07:26:52 crc kubenswrapper[4922]: I1128 07:26:52.392755 4922 state_mem.go:107] "Deleted CPUSet assignment" podUID="46c3d0a8-d9ed-419a-baf3-57aaaf0c56fe" containerName="container-server" Nov 28 07:26:52 crc kubenswrapper[4922]: E1128 07:26:52.392770 4922 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0cd494ee-6c17-4d94-96c2-9e2fcf02b2bc" containerName="glance-log" Nov 28 07:26:52 crc kubenswrapper[4922]: I1128 07:26:52.392778 4922 state_mem.go:107] "Deleted CPUSet assignment" podUID="0cd494ee-6c17-4d94-96c2-9e2fcf02b2bc" containerName="glance-log" Nov 28 07:26:52 crc kubenswrapper[4922]: E1128 07:26:52.392802 4922 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="46c3d0a8-d9ed-419a-baf3-57aaaf0c56fe" containerName="container-updater" Nov 28 07:26:52 crc kubenswrapper[4922]: I1128 07:26:52.392809 4922 state_mem.go:107] "Deleted CPUSet assignment" podUID="46c3d0a8-d9ed-419a-baf3-57aaaf0c56fe" containerName="container-updater" Nov 28 07:26:52 crc kubenswrapper[4922]: E1128 07:26:52.392817 4922 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ed30601b-1e7c-4aa6-8469-8ff61cd93253" containerName="nova-api-api" Nov 28 07:26:52 crc kubenswrapper[4922]: I1128 07:26:52.392824 4922 state_mem.go:107] "Deleted CPUSet assignment" podUID="ed30601b-1e7c-4aa6-8469-8ff61cd93253" containerName="nova-api-api" Nov 28 07:26:52 crc kubenswrapper[4922]: E1128 07:26:52.392834 4922 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ed30601b-1e7c-4aa6-8469-8ff61cd93253" containerName="nova-api-log" Nov 28 07:26:52 crc kubenswrapper[4922]: I1128 07:26:52.392842 4922 state_mem.go:107] "Deleted CPUSet assignment" podUID="ed30601b-1e7c-4aa6-8469-8ff61cd93253" containerName="nova-api-log" Nov 28 07:26:52 crc kubenswrapper[4922]: E1128 07:26:52.392866 4922 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="46c3d0a8-d9ed-419a-baf3-57aaaf0c56fe" containerName="account-server" Nov 28 07:26:52 crc kubenswrapper[4922]: I1128 07:26:52.392874 4922 state_mem.go:107] "Deleted CPUSet assignment" podUID="46c3d0a8-d9ed-419a-baf3-57aaaf0c56fe" containerName="account-server" Nov 28 07:26:52 crc kubenswrapper[4922]: E1128 07:26:52.392888 4922 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f1e0e318-5b90-4c18-ba95-fc261ffb519d" containerName="barbican-api" Nov 28 07:26:52 crc kubenswrapper[4922]: I1128 07:26:52.392896 4922 state_mem.go:107] "Deleted CPUSet assignment" podUID="f1e0e318-5b90-4c18-ba95-fc261ffb519d" containerName="barbican-api" Nov 28 07:26:52 crc kubenswrapper[4922]: E1128 07:26:52.392905 4922 cpu_manager.go:410] "RemoveStaleState: removing 
container" podUID="2425a44a-c3c8-4533-9aa6-deb657556efb" containerName="mariadb-account-delete" Nov 28 07:26:52 crc kubenswrapper[4922]: I1128 07:26:52.392912 4922 state_mem.go:107] "Deleted CPUSet assignment" podUID="2425a44a-c3c8-4533-9aa6-deb657556efb" containerName="mariadb-account-delete" Nov 28 07:26:52 crc kubenswrapper[4922]: E1128 07:26:52.392940 4922 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c10f3b66-a7e0-4690-939a-5938de689b3a" containerName="proxy-httpd" Nov 28 07:26:52 crc kubenswrapper[4922]: I1128 07:26:52.392953 4922 state_mem.go:107] "Deleted CPUSet assignment" podUID="c10f3b66-a7e0-4690-939a-5938de689b3a" containerName="proxy-httpd" Nov 28 07:26:52 crc kubenswrapper[4922]: E1128 07:26:52.392969 4922 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="99708a5d-57d5-4479-8e09-94428bb13fa3" containerName="rabbitmq" Nov 28 07:26:52 crc kubenswrapper[4922]: I1128 07:26:52.392977 4922 state_mem.go:107] "Deleted CPUSet assignment" podUID="99708a5d-57d5-4479-8e09-94428bb13fa3" containerName="rabbitmq" Nov 28 07:26:52 crc kubenswrapper[4922]: E1128 07:26:52.392993 4922 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="182970fb-401f-404c-81c1-db0294b02167" containerName="glance-log" Nov 28 07:26:52 crc kubenswrapper[4922]: I1128 07:26:52.393001 4922 state_mem.go:107] "Deleted CPUSet assignment" podUID="182970fb-401f-404c-81c1-db0294b02167" containerName="glance-log" Nov 28 07:26:52 crc kubenswrapper[4922]: E1128 07:26:52.393013 4922 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b2566655-e076-471c-af4c-1e218f70ebe1" containerName="nova-cell1-conductor-conductor" Nov 28 07:26:52 crc kubenswrapper[4922]: I1128 07:26:52.393021 4922 state_mem.go:107] "Deleted CPUSet assignment" podUID="b2566655-e076-471c-af4c-1e218f70ebe1" containerName="nova-cell1-conductor-conductor" Nov 28 07:26:52 crc kubenswrapper[4922]: E1128 07:26:52.393038 4922 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7c0f0857-2ca9-49c8-90ac-1351b2ee2f11" containerName="nova-scheduler-scheduler" Nov 28 07:26:52 crc kubenswrapper[4922]: I1128 07:26:52.393047 4922 state_mem.go:107] "Deleted CPUSet assignment" podUID="7c0f0857-2ca9-49c8-90ac-1351b2ee2f11" containerName="nova-scheduler-scheduler" Nov 28 07:26:52 crc kubenswrapper[4922]: E1128 07:26:52.393062 4922 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7e1382f2-6597-4c09-a171-8709e4b9f5f7" containerName="nova-metadata-metadata" Nov 28 07:26:52 crc kubenswrapper[4922]: I1128 07:26:52.393069 4922 state_mem.go:107] "Deleted CPUSet assignment" podUID="7e1382f2-6597-4c09-a171-8709e4b9f5f7" containerName="nova-metadata-metadata" Nov 28 07:26:52 crc kubenswrapper[4922]: E1128 07:26:52.393096 4922 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="46c3d0a8-d9ed-419a-baf3-57aaaf0c56fe" containerName="account-replicator" Nov 28 07:26:52 crc kubenswrapper[4922]: I1128 07:26:52.393104 4922 state_mem.go:107] "Deleted CPUSet assignment" podUID="46c3d0a8-d9ed-419a-baf3-57aaaf0c56fe" containerName="account-replicator" Nov 28 07:26:52 crc kubenswrapper[4922]: E1128 07:26:52.393127 4922 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ec882eb7-01fb-4f7f-bad8-812346e5880e" containerName="ovsdb-server-init" Nov 28 07:26:52 crc kubenswrapper[4922]: I1128 07:26:52.393135 4922 state_mem.go:107] "Deleted CPUSet assignment" podUID="ec882eb7-01fb-4f7f-bad8-812346e5880e" containerName="ovsdb-server-init" Nov 28 07:26:52 crc kubenswrapper[4922]: 
E1128 07:26:52.393143 4922 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1d97532b-e9ff-4031-a82c-3db5e943bfd9" containerName="barbican-api-log" Nov 28 07:26:52 crc kubenswrapper[4922]: I1128 07:26:52.393151 4922 state_mem.go:107] "Deleted CPUSet assignment" podUID="1d97532b-e9ff-4031-a82c-3db5e943bfd9" containerName="barbican-api-log" Nov 28 07:26:52 crc kubenswrapper[4922]: E1128 07:26:52.393175 4922 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="46c3d0a8-d9ed-419a-baf3-57aaaf0c56fe" containerName="account-auditor" Nov 28 07:26:52 crc kubenswrapper[4922]: I1128 07:26:52.393182 4922 state_mem.go:107] "Deleted CPUSet assignment" podUID="46c3d0a8-d9ed-419a-baf3-57aaaf0c56fe" containerName="account-auditor" Nov 28 07:26:52 crc kubenswrapper[4922]: E1128 07:26:52.393204 4922 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="46c3d0a8-d9ed-419a-baf3-57aaaf0c56fe" containerName="object-expirer" Nov 28 07:26:52 crc kubenswrapper[4922]: I1128 07:26:52.393212 4922 state_mem.go:107] "Deleted CPUSet assignment" podUID="46c3d0a8-d9ed-419a-baf3-57aaaf0c56fe" containerName="object-expirer" Nov 28 07:26:52 crc kubenswrapper[4922]: E1128 07:26:52.393242 4922 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="02d26a97-d447-4f76-90ed-9357e343cd91" containerName="placement-api" Nov 28 07:26:52 crc kubenswrapper[4922]: I1128 07:26:52.393249 4922 state_mem.go:107] "Deleted CPUSet assignment" podUID="02d26a97-d447-4f76-90ed-9357e343cd91" containerName="placement-api" Nov 28 07:26:52 crc kubenswrapper[4922]: E1128 07:26:52.393338 4922 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f8685dc8-7577-4076-8a5a-beba52e9bae7" containerName="barbican-worker-log" Nov 28 07:26:52 crc kubenswrapper[4922]: I1128 07:26:52.393348 4922 state_mem.go:107] "Deleted CPUSet assignment" podUID="f8685dc8-7577-4076-8a5a-beba52e9bae7" containerName="barbican-worker-log" Nov 28 07:26:52 crc kubenswrapper[4922]: E1128 07:26:52.393361 4922 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2a50cebf-c40b-425a-86a1-7813277f1b5a" containerName="dnsmasq-dns" Nov 28 07:26:52 crc kubenswrapper[4922]: I1128 07:26:52.393369 4922 state_mem.go:107] "Deleted CPUSet assignment" podUID="2a50cebf-c40b-425a-86a1-7813277f1b5a" containerName="dnsmasq-dns" Nov 28 07:26:52 crc kubenswrapper[4922]: E1128 07:26:52.393385 4922 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="46c3d0a8-d9ed-419a-baf3-57aaaf0c56fe" containerName="object-updater" Nov 28 07:26:52 crc kubenswrapper[4922]: I1128 07:26:52.393393 4922 state_mem.go:107] "Deleted CPUSet assignment" podUID="46c3d0a8-d9ed-419a-baf3-57aaaf0c56fe" containerName="object-updater" Nov 28 07:26:52 crc kubenswrapper[4922]: E1128 07:26:52.393410 4922 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="39a4d24f-6b5b-48fc-ab66-1ad33462c477" containerName="ovsdbserver-sb" Nov 28 07:26:52 crc kubenswrapper[4922]: I1128 07:26:52.393418 4922 state_mem.go:107] "Deleted CPUSet assignment" podUID="39a4d24f-6b5b-48fc-ab66-1ad33462c477" containerName="ovsdbserver-sb" Nov 28 07:26:52 crc kubenswrapper[4922]: E1128 07:26:52.393438 4922 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c5b2a607-b6c1-4e95-b722-8b150c25f371" containerName="proxy-httpd" Nov 28 07:26:52 crc kubenswrapper[4922]: I1128 07:26:52.393446 4922 state_mem.go:107] "Deleted CPUSet assignment" podUID="c5b2a607-b6c1-4e95-b722-8b150c25f371" containerName="proxy-httpd" Nov 28 07:26:52 crc 
kubenswrapper[4922]: E1128 07:26:52.393468 4922 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="21fdc98b-667f-44b1-9fae-87f96ba4b514" containerName="extract-utilities" Nov 28 07:26:52 crc kubenswrapper[4922]: I1128 07:26:52.393476 4922 state_mem.go:107] "Deleted CPUSet assignment" podUID="21fdc98b-667f-44b1-9fae-87f96ba4b514" containerName="extract-utilities" Nov 28 07:26:52 crc kubenswrapper[4922]: E1128 07:26:52.393484 4922 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="92eb4ce5-fb24-4b33-8e79-6f4e7ba96372" containerName="mariadb-account-delete" Nov 28 07:26:52 crc kubenswrapper[4922]: I1128 07:26:52.393492 4922 state_mem.go:107] "Deleted CPUSet assignment" podUID="92eb4ce5-fb24-4b33-8e79-6f4e7ba96372" containerName="mariadb-account-delete" Nov 28 07:26:52 crc kubenswrapper[4922]: E1128 07:26:52.393515 4922 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="81469087-f8d4-4499-a1e3-9fe103758289" containerName="keystone-api" Nov 28 07:26:52 crc kubenswrapper[4922]: I1128 07:26:52.393523 4922 state_mem.go:107] "Deleted CPUSet assignment" podUID="81469087-f8d4-4499-a1e3-9fe103758289" containerName="keystone-api" Nov 28 07:26:52 crc kubenswrapper[4922]: E1128 07:26:52.393546 4922 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c038b865-4b32-4be3-9e0a-8c40dc140a68" containerName="cinder-api" Nov 28 07:26:52 crc kubenswrapper[4922]: I1128 07:26:52.393554 4922 state_mem.go:107] "Deleted CPUSet assignment" podUID="c038b865-4b32-4be3-9e0a-8c40dc140a68" containerName="cinder-api" Nov 28 07:26:52 crc kubenswrapper[4922]: E1128 07:26:52.393568 4922 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8d12d285-16c4-4e64-98d8-cff0f581aee4" containerName="memcached" Nov 28 07:26:52 crc kubenswrapper[4922]: I1128 07:26:52.393575 4922 state_mem.go:107] "Deleted CPUSet assignment" podUID="8d12d285-16c4-4e64-98d8-cff0f581aee4" containerName="memcached" Nov 28 07:26:52 crc kubenswrapper[4922]: E1128 07:26:52.393592 4922 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d27299ac-7d8d-4485-86fb-6ac7f34ea1ae" containerName="ovsdbserver-nb" Nov 28 07:26:52 crc kubenswrapper[4922]: I1128 07:26:52.393600 4922 state_mem.go:107] "Deleted CPUSet assignment" podUID="d27299ac-7d8d-4485-86fb-6ac7f34ea1ae" containerName="ovsdbserver-nb" Nov 28 07:26:52 crc kubenswrapper[4922]: E1128 07:26:52.393622 4922 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c10f3b66-a7e0-4690-939a-5938de689b3a" containerName="proxy-server" Nov 28 07:26:52 crc kubenswrapper[4922]: I1128 07:26:52.393630 4922 state_mem.go:107] "Deleted CPUSet assignment" podUID="c10f3b66-a7e0-4690-939a-5938de689b3a" containerName="proxy-server" Nov 28 07:26:52 crc kubenswrapper[4922]: E1128 07:26:52.393645 4922 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1f339784-df58-44f7-947d-9d80559c1c0c" containerName="mariadb-account-delete" Nov 28 07:26:52 crc kubenswrapper[4922]: I1128 07:26:52.393653 4922 state_mem.go:107] "Deleted CPUSet assignment" podUID="1f339784-df58-44f7-947d-9d80559c1c0c" containerName="mariadb-account-delete" Nov 28 07:26:52 crc kubenswrapper[4922]: E1128 07:26:52.393673 4922 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5e4e7296-ad39-41c1-9399-b3c9072c9158" containerName="mariadb-account-delete" Nov 28 07:26:52 crc kubenswrapper[4922]: I1128 07:26:52.393681 4922 state_mem.go:107] "Deleted CPUSet assignment" podUID="5e4e7296-ad39-41c1-9399-b3c9072c9158" 
containerName="mariadb-account-delete" Nov 28 07:26:52 crc kubenswrapper[4922]: E1128 07:26:52.393697 4922 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="678d1f5b-5ebc-4b9e-b5ab-316ec7dfda05" containerName="cinder-scheduler" Nov 28 07:26:52 crc kubenswrapper[4922]: I1128 07:26:52.393705 4922 state_mem.go:107] "Deleted CPUSet assignment" podUID="678d1f5b-5ebc-4b9e-b5ab-316ec7dfda05" containerName="cinder-scheduler" Nov 28 07:26:52 crc kubenswrapper[4922]: E1128 07:26:52.393727 4922 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="46c3d0a8-d9ed-419a-baf3-57aaaf0c56fe" containerName="object-replicator" Nov 28 07:26:52 crc kubenswrapper[4922]: I1128 07:26:52.393735 4922 state_mem.go:107] "Deleted CPUSet assignment" podUID="46c3d0a8-d9ed-419a-baf3-57aaaf0c56fe" containerName="object-replicator" Nov 28 07:26:52 crc kubenswrapper[4922]: E1128 07:26:52.393757 4922 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c038b865-4b32-4be3-9e0a-8c40dc140a68" containerName="cinder-api-log" Nov 28 07:26:52 crc kubenswrapper[4922]: I1128 07:26:52.393765 4922 state_mem.go:107] "Deleted CPUSet assignment" podUID="c038b865-4b32-4be3-9e0a-8c40dc140a68" containerName="cinder-api-log" Nov 28 07:26:52 crc kubenswrapper[4922]: E1128 07:26:52.393787 4922 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d27299ac-7d8d-4485-86fb-6ac7f34ea1ae" containerName="openstack-network-exporter" Nov 28 07:26:52 crc kubenswrapper[4922]: I1128 07:26:52.393794 4922 state_mem.go:107] "Deleted CPUSet assignment" podUID="d27299ac-7d8d-4485-86fb-6ac7f34ea1ae" containerName="openstack-network-exporter" Nov 28 07:26:52 crc kubenswrapper[4922]: E1128 07:26:52.393809 4922 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1d97532b-e9ff-4031-a82c-3db5e943bfd9" containerName="barbican-api" Nov 28 07:26:52 crc kubenswrapper[4922]: I1128 07:26:52.393818 4922 state_mem.go:107] "Deleted CPUSet assignment" podUID="1d97532b-e9ff-4031-a82c-3db5e943bfd9" containerName="barbican-api" Nov 28 07:26:52 crc kubenswrapper[4922]: E1128 07:26:52.393829 4922 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c5b2a607-b6c1-4e95-b722-8b150c25f371" containerName="ceilometer-notification-agent" Nov 28 07:26:52 crc kubenswrapper[4922]: I1128 07:26:52.393837 4922 state_mem.go:107] "Deleted CPUSet assignment" podUID="c5b2a607-b6c1-4e95-b722-8b150c25f371" containerName="ceilometer-notification-agent" Nov 28 07:26:52 crc kubenswrapper[4922]: E1128 07:26:52.393851 4922 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1018f07c-38b9-440f-b126-26e59293e757" containerName="mariadb-account-delete" Nov 28 07:26:52 crc kubenswrapper[4922]: I1128 07:26:52.393860 4922 state_mem.go:107] "Deleted CPUSet assignment" podUID="1018f07c-38b9-440f-b126-26e59293e757" containerName="mariadb-account-delete" Nov 28 07:26:52 crc kubenswrapper[4922]: E1128 07:26:52.393881 4922 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="46c3d0a8-d9ed-419a-baf3-57aaaf0c56fe" containerName="container-auditor" Nov 28 07:26:52 crc kubenswrapper[4922]: I1128 07:26:52.393889 4922 state_mem.go:107] "Deleted CPUSet assignment" podUID="46c3d0a8-d9ed-419a-baf3-57aaaf0c56fe" containerName="container-auditor" Nov 28 07:26:52 crc kubenswrapper[4922]: E1128 07:26:52.393905 4922 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f1e0e318-5b90-4c18-ba95-fc261ffb519d" containerName="barbican-api-log" Nov 28 07:26:52 crc kubenswrapper[4922]: I1128 07:26:52.393914 4922 
state_mem.go:107] "Deleted CPUSet assignment" podUID="f1e0e318-5b90-4c18-ba95-fc261ffb519d" containerName="barbican-api-log" Nov 28 07:26:52 crc kubenswrapper[4922]: E1128 07:26:52.393922 4922 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="02d26a97-d447-4f76-90ed-9357e343cd91" containerName="placement-log" Nov 28 07:26:52 crc kubenswrapper[4922]: I1128 07:26:52.393930 4922 state_mem.go:107] "Deleted CPUSet assignment" podUID="02d26a97-d447-4f76-90ed-9357e343cd91" containerName="placement-log" Nov 28 07:26:52 crc kubenswrapper[4922]: E1128 07:26:52.393952 4922 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c5b2a607-b6c1-4e95-b722-8b150c25f371" containerName="ceilometer-central-agent" Nov 28 07:26:52 crc kubenswrapper[4922]: I1128 07:26:52.393960 4922 state_mem.go:107] "Deleted CPUSet assignment" podUID="c5b2a607-b6c1-4e95-b722-8b150c25f371" containerName="ceilometer-central-agent" Nov 28 07:26:52 crc kubenswrapper[4922]: E1128 07:26:52.393975 4922 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6ef73d39-2ed2-4168-8598-e0749aa0a26b" containerName="barbican-worker-log" Nov 28 07:26:52 crc kubenswrapper[4922]: I1128 07:26:52.393983 4922 state_mem.go:107] "Deleted CPUSet assignment" podUID="6ef73d39-2ed2-4168-8598-e0749aa0a26b" containerName="barbican-worker-log" Nov 28 07:26:52 crc kubenswrapper[4922]: I1128 07:26:52.394342 4922 memory_manager.go:354] "RemoveStaleState removing state" podUID="5e4e7296-ad39-41c1-9399-b3c9072c9158" containerName="mariadb-account-delete" Nov 28 07:26:52 crc kubenswrapper[4922]: I1128 07:26:52.394366 4922 memory_manager.go:354] "RemoveStaleState removing state" podUID="46c3d0a8-d9ed-419a-baf3-57aaaf0c56fe" containerName="container-replicator" Nov 28 07:26:52 crc kubenswrapper[4922]: I1128 07:26:52.394390 4922 memory_manager.go:354] "RemoveStaleState removing state" podUID="c5b2a607-b6c1-4e95-b722-8b150c25f371" containerName="ceilometer-notification-agent" Nov 28 07:26:52 crc kubenswrapper[4922]: I1128 07:26:52.394423 4922 memory_manager.go:354] "RemoveStaleState removing state" podUID="1d97532b-e9ff-4031-a82c-3db5e943bfd9" containerName="barbican-api-log" Nov 28 07:26:52 crc kubenswrapper[4922]: I1128 07:26:52.394439 4922 memory_manager.go:354] "RemoveStaleState removing state" podUID="46c3d0a8-d9ed-419a-baf3-57aaaf0c56fe" containerName="container-server" Nov 28 07:26:52 crc kubenswrapper[4922]: I1128 07:26:52.394453 4922 memory_manager.go:354] "RemoveStaleState removing state" podUID="39a4d24f-6b5b-48fc-ab66-1ad33462c477" containerName="openstack-network-exporter" Nov 28 07:26:52 crc kubenswrapper[4922]: I1128 07:26:52.394476 4922 memory_manager.go:354] "RemoveStaleState removing state" podUID="02d26a97-d447-4f76-90ed-9357e343cd91" containerName="placement-api" Nov 28 07:26:52 crc kubenswrapper[4922]: I1128 07:26:52.394487 4922 memory_manager.go:354] "RemoveStaleState removing state" podUID="2a50cebf-c40b-425a-86a1-7813277f1b5a" containerName="dnsmasq-dns" Nov 28 07:26:52 crc kubenswrapper[4922]: I1128 07:26:52.394501 4922 memory_manager.go:354] "RemoveStaleState removing state" podUID="2070fbd8-e847-4b99-ba55-4579804bbc57" containerName="openstack-network-exporter" Nov 28 07:26:52 crc kubenswrapper[4922]: I1128 07:26:52.394515 4922 memory_manager.go:354] "RemoveStaleState removing state" podUID="46c3d0a8-d9ed-419a-baf3-57aaaf0c56fe" containerName="swift-recon-cron" Nov 28 07:26:52 crc kubenswrapper[4922]: I1128 07:26:52.394532 4922 memory_manager.go:354] "RemoveStaleState removing state" 
podUID="b2566655-e076-471c-af4c-1e218f70ebe1" containerName="nova-cell1-conductor-conductor" Nov 28 07:26:52 crc kubenswrapper[4922]: I1128 07:26:52.394558 4922 memory_manager.go:354] "RemoveStaleState removing state" podUID="c038b865-4b32-4be3-9e0a-8c40dc140a68" containerName="cinder-api-log" Nov 28 07:26:52 crc kubenswrapper[4922]: I1128 07:26:52.394582 4922 memory_manager.go:354] "RemoveStaleState removing state" podUID="1018f07c-38b9-440f-b126-26e59293e757" containerName="mariadb-account-delete" Nov 28 07:26:52 crc kubenswrapper[4922]: I1128 07:26:52.394605 4922 memory_manager.go:354] "RemoveStaleState removing state" podUID="39a4d24f-6b5b-48fc-ab66-1ad33462c477" containerName="ovsdbserver-sb" Nov 28 07:26:52 crc kubenswrapper[4922]: I1128 07:26:52.394619 4922 memory_manager.go:354] "RemoveStaleState removing state" podUID="46c3d0a8-d9ed-419a-baf3-57aaaf0c56fe" containerName="object-replicator" Nov 28 07:26:52 crc kubenswrapper[4922]: I1128 07:26:52.394634 4922 memory_manager.go:354] "RemoveStaleState removing state" podUID="8d12d285-16c4-4e64-98d8-cff0f581aee4" containerName="memcached" Nov 28 07:26:52 crc kubenswrapper[4922]: I1128 07:26:52.394653 4922 memory_manager.go:354] "RemoveStaleState removing state" podUID="f8685dc8-7577-4076-8a5a-beba52e9bae7" containerName="barbican-worker-log" Nov 28 07:26:52 crc kubenswrapper[4922]: I1128 07:26:52.394676 4922 memory_manager.go:354] "RemoveStaleState removing state" podUID="46c3d0a8-d9ed-419a-baf3-57aaaf0c56fe" containerName="object-expirer" Nov 28 07:26:52 crc kubenswrapper[4922]: I1128 07:26:52.394698 4922 memory_manager.go:354] "RemoveStaleState removing state" podUID="46c3d0a8-d9ed-419a-baf3-57aaaf0c56fe" containerName="account-auditor" Nov 28 07:26:52 crc kubenswrapper[4922]: I1128 07:26:52.394714 4922 memory_manager.go:354] "RemoveStaleState removing state" podUID="46c3d0a8-d9ed-419a-baf3-57aaaf0c56fe" containerName="object-auditor" Nov 28 07:26:52 crc kubenswrapper[4922]: I1128 07:26:52.394730 4922 memory_manager.go:354] "RemoveStaleState removing state" podUID="ec882eb7-01fb-4f7f-bad8-812346e5880e" containerName="ovs-vswitchd" Nov 28 07:26:52 crc kubenswrapper[4922]: I1128 07:26:52.394760 4922 memory_manager.go:354] "RemoveStaleState removing state" podUID="6ef73d39-2ed2-4168-8598-e0749aa0a26b" containerName="barbican-worker-log" Nov 28 07:26:52 crc kubenswrapper[4922]: I1128 07:26:52.394791 4922 memory_manager.go:354] "RemoveStaleState removing state" podUID="f1e0e318-5b90-4c18-ba95-fc261ffb519d" containerName="barbican-api" Nov 28 07:26:52 crc kubenswrapper[4922]: I1128 07:26:52.394809 4922 memory_manager.go:354] "RemoveStaleState removing state" podUID="cf191164-20d6-4d60-b111-6373616d9622" containerName="neutron-api" Nov 28 07:26:52 crc kubenswrapper[4922]: I1128 07:26:52.394827 4922 memory_manager.go:354] "RemoveStaleState removing state" podUID="182970fb-401f-404c-81c1-db0294b02167" containerName="glance-log" Nov 28 07:26:52 crc kubenswrapper[4922]: I1128 07:26:52.394845 4922 memory_manager.go:354] "RemoveStaleState removing state" podUID="7e1382f2-6597-4c09-a171-8709e4b9f5f7" containerName="nova-metadata-log" Nov 28 07:26:52 crc kubenswrapper[4922]: I1128 07:26:52.394868 4922 memory_manager.go:354] "RemoveStaleState removing state" podUID="f1e0e318-5b90-4c18-ba95-fc261ffb519d" containerName="barbican-api-log" Nov 28 07:26:52 crc kubenswrapper[4922]: I1128 07:26:52.394884 4922 memory_manager.go:354] "RemoveStaleState removing state" podUID="1d97532b-e9ff-4031-a82c-3db5e943bfd9" containerName="barbican-api" Nov 28 07:26:52 
crc kubenswrapper[4922]: I1128 07:26:52.394903 4922 memory_manager.go:354] "RemoveStaleState removing state" podUID="e070595b-ded5-4ba1-8e5d-10dee3f64439" containerName="openstack-network-exporter" Nov 28 07:26:52 crc kubenswrapper[4922]: I1128 07:26:52.394913 4922 memory_manager.go:354] "RemoveStaleState removing state" podUID="678d1f5b-5ebc-4b9e-b5ab-316ec7dfda05" containerName="probe" Nov 28 07:26:52 crc kubenswrapper[4922]: I1128 07:26:52.394935 4922 memory_manager.go:354] "RemoveStaleState removing state" podUID="4cf25acc-0d60-4b0a-a9c9-adc7ddce7458" containerName="rabbitmq" Nov 28 07:26:52 crc kubenswrapper[4922]: I1128 07:26:52.394951 4922 memory_manager.go:354] "RemoveStaleState removing state" podUID="e070595b-ded5-4ba1-8e5d-10dee3f64439" containerName="ovn-northd" Nov 28 07:26:52 crc kubenswrapper[4922]: I1128 07:26:52.395006 4922 memory_manager.go:354] "RemoveStaleState removing state" podUID="f8685dc8-7577-4076-8a5a-beba52e9bae7" containerName="barbican-worker" Nov 28 07:26:52 crc kubenswrapper[4922]: I1128 07:26:52.395024 4922 memory_manager.go:354] "RemoveStaleState removing state" podUID="92eb4ce5-fb24-4b33-8e79-6f4e7ba96372" containerName="mariadb-account-delete" Nov 28 07:26:52 crc kubenswrapper[4922]: I1128 07:26:52.395072 4922 memory_manager.go:354] "RemoveStaleState removing state" podUID="ec882eb7-01fb-4f7f-bad8-812346e5880e" containerName="ovsdb-server" Nov 28 07:26:52 crc kubenswrapper[4922]: I1128 07:26:52.395084 4922 memory_manager.go:354] "RemoveStaleState removing state" podUID="182970fb-401f-404c-81c1-db0294b02167" containerName="glance-httpd" Nov 28 07:26:52 crc kubenswrapper[4922]: I1128 07:26:52.395100 4922 memory_manager.go:354] "RemoveStaleState removing state" podUID="c038b865-4b32-4be3-9e0a-8c40dc140a68" containerName="cinder-api" Nov 28 07:26:52 crc kubenswrapper[4922]: I1128 07:26:52.395115 4922 memory_manager.go:354] "RemoveStaleState removing state" podUID="d27299ac-7d8d-4485-86fb-6ac7f34ea1ae" containerName="ovsdbserver-nb" Nov 28 07:26:52 crc kubenswrapper[4922]: I1128 07:26:52.395155 4922 memory_manager.go:354] "RemoveStaleState removing state" podUID="98e654e6-cf7b-469f-aa60-118fee0e3764" containerName="nova-cell1-novncproxy-novncproxy" Nov 28 07:26:52 crc kubenswrapper[4922]: I1128 07:26:52.395171 4922 memory_manager.go:354] "RemoveStaleState removing state" podUID="1f339784-df58-44f7-947d-9d80559c1c0c" containerName="mariadb-account-delete" Nov 28 07:26:52 crc kubenswrapper[4922]: I1128 07:26:52.395184 4922 memory_manager.go:354] "RemoveStaleState removing state" podUID="454a2683-850f-4ce0-8ebe-7758105dd255" containerName="barbican-keystone-listener-log" Nov 28 07:26:52 crc kubenswrapper[4922]: I1128 07:26:52.395252 4922 memory_manager.go:354] "RemoveStaleState removing state" podUID="c39356f2-8f5d-45d3-8188-7d9428c4d8bf" containerName="galera" Nov 28 07:26:52 crc kubenswrapper[4922]: I1128 07:26:52.395269 4922 memory_manager.go:354] "RemoveStaleState removing state" podUID="678d1f5b-5ebc-4b9e-b5ab-316ec7dfda05" containerName="cinder-scheduler" Nov 28 07:26:52 crc kubenswrapper[4922]: I1128 07:26:52.395279 4922 memory_manager.go:354] "RemoveStaleState removing state" podUID="c5b2a607-b6c1-4e95-b722-8b150c25f371" containerName="proxy-httpd" Nov 28 07:26:52 crc kubenswrapper[4922]: I1128 07:26:52.395303 4922 memory_manager.go:354] "RemoveStaleState removing state" podUID="2425a44a-c3c8-4533-9aa6-deb657556efb" containerName="mariadb-account-delete" Nov 28 07:26:52 crc kubenswrapper[4922]: I1128 07:26:52.395346 4922 memory_manager.go:354] 
"RemoveStaleState removing state" podUID="7dfc2e52-b959-4718-8f85-5bcec1a8ad10" containerName="nova-cell0-conductor-conductor" Nov 28 07:26:52 crc kubenswrapper[4922]: I1128 07:26:52.395380 4922 memory_manager.go:354] "RemoveStaleState removing state" podUID="c5b2a607-b6c1-4e95-b722-8b150c25f371" containerName="sg-core" Nov 28 07:26:52 crc kubenswrapper[4922]: I1128 07:26:52.395435 4922 memory_manager.go:354] "RemoveStaleState removing state" podUID="ed30601b-1e7c-4aa6-8469-8ff61cd93253" containerName="nova-api-log" Nov 28 07:26:52 crc kubenswrapper[4922]: I1128 07:26:52.395446 4922 memory_manager.go:354] "RemoveStaleState removing state" podUID="46c3d0a8-d9ed-419a-baf3-57aaaf0c56fe" containerName="account-server" Nov 28 07:26:52 crc kubenswrapper[4922]: I1128 07:26:52.395464 4922 memory_manager.go:354] "RemoveStaleState removing state" podUID="0cd494ee-6c17-4d94-96c2-9e2fcf02b2bc" containerName="glance-log" Nov 28 07:26:52 crc kubenswrapper[4922]: I1128 07:26:52.395513 4922 memory_manager.go:354] "RemoveStaleState removing state" podUID="21fdc98b-667f-44b1-9fae-87f96ba4b514" containerName="registry-server" Nov 28 07:26:52 crc kubenswrapper[4922]: I1128 07:26:52.395527 4922 memory_manager.go:354] "RemoveStaleState removing state" podUID="3e442fd0-cd46-4c04-afb3-96892d39c0f4" containerName="mariadb-account-delete" Nov 28 07:26:52 crc kubenswrapper[4922]: I1128 07:26:52.395543 4922 memory_manager.go:354] "RemoveStaleState removing state" podUID="0cd494ee-6c17-4d94-96c2-9e2fcf02b2bc" containerName="glance-httpd" Nov 28 07:26:52 crc kubenswrapper[4922]: I1128 07:26:52.395583 4922 memory_manager.go:354] "RemoveStaleState removing state" podUID="99708a5d-57d5-4479-8e09-94428bb13fa3" containerName="rabbitmq" Nov 28 07:26:52 crc kubenswrapper[4922]: I1128 07:26:52.395602 4922 memory_manager.go:354] "RemoveStaleState removing state" podUID="46c3d0a8-d9ed-419a-baf3-57aaaf0c56fe" containerName="object-server" Nov 28 07:26:52 crc kubenswrapper[4922]: I1128 07:26:52.395614 4922 memory_manager.go:354] "RemoveStaleState removing state" podUID="46c3d0a8-d9ed-419a-baf3-57aaaf0c56fe" containerName="object-updater" Nov 28 07:26:52 crc kubenswrapper[4922]: I1128 07:26:52.395624 4922 memory_manager.go:354] "RemoveStaleState removing state" podUID="e5e01f31-28bd-46a2-b5cc-695c485deaf6" containerName="ovn-controller" Nov 28 07:26:52 crc kubenswrapper[4922]: I1128 07:26:52.395672 4922 memory_manager.go:354] "RemoveStaleState removing state" podUID="d27299ac-7d8d-4485-86fb-6ac7f34ea1ae" containerName="openstack-network-exporter" Nov 28 07:26:52 crc kubenswrapper[4922]: I1128 07:26:52.395681 4922 memory_manager.go:354] "RemoveStaleState removing state" podUID="46c3d0a8-d9ed-419a-baf3-57aaaf0c56fe" containerName="container-updater" Nov 28 07:26:52 crc kubenswrapper[4922]: I1128 07:26:52.395694 4922 memory_manager.go:354] "RemoveStaleState removing state" podUID="c10f3b66-a7e0-4690-939a-5938de689b3a" containerName="proxy-server" Nov 28 07:26:52 crc kubenswrapper[4922]: I1128 07:26:52.395710 4922 memory_manager.go:354] "RemoveStaleState removing state" podUID="c5b2a607-b6c1-4e95-b722-8b150c25f371" containerName="ceilometer-central-agent" Nov 28 07:26:52 crc kubenswrapper[4922]: I1128 07:26:52.395745 4922 memory_manager.go:354] "RemoveStaleState removing state" podUID="6ef73d39-2ed2-4168-8598-e0749aa0a26b" containerName="barbican-worker" Nov 28 07:26:52 crc kubenswrapper[4922]: I1128 07:26:52.395770 4922 memory_manager.go:354] "RemoveStaleState removing state" podUID="cf191164-20d6-4d60-b111-6373616d9622" 
containerName="neutron-httpd" Nov 28 07:26:52 crc kubenswrapper[4922]: I1128 07:26:52.395784 4922 memory_manager.go:354] "RemoveStaleState removing state" podUID="46c3d0a8-d9ed-419a-baf3-57aaaf0c56fe" containerName="account-replicator" Nov 28 07:26:52 crc kubenswrapper[4922]: I1128 07:26:52.395793 4922 memory_manager.go:354] "RemoveStaleState removing state" podUID="5ece1333-c457-4099-bf00-1daa969a14dc" containerName="barbican-keystone-listener" Nov 28 07:26:52 crc kubenswrapper[4922]: I1128 07:26:52.395836 4922 memory_manager.go:354] "RemoveStaleState removing state" podUID="c10f3b66-a7e0-4690-939a-5938de689b3a" containerName="proxy-httpd" Nov 28 07:26:52 crc kubenswrapper[4922]: I1128 07:26:52.395847 4922 memory_manager.go:354] "RemoveStaleState removing state" podUID="46c3d0a8-d9ed-419a-baf3-57aaaf0c56fe" containerName="rsync" Nov 28 07:26:52 crc kubenswrapper[4922]: I1128 07:26:52.395859 4922 memory_manager.go:354] "RemoveStaleState removing state" podUID="454a2683-850f-4ce0-8ebe-7758105dd255" containerName="barbican-keystone-listener" Nov 28 07:26:52 crc kubenswrapper[4922]: I1128 07:26:52.395881 4922 memory_manager.go:354] "RemoveStaleState removing state" podUID="81469087-f8d4-4499-a1e3-9fe103758289" containerName="keystone-api" Nov 28 07:26:52 crc kubenswrapper[4922]: I1128 07:26:52.395926 4922 memory_manager.go:354] "RemoveStaleState removing state" podUID="ed30601b-1e7c-4aa6-8469-8ff61cd93253" containerName="nova-api-api" Nov 28 07:26:52 crc kubenswrapper[4922]: I1128 07:26:52.395942 4922 memory_manager.go:354] "RemoveStaleState removing state" podUID="ca6d488f-6085-4e22-a325-1b749d8c154c" containerName="mariadb-account-delete" Nov 28 07:26:52 crc kubenswrapper[4922]: I1128 07:26:52.395957 4922 memory_manager.go:354] "RemoveStaleState removing state" podUID="7c0f0857-2ca9-49c8-90ac-1351b2ee2f11" containerName="nova-scheduler-scheduler" Nov 28 07:26:52 crc kubenswrapper[4922]: I1128 07:26:52.395996 4922 memory_manager.go:354] "RemoveStaleState removing state" podUID="7e1382f2-6597-4c09-a171-8709e4b9f5f7" containerName="nova-metadata-metadata" Nov 28 07:26:52 crc kubenswrapper[4922]: I1128 07:26:52.396007 4922 memory_manager.go:354] "RemoveStaleState removing state" podUID="46c3d0a8-d9ed-419a-baf3-57aaaf0c56fe" containerName="account-reaper" Nov 28 07:26:52 crc kubenswrapper[4922]: I1128 07:26:52.396029 4922 memory_manager.go:354] "RemoveStaleState removing state" podUID="349fc74f-b0ac-437d-89ab-7106192b8e9e" containerName="galera" Nov 28 07:26:52 crc kubenswrapper[4922]: I1128 07:26:52.396069 4922 memory_manager.go:354] "RemoveStaleState removing state" podUID="5ece1333-c457-4099-bf00-1daa969a14dc" containerName="barbican-keystone-listener-log" Nov 28 07:26:52 crc kubenswrapper[4922]: I1128 07:26:52.396090 4922 memory_manager.go:354] "RemoveStaleState removing state" podUID="02d26a97-d447-4f76-90ed-9357e343cd91" containerName="placement-log" Nov 28 07:26:52 crc kubenswrapper[4922]: I1128 07:26:52.396109 4922 memory_manager.go:354] "RemoveStaleState removing state" podUID="46c3d0a8-d9ed-419a-baf3-57aaaf0c56fe" containerName="container-auditor" Nov 28 07:26:52 crc kubenswrapper[4922]: I1128 07:26:52.396120 4922 memory_manager.go:354] "RemoveStaleState removing state" podUID="f672d6bb-97fc-4547-a14b-af27d631fe2a" containerName="kube-state-metrics" Nov 28 07:26:52 crc kubenswrapper[4922]: I1128 07:26:52.399029 4922 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-rnfv5" Nov 28 07:26:52 crc kubenswrapper[4922]: I1128 07:26:52.409024 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-rnfv5"] Nov 28 07:26:52 crc kubenswrapper[4922]: I1128 07:26:52.511816 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/98a33cd4-34cc-4ec2-a3d5-d15dabfb2a32-catalog-content\") pod \"redhat-marketplace-rnfv5\" (UID: \"98a33cd4-34cc-4ec2-a3d5-d15dabfb2a32\") " pod="openshift-marketplace/redhat-marketplace-rnfv5" Nov 28 07:26:52 crc kubenswrapper[4922]: I1128 07:26:52.511931 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/98a33cd4-34cc-4ec2-a3d5-d15dabfb2a32-utilities\") pod \"redhat-marketplace-rnfv5\" (UID: \"98a33cd4-34cc-4ec2-a3d5-d15dabfb2a32\") " pod="openshift-marketplace/redhat-marketplace-rnfv5" Nov 28 07:26:52 crc kubenswrapper[4922]: I1128 07:26:52.512004 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qnpqc\" (UniqueName: \"kubernetes.io/projected/98a33cd4-34cc-4ec2-a3d5-d15dabfb2a32-kube-api-access-qnpqc\") pod \"redhat-marketplace-rnfv5\" (UID: \"98a33cd4-34cc-4ec2-a3d5-d15dabfb2a32\") " pod="openshift-marketplace/redhat-marketplace-rnfv5" Nov 28 07:26:52 crc kubenswrapper[4922]: I1128 07:26:52.613853 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qnpqc\" (UniqueName: \"kubernetes.io/projected/98a33cd4-34cc-4ec2-a3d5-d15dabfb2a32-kube-api-access-qnpqc\") pod \"redhat-marketplace-rnfv5\" (UID: \"98a33cd4-34cc-4ec2-a3d5-d15dabfb2a32\") " pod="openshift-marketplace/redhat-marketplace-rnfv5" Nov 28 07:26:52 crc kubenswrapper[4922]: I1128 07:26:52.614167 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/98a33cd4-34cc-4ec2-a3d5-d15dabfb2a32-catalog-content\") pod \"redhat-marketplace-rnfv5\" (UID: \"98a33cd4-34cc-4ec2-a3d5-d15dabfb2a32\") " pod="openshift-marketplace/redhat-marketplace-rnfv5" Nov 28 07:26:52 crc kubenswrapper[4922]: I1128 07:26:52.614324 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/98a33cd4-34cc-4ec2-a3d5-d15dabfb2a32-utilities\") pod \"redhat-marketplace-rnfv5\" (UID: \"98a33cd4-34cc-4ec2-a3d5-d15dabfb2a32\") " pod="openshift-marketplace/redhat-marketplace-rnfv5" Nov 28 07:26:52 crc kubenswrapper[4922]: I1128 07:26:52.614717 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/98a33cd4-34cc-4ec2-a3d5-d15dabfb2a32-catalog-content\") pod \"redhat-marketplace-rnfv5\" (UID: \"98a33cd4-34cc-4ec2-a3d5-d15dabfb2a32\") " pod="openshift-marketplace/redhat-marketplace-rnfv5" Nov 28 07:26:52 crc kubenswrapper[4922]: I1128 07:26:52.615042 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/98a33cd4-34cc-4ec2-a3d5-d15dabfb2a32-utilities\") pod \"redhat-marketplace-rnfv5\" (UID: \"98a33cd4-34cc-4ec2-a3d5-d15dabfb2a32\") " pod="openshift-marketplace/redhat-marketplace-rnfv5" Nov 28 07:26:52 crc kubenswrapper[4922]: I1128 07:26:52.633490 4922 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"kube-api-access-qnpqc\" (UniqueName: \"kubernetes.io/projected/98a33cd4-34cc-4ec2-a3d5-d15dabfb2a32-kube-api-access-qnpqc\") pod \"redhat-marketplace-rnfv5\" (UID: \"98a33cd4-34cc-4ec2-a3d5-d15dabfb2a32\") " pod="openshift-marketplace/redhat-marketplace-rnfv5" Nov 28 07:26:52 crc kubenswrapper[4922]: I1128 07:26:52.752115 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-rnfv5" Nov 28 07:26:53 crc kubenswrapper[4922]: I1128 07:26:53.172269 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-rnfv5"] Nov 28 07:26:53 crc kubenswrapper[4922]: I1128 07:26:53.415335 4922 generic.go:334] "Generic (PLEG): container finished" podID="98a33cd4-34cc-4ec2-a3d5-d15dabfb2a32" containerID="00cc542b2a0ac4b0e8d41828ad2234253d5b0d940d22d904bfa5c5abf4b7baa5" exitCode=0 Nov 28 07:26:53 crc kubenswrapper[4922]: I1128 07:26:53.415410 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-rnfv5" event={"ID":"98a33cd4-34cc-4ec2-a3d5-d15dabfb2a32","Type":"ContainerDied","Data":"00cc542b2a0ac4b0e8d41828ad2234253d5b0d940d22d904bfa5c5abf4b7baa5"} Nov 28 07:26:53 crc kubenswrapper[4922]: I1128 07:26:53.416783 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-rnfv5" event={"ID":"98a33cd4-34cc-4ec2-a3d5-d15dabfb2a32","Type":"ContainerStarted","Data":"88d9cd4ac49c6bc4f1009535ff70473f4a31f2baf290cde1db6b6387309d754b"} Nov 28 07:26:53 crc kubenswrapper[4922]: I1128 07:26:53.417126 4922 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 28 07:26:54 crc kubenswrapper[4922]: I1128 07:26:54.428830 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-rnfv5" event={"ID":"98a33cd4-34cc-4ec2-a3d5-d15dabfb2a32","Type":"ContainerStarted","Data":"69fbc7b484de4b14bd0a919938e57cd1f87dc9ed85306a6899c8c6850e6f4fd9"} Nov 28 07:26:55 crc kubenswrapper[4922]: I1128 07:26:55.442134 4922 generic.go:334] "Generic (PLEG): container finished" podID="98a33cd4-34cc-4ec2-a3d5-d15dabfb2a32" containerID="69fbc7b484de4b14bd0a919938e57cd1f87dc9ed85306a6899c8c6850e6f4fd9" exitCode=0 Nov 28 07:26:55 crc kubenswrapper[4922]: I1128 07:26:55.442271 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-rnfv5" event={"ID":"98a33cd4-34cc-4ec2-a3d5-d15dabfb2a32","Type":"ContainerDied","Data":"69fbc7b484de4b14bd0a919938e57cd1f87dc9ed85306a6899c8c6850e6f4fd9"} Nov 28 07:26:56 crc kubenswrapper[4922]: I1128 07:26:56.455519 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-rnfv5" event={"ID":"98a33cd4-34cc-4ec2-a3d5-d15dabfb2a32","Type":"ContainerStarted","Data":"4f2f5526ed60df58fd2cbc659d9b2cdd6099d61af2ddf7f7f6dd44c2339b7e2e"} Nov 28 07:26:56 crc kubenswrapper[4922]: I1128 07:26:56.490052 4922 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-rnfv5" podStartSLOduration=2.037576758 podStartE2EDuration="4.490022436s" podCreationTimestamp="2025-11-28 07:26:52 +0000 UTC" firstStartedPulling="2025-11-28 07:26:53.416634248 +0000 UTC m=+2058.337029830" lastFinishedPulling="2025-11-28 07:26:55.869079916 +0000 UTC m=+2060.789475508" observedRunningTime="2025-11-28 07:26:56.48096506 +0000 UTC m=+2061.401360672" watchObservedRunningTime="2025-11-28 07:26:56.490022436 +0000 UTC 
m=+2061.410418028" Nov 28 07:26:57 crc kubenswrapper[4922]: I1128 07:26:57.312326 4922 patch_prober.go:28] interesting pod/machine-config-daemon-h8wk6 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 07:26:57 crc kubenswrapper[4922]: I1128 07:26:57.312390 4922 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-h8wk6" podUID="0498340a-5e95-42bf-a0a6-8ac89a6b8858" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 07:27:02 crc kubenswrapper[4922]: I1128 07:27:02.753266 4922 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-rnfv5" Nov 28 07:27:02 crc kubenswrapper[4922]: I1128 07:27:02.753834 4922 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-rnfv5" Nov 28 07:27:02 crc kubenswrapper[4922]: I1128 07:27:02.812521 4922 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-rnfv5" Nov 28 07:27:03 crc kubenswrapper[4922]: I1128 07:27:03.587406 4922 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-rnfv5" Nov 28 07:27:03 crc kubenswrapper[4922]: I1128 07:27:03.640706 4922 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-rnfv5"] Nov 28 07:27:05 crc kubenswrapper[4922]: I1128 07:27:05.540934 4922 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-rnfv5" podUID="98a33cd4-34cc-4ec2-a3d5-d15dabfb2a32" containerName="registry-server" containerID="cri-o://4f2f5526ed60df58fd2cbc659d9b2cdd6099d61af2ddf7f7f6dd44c2339b7e2e" gracePeriod=2 Nov 28 07:27:06 crc kubenswrapper[4922]: I1128 07:27:06.490979 4922 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-rnfv5" Nov 28 07:27:06 crc kubenswrapper[4922]: I1128 07:27:06.549549 4922 generic.go:334] "Generic (PLEG): container finished" podID="98a33cd4-34cc-4ec2-a3d5-d15dabfb2a32" containerID="4f2f5526ed60df58fd2cbc659d9b2cdd6099d61af2ddf7f7f6dd44c2339b7e2e" exitCode=0 Nov 28 07:27:06 crc kubenswrapper[4922]: I1128 07:27:06.549594 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-rnfv5" event={"ID":"98a33cd4-34cc-4ec2-a3d5-d15dabfb2a32","Type":"ContainerDied","Data":"4f2f5526ed60df58fd2cbc659d9b2cdd6099d61af2ddf7f7f6dd44c2339b7e2e"} Nov 28 07:27:06 crc kubenswrapper[4922]: I1128 07:27:06.549624 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-rnfv5" event={"ID":"98a33cd4-34cc-4ec2-a3d5-d15dabfb2a32","Type":"ContainerDied","Data":"88d9cd4ac49c6bc4f1009535ff70473f4a31f2baf290cde1db6b6387309d754b"} Nov 28 07:27:06 crc kubenswrapper[4922]: I1128 07:27:06.549645 4922 scope.go:117] "RemoveContainer" containerID="4f2f5526ed60df58fd2cbc659d9b2cdd6099d61af2ddf7f7f6dd44c2339b7e2e" Nov 28 07:27:06 crc kubenswrapper[4922]: I1128 07:27:06.549770 4922 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-rnfv5" Nov 28 07:27:06 crc kubenswrapper[4922]: I1128 07:27:06.570019 4922 scope.go:117] "RemoveContainer" containerID="69fbc7b484de4b14bd0a919938e57cd1f87dc9ed85306a6899c8c6850e6f4fd9" Nov 28 07:27:06 crc kubenswrapper[4922]: I1128 07:27:06.594426 4922 scope.go:117] "RemoveContainer" containerID="00cc542b2a0ac4b0e8d41828ad2234253d5b0d940d22d904bfa5c5abf4b7baa5" Nov 28 07:27:06 crc kubenswrapper[4922]: I1128 07:27:06.612630 4922 scope.go:117] "RemoveContainer" containerID="4f2f5526ed60df58fd2cbc659d9b2cdd6099d61af2ddf7f7f6dd44c2339b7e2e" Nov 28 07:27:06 crc kubenswrapper[4922]: E1128 07:27:06.613057 4922 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4f2f5526ed60df58fd2cbc659d9b2cdd6099d61af2ddf7f7f6dd44c2339b7e2e\": container with ID starting with 4f2f5526ed60df58fd2cbc659d9b2cdd6099d61af2ddf7f7f6dd44c2339b7e2e not found: ID does not exist" containerID="4f2f5526ed60df58fd2cbc659d9b2cdd6099d61af2ddf7f7f6dd44c2339b7e2e" Nov 28 07:27:06 crc kubenswrapper[4922]: I1128 07:27:06.613115 4922 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4f2f5526ed60df58fd2cbc659d9b2cdd6099d61af2ddf7f7f6dd44c2339b7e2e"} err="failed to get container status \"4f2f5526ed60df58fd2cbc659d9b2cdd6099d61af2ddf7f7f6dd44c2339b7e2e\": rpc error: code = NotFound desc = could not find container \"4f2f5526ed60df58fd2cbc659d9b2cdd6099d61af2ddf7f7f6dd44c2339b7e2e\": container with ID starting with 4f2f5526ed60df58fd2cbc659d9b2cdd6099d61af2ddf7f7f6dd44c2339b7e2e not found: ID does not exist" Nov 28 07:27:06 crc kubenswrapper[4922]: I1128 07:27:06.613146 4922 scope.go:117] "RemoveContainer" containerID="69fbc7b484de4b14bd0a919938e57cd1f87dc9ed85306a6899c8c6850e6f4fd9" Nov 28 07:27:06 crc kubenswrapper[4922]: E1128 07:27:06.613610 4922 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"69fbc7b484de4b14bd0a919938e57cd1f87dc9ed85306a6899c8c6850e6f4fd9\": container with ID starting with 69fbc7b484de4b14bd0a919938e57cd1f87dc9ed85306a6899c8c6850e6f4fd9 not found: ID does not exist" containerID="69fbc7b484de4b14bd0a919938e57cd1f87dc9ed85306a6899c8c6850e6f4fd9" Nov 28 07:27:06 crc kubenswrapper[4922]: I1128 07:27:06.613663 4922 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"69fbc7b484de4b14bd0a919938e57cd1f87dc9ed85306a6899c8c6850e6f4fd9"} err="failed to get container status \"69fbc7b484de4b14bd0a919938e57cd1f87dc9ed85306a6899c8c6850e6f4fd9\": rpc error: code = NotFound desc = could not find container \"69fbc7b484de4b14bd0a919938e57cd1f87dc9ed85306a6899c8c6850e6f4fd9\": container with ID starting with 69fbc7b484de4b14bd0a919938e57cd1f87dc9ed85306a6899c8c6850e6f4fd9 not found: ID does not exist" Nov 28 07:27:06 crc kubenswrapper[4922]: I1128 07:27:06.613696 4922 scope.go:117] "RemoveContainer" containerID="00cc542b2a0ac4b0e8d41828ad2234253d5b0d940d22d904bfa5c5abf4b7baa5" Nov 28 07:27:06 crc kubenswrapper[4922]: E1128 07:27:06.614079 4922 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"00cc542b2a0ac4b0e8d41828ad2234253d5b0d940d22d904bfa5c5abf4b7baa5\": container with ID starting with 00cc542b2a0ac4b0e8d41828ad2234253d5b0d940d22d904bfa5c5abf4b7baa5 not found: ID does not exist" containerID="00cc542b2a0ac4b0e8d41828ad2234253d5b0d940d22d904bfa5c5abf4b7baa5" 
Nov 28 07:27:06 crc kubenswrapper[4922]: I1128 07:27:06.614124 4922 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"00cc542b2a0ac4b0e8d41828ad2234253d5b0d940d22d904bfa5c5abf4b7baa5"} err="failed to get container status \"00cc542b2a0ac4b0e8d41828ad2234253d5b0d940d22d904bfa5c5abf4b7baa5\": rpc error: code = NotFound desc = could not find container \"00cc542b2a0ac4b0e8d41828ad2234253d5b0d940d22d904bfa5c5abf4b7baa5\": container with ID starting with 00cc542b2a0ac4b0e8d41828ad2234253d5b0d940d22d904bfa5c5abf4b7baa5 not found: ID does not exist" Nov 28 07:27:06 crc kubenswrapper[4922]: I1128 07:27:06.619120 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qnpqc\" (UniqueName: \"kubernetes.io/projected/98a33cd4-34cc-4ec2-a3d5-d15dabfb2a32-kube-api-access-qnpqc\") pod \"98a33cd4-34cc-4ec2-a3d5-d15dabfb2a32\" (UID: \"98a33cd4-34cc-4ec2-a3d5-d15dabfb2a32\") " Nov 28 07:27:06 crc kubenswrapper[4922]: I1128 07:27:06.619196 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/98a33cd4-34cc-4ec2-a3d5-d15dabfb2a32-utilities\") pod \"98a33cd4-34cc-4ec2-a3d5-d15dabfb2a32\" (UID: \"98a33cd4-34cc-4ec2-a3d5-d15dabfb2a32\") " Nov 28 07:27:06 crc kubenswrapper[4922]: I1128 07:27:06.619254 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/98a33cd4-34cc-4ec2-a3d5-d15dabfb2a32-catalog-content\") pod \"98a33cd4-34cc-4ec2-a3d5-d15dabfb2a32\" (UID: \"98a33cd4-34cc-4ec2-a3d5-d15dabfb2a32\") " Nov 28 07:27:06 crc kubenswrapper[4922]: I1128 07:27:06.619995 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/98a33cd4-34cc-4ec2-a3d5-d15dabfb2a32-utilities" (OuterVolumeSpecName: "utilities") pod "98a33cd4-34cc-4ec2-a3d5-d15dabfb2a32" (UID: "98a33cd4-34cc-4ec2-a3d5-d15dabfb2a32"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 07:27:06 crc kubenswrapper[4922]: I1128 07:27:06.624168 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/98a33cd4-34cc-4ec2-a3d5-d15dabfb2a32-kube-api-access-qnpqc" (OuterVolumeSpecName: "kube-api-access-qnpqc") pod "98a33cd4-34cc-4ec2-a3d5-d15dabfb2a32" (UID: "98a33cd4-34cc-4ec2-a3d5-d15dabfb2a32"). InnerVolumeSpecName "kube-api-access-qnpqc". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 07:27:06 crc kubenswrapper[4922]: I1128 07:27:06.638278 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/98a33cd4-34cc-4ec2-a3d5-d15dabfb2a32-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "98a33cd4-34cc-4ec2-a3d5-d15dabfb2a32" (UID: "98a33cd4-34cc-4ec2-a3d5-d15dabfb2a32"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 07:27:06 crc kubenswrapper[4922]: I1128 07:27:06.720878 4922 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qnpqc\" (UniqueName: \"kubernetes.io/projected/98a33cd4-34cc-4ec2-a3d5-d15dabfb2a32-kube-api-access-qnpqc\") on node \"crc\" DevicePath \"\"" Nov 28 07:27:06 crc kubenswrapper[4922]: I1128 07:27:06.720917 4922 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/98a33cd4-34cc-4ec2-a3d5-d15dabfb2a32-utilities\") on node \"crc\" DevicePath \"\"" Nov 28 07:27:06 crc kubenswrapper[4922]: I1128 07:27:06.720930 4922 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/98a33cd4-34cc-4ec2-a3d5-d15dabfb2a32-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 28 07:27:06 crc kubenswrapper[4922]: I1128 07:27:06.900966 4922 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-rnfv5"] Nov 28 07:27:06 crc kubenswrapper[4922]: I1128 07:27:06.909863 4922 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-rnfv5"] Nov 28 07:27:07 crc kubenswrapper[4922]: I1128 07:27:07.413260 4922 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="98a33cd4-34cc-4ec2-a3d5-d15dabfb2a32" path="/var/lib/kubelet/pods/98a33cd4-34cc-4ec2-a3d5-d15dabfb2a32/volumes" Nov 28 07:27:27 crc kubenswrapper[4922]: I1128 07:27:27.312132 4922 patch_prober.go:28] interesting pod/machine-config-daemon-h8wk6 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 07:27:27 crc kubenswrapper[4922]: I1128 07:27:27.312849 4922 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-h8wk6" podUID="0498340a-5e95-42bf-a0a6-8ac89a6b8858" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 07:27:57 crc kubenswrapper[4922]: I1128 07:27:57.311935 4922 patch_prober.go:28] interesting pod/machine-config-daemon-h8wk6 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 07:27:57 crc kubenswrapper[4922]: I1128 07:27:57.312779 4922 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-h8wk6" podUID="0498340a-5e95-42bf-a0a6-8ac89a6b8858" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 07:27:57 crc kubenswrapper[4922]: I1128 07:27:57.312988 4922 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-h8wk6" Nov 28 07:27:57 crc kubenswrapper[4922]: I1128 07:27:57.313967 4922 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"97cf555779e0029b2b84ed5757414d94cd98ef367047f53959a1e37429ebd992"} pod="openshift-machine-config-operator/machine-config-daemon-h8wk6" containerMessage="Container machine-config-daemon 
failed liveness probe, will be restarted" Nov 28 07:27:57 crc kubenswrapper[4922]: I1128 07:27:57.314058 4922 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-h8wk6" podUID="0498340a-5e95-42bf-a0a6-8ac89a6b8858" containerName="machine-config-daemon" containerID="cri-o://97cf555779e0029b2b84ed5757414d94cd98ef367047f53959a1e37429ebd992" gracePeriod=600 Nov 28 07:27:58 crc kubenswrapper[4922]: I1128 07:27:58.027757 4922 generic.go:334] "Generic (PLEG): container finished" podID="0498340a-5e95-42bf-a0a6-8ac89a6b8858" containerID="97cf555779e0029b2b84ed5757414d94cd98ef367047f53959a1e37429ebd992" exitCode=0 Nov 28 07:27:58 crc kubenswrapper[4922]: I1128 07:27:58.027897 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-h8wk6" event={"ID":"0498340a-5e95-42bf-a0a6-8ac89a6b8858","Type":"ContainerDied","Data":"97cf555779e0029b2b84ed5757414d94cd98ef367047f53959a1e37429ebd992"} Nov 28 07:27:58 crc kubenswrapper[4922]: I1128 07:27:58.028126 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-h8wk6" event={"ID":"0498340a-5e95-42bf-a0a6-8ac89a6b8858","Type":"ContainerStarted","Data":"cdad39abe9139ea49bf9818f51213625cb00da5121e90b9df2fb542689a47216"} Nov 28 07:27:58 crc kubenswrapper[4922]: I1128 07:27:58.028160 4922 scope.go:117] "RemoveContainer" containerID="50188513677cb6b14ed255820648795c8e06f68b16a46dc87e268985d568770f" Nov 28 07:28:21 crc kubenswrapper[4922]: I1128 07:28:21.298633 4922 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-jsxnv"] Nov 28 07:28:21 crc kubenswrapper[4922]: E1128 07:28:21.299964 4922 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="98a33cd4-34cc-4ec2-a3d5-d15dabfb2a32" containerName="extract-utilities" Nov 28 07:28:21 crc kubenswrapper[4922]: I1128 07:28:21.299996 4922 state_mem.go:107] "Deleted CPUSet assignment" podUID="98a33cd4-34cc-4ec2-a3d5-d15dabfb2a32" containerName="extract-utilities" Nov 28 07:28:21 crc kubenswrapper[4922]: E1128 07:28:21.300014 4922 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="98a33cd4-34cc-4ec2-a3d5-d15dabfb2a32" containerName="extract-content" Nov 28 07:28:21 crc kubenswrapper[4922]: I1128 07:28:21.300030 4922 state_mem.go:107] "Deleted CPUSet assignment" podUID="98a33cd4-34cc-4ec2-a3d5-d15dabfb2a32" containerName="extract-content" Nov 28 07:28:21 crc kubenswrapper[4922]: E1128 07:28:21.300065 4922 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="98a33cd4-34cc-4ec2-a3d5-d15dabfb2a32" containerName="registry-server" Nov 28 07:28:21 crc kubenswrapper[4922]: I1128 07:28:21.300082 4922 state_mem.go:107] "Deleted CPUSet assignment" podUID="98a33cd4-34cc-4ec2-a3d5-d15dabfb2a32" containerName="registry-server" Nov 28 07:28:21 crc kubenswrapper[4922]: I1128 07:28:21.300419 4922 memory_manager.go:354] "RemoveStaleState removing state" podUID="98a33cd4-34cc-4ec2-a3d5-d15dabfb2a32" containerName="registry-server" Nov 28 07:28:21 crc kubenswrapper[4922]: I1128 07:28:21.302130 4922 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-jsxnv" Nov 28 07:28:21 crc kubenswrapper[4922]: I1128 07:28:21.364588 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-jsxnv"] Nov 28 07:28:21 crc kubenswrapper[4922]: I1128 07:28:21.451785 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/84cc1469-0b62-45aa-8a99-16a6784befa6-utilities\") pod \"certified-operators-jsxnv\" (UID: \"84cc1469-0b62-45aa-8a99-16a6784befa6\") " pod="openshift-marketplace/certified-operators-jsxnv" Nov 28 07:28:21 crc kubenswrapper[4922]: I1128 07:28:21.451877 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/84cc1469-0b62-45aa-8a99-16a6784befa6-catalog-content\") pod \"certified-operators-jsxnv\" (UID: \"84cc1469-0b62-45aa-8a99-16a6784befa6\") " pod="openshift-marketplace/certified-operators-jsxnv" Nov 28 07:28:21 crc kubenswrapper[4922]: I1128 07:28:21.451899 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zzm89\" (UniqueName: \"kubernetes.io/projected/84cc1469-0b62-45aa-8a99-16a6784befa6-kube-api-access-zzm89\") pod \"certified-operators-jsxnv\" (UID: \"84cc1469-0b62-45aa-8a99-16a6784befa6\") " pod="openshift-marketplace/certified-operators-jsxnv" Nov 28 07:28:21 crc kubenswrapper[4922]: I1128 07:28:21.552750 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/84cc1469-0b62-45aa-8a99-16a6784befa6-utilities\") pod \"certified-operators-jsxnv\" (UID: \"84cc1469-0b62-45aa-8a99-16a6784befa6\") " pod="openshift-marketplace/certified-operators-jsxnv" Nov 28 07:28:21 crc kubenswrapper[4922]: I1128 07:28:21.553142 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/84cc1469-0b62-45aa-8a99-16a6784befa6-catalog-content\") pod \"certified-operators-jsxnv\" (UID: \"84cc1469-0b62-45aa-8a99-16a6784befa6\") " pod="openshift-marketplace/certified-operators-jsxnv" Nov 28 07:28:21 crc kubenswrapper[4922]: I1128 07:28:21.553162 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zzm89\" (UniqueName: \"kubernetes.io/projected/84cc1469-0b62-45aa-8a99-16a6784befa6-kube-api-access-zzm89\") pod \"certified-operators-jsxnv\" (UID: \"84cc1469-0b62-45aa-8a99-16a6784befa6\") " pod="openshift-marketplace/certified-operators-jsxnv" Nov 28 07:28:21 crc kubenswrapper[4922]: I1128 07:28:21.554374 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/84cc1469-0b62-45aa-8a99-16a6784befa6-utilities\") pod \"certified-operators-jsxnv\" (UID: \"84cc1469-0b62-45aa-8a99-16a6784befa6\") " pod="openshift-marketplace/certified-operators-jsxnv" Nov 28 07:28:21 crc kubenswrapper[4922]: I1128 07:28:21.554496 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/84cc1469-0b62-45aa-8a99-16a6784befa6-catalog-content\") pod \"certified-operators-jsxnv\" (UID: \"84cc1469-0b62-45aa-8a99-16a6784befa6\") " pod="openshift-marketplace/certified-operators-jsxnv" Nov 28 07:28:21 crc kubenswrapper[4922]: I1128 07:28:21.575170 4922 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-zzm89\" (UniqueName: \"kubernetes.io/projected/84cc1469-0b62-45aa-8a99-16a6784befa6-kube-api-access-zzm89\") pod \"certified-operators-jsxnv\" (UID: \"84cc1469-0b62-45aa-8a99-16a6784befa6\") " pod="openshift-marketplace/certified-operators-jsxnv" Nov 28 07:28:21 crc kubenswrapper[4922]: I1128 07:28:21.649919 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-jsxnv" Nov 28 07:28:22 crc kubenswrapper[4922]: I1128 07:28:22.119145 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-jsxnv"] Nov 28 07:28:22 crc kubenswrapper[4922]: I1128 07:28:22.254397 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-jsxnv" event={"ID":"84cc1469-0b62-45aa-8a99-16a6784befa6","Type":"ContainerStarted","Data":"d22562e2cf06bd9204d58b3a55cb74a4632156c8286871e85ae72691df2b8f67"} Nov 28 07:28:23 crc kubenswrapper[4922]: I1128 07:28:23.266904 4922 generic.go:334] "Generic (PLEG): container finished" podID="84cc1469-0b62-45aa-8a99-16a6784befa6" containerID="01b04e6b5c1a63950c8de53ab5a4e67b4ee909ada7ca714824a7a526134115fc" exitCode=0 Nov 28 07:28:23 crc kubenswrapper[4922]: I1128 07:28:23.267189 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-jsxnv" event={"ID":"84cc1469-0b62-45aa-8a99-16a6784befa6","Type":"ContainerDied","Data":"01b04e6b5c1a63950c8de53ab5a4e67b4ee909ada7ca714824a7a526134115fc"} Nov 28 07:28:24 crc kubenswrapper[4922]: I1128 07:28:24.278214 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-jsxnv" event={"ID":"84cc1469-0b62-45aa-8a99-16a6784befa6","Type":"ContainerStarted","Data":"f8e768a12c8a5188c989b92cc39876619b3a04d5e354c55dff9f26d3d55064ad"} Nov 28 07:28:25 crc kubenswrapper[4922]: I1128 07:28:25.290106 4922 generic.go:334] "Generic (PLEG): container finished" podID="84cc1469-0b62-45aa-8a99-16a6784befa6" containerID="f8e768a12c8a5188c989b92cc39876619b3a04d5e354c55dff9f26d3d55064ad" exitCode=0 Nov 28 07:28:25 crc kubenswrapper[4922]: I1128 07:28:25.290156 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-jsxnv" event={"ID":"84cc1469-0b62-45aa-8a99-16a6784befa6","Type":"ContainerDied","Data":"f8e768a12c8a5188c989b92cc39876619b3a04d5e354c55dff9f26d3d55064ad"} Nov 28 07:28:26 crc kubenswrapper[4922]: I1128 07:28:26.304781 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-jsxnv" event={"ID":"84cc1469-0b62-45aa-8a99-16a6784befa6","Type":"ContainerStarted","Data":"e77285fe7e126cf48099482de6df0c322ea5d0f347b3ccf9af99d34677b6d678"} Nov 28 07:28:26 crc kubenswrapper[4922]: I1128 07:28:26.332039 4922 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-jsxnv" podStartSLOduration=2.850694059 podStartE2EDuration="5.332016923s" podCreationTimestamp="2025-11-28 07:28:21 +0000 UTC" firstStartedPulling="2025-11-28 07:28:23.270551294 +0000 UTC m=+2148.190946916" lastFinishedPulling="2025-11-28 07:28:25.751874158 +0000 UTC m=+2150.672269780" observedRunningTime="2025-11-28 07:28:26.330297227 +0000 UTC m=+2151.250692839" watchObservedRunningTime="2025-11-28 07:28:26.332016923 +0000 UTC m=+2151.252412535" Nov 28 07:28:31 crc kubenswrapper[4922]: I1128 07:28:31.650252 4922 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" 
status="" pod="openshift-marketplace/certified-operators-jsxnv" Nov 28 07:28:31 crc kubenswrapper[4922]: I1128 07:28:31.650799 4922 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-jsxnv" Nov 28 07:28:31 crc kubenswrapper[4922]: I1128 07:28:31.730602 4922 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-jsxnv" Nov 28 07:28:32 crc kubenswrapper[4922]: I1128 07:28:32.449404 4922 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-jsxnv" Nov 28 07:28:32 crc kubenswrapper[4922]: I1128 07:28:32.529308 4922 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-jsxnv"] Nov 28 07:28:34 crc kubenswrapper[4922]: I1128 07:28:34.382654 4922 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-jsxnv" podUID="84cc1469-0b62-45aa-8a99-16a6784befa6" containerName="registry-server" containerID="cri-o://e77285fe7e126cf48099482de6df0c322ea5d0f347b3ccf9af99d34677b6d678" gracePeriod=2 Nov 28 07:28:35 crc kubenswrapper[4922]: I1128 07:28:35.394941 4922 generic.go:334] "Generic (PLEG): container finished" podID="84cc1469-0b62-45aa-8a99-16a6784befa6" containerID="e77285fe7e126cf48099482de6df0c322ea5d0f347b3ccf9af99d34677b6d678" exitCode=0 Nov 28 07:28:35 crc kubenswrapper[4922]: I1128 07:28:35.395029 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-jsxnv" event={"ID":"84cc1469-0b62-45aa-8a99-16a6784befa6","Type":"ContainerDied","Data":"e77285fe7e126cf48099482de6df0c322ea5d0f347b3ccf9af99d34677b6d678"} Nov 28 07:28:36 crc kubenswrapper[4922]: I1128 07:28:36.280985 4922 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-jsxnv" Nov 28 07:28:36 crc kubenswrapper[4922]: I1128 07:28:36.361925 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/84cc1469-0b62-45aa-8a99-16a6784befa6-catalog-content\") pod \"84cc1469-0b62-45aa-8a99-16a6784befa6\" (UID: \"84cc1469-0b62-45aa-8a99-16a6784befa6\") " Nov 28 07:28:36 crc kubenswrapper[4922]: I1128 07:28:36.362106 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zzm89\" (UniqueName: \"kubernetes.io/projected/84cc1469-0b62-45aa-8a99-16a6784befa6-kube-api-access-zzm89\") pod \"84cc1469-0b62-45aa-8a99-16a6784befa6\" (UID: \"84cc1469-0b62-45aa-8a99-16a6784befa6\") " Nov 28 07:28:36 crc kubenswrapper[4922]: I1128 07:28:36.362205 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/84cc1469-0b62-45aa-8a99-16a6784befa6-utilities\") pod \"84cc1469-0b62-45aa-8a99-16a6784befa6\" (UID: \"84cc1469-0b62-45aa-8a99-16a6784befa6\") " Nov 28 07:28:36 crc kubenswrapper[4922]: I1128 07:28:36.363675 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/84cc1469-0b62-45aa-8a99-16a6784befa6-utilities" (OuterVolumeSpecName: "utilities") pod "84cc1469-0b62-45aa-8a99-16a6784befa6" (UID: "84cc1469-0b62-45aa-8a99-16a6784befa6"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 07:28:36 crc kubenswrapper[4922]: I1128 07:28:36.370844 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/84cc1469-0b62-45aa-8a99-16a6784befa6-kube-api-access-zzm89" (OuterVolumeSpecName: "kube-api-access-zzm89") pod "84cc1469-0b62-45aa-8a99-16a6784befa6" (UID: "84cc1469-0b62-45aa-8a99-16a6784befa6"). InnerVolumeSpecName "kube-api-access-zzm89". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 07:28:36 crc kubenswrapper[4922]: I1128 07:28:36.410776 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-jsxnv" event={"ID":"84cc1469-0b62-45aa-8a99-16a6784befa6","Type":"ContainerDied","Data":"d22562e2cf06bd9204d58b3a55cb74a4632156c8286871e85ae72691df2b8f67"} Nov 28 07:28:36 crc kubenswrapper[4922]: I1128 07:28:36.410847 4922 scope.go:117] "RemoveContainer" containerID="e77285fe7e126cf48099482de6df0c322ea5d0f347b3ccf9af99d34677b6d678" Nov 28 07:28:36 crc kubenswrapper[4922]: I1128 07:28:36.410869 4922 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-jsxnv" Nov 28 07:28:36 crc kubenswrapper[4922]: I1128 07:28:36.442345 4922 scope.go:117] "RemoveContainer" containerID="f8e768a12c8a5188c989b92cc39876619b3a04d5e354c55dff9f26d3d55064ad" Nov 28 07:28:36 crc kubenswrapper[4922]: I1128 07:28:36.450051 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/84cc1469-0b62-45aa-8a99-16a6784befa6-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "84cc1469-0b62-45aa-8a99-16a6784befa6" (UID: "84cc1469-0b62-45aa-8a99-16a6784befa6"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 07:28:36 crc kubenswrapper[4922]: I1128 07:28:36.464266 4922 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zzm89\" (UniqueName: \"kubernetes.io/projected/84cc1469-0b62-45aa-8a99-16a6784befa6-kube-api-access-zzm89\") on node \"crc\" DevicePath \"\"" Nov 28 07:28:36 crc kubenswrapper[4922]: I1128 07:28:36.464332 4922 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/84cc1469-0b62-45aa-8a99-16a6784befa6-utilities\") on node \"crc\" DevicePath \"\"" Nov 28 07:28:36 crc kubenswrapper[4922]: I1128 07:28:36.464351 4922 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/84cc1469-0b62-45aa-8a99-16a6784befa6-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 28 07:28:36 crc kubenswrapper[4922]: I1128 07:28:36.473796 4922 scope.go:117] "RemoveContainer" containerID="01b04e6b5c1a63950c8de53ab5a4e67b4ee909ada7ca714824a7a526134115fc" Nov 28 07:28:36 crc kubenswrapper[4922]: I1128 07:28:36.764619 4922 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-jsxnv"] Nov 28 07:28:36 crc kubenswrapper[4922]: I1128 07:28:36.778324 4922 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-jsxnv"] Nov 28 07:28:37 crc kubenswrapper[4922]: I1128 07:28:37.415294 4922 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="84cc1469-0b62-45aa-8a99-16a6784befa6" path="/var/lib/kubelet/pods/84cc1469-0b62-45aa-8a99-16a6784befa6/volumes" Nov 28 07:28:38 crc kubenswrapper[4922]: I1128 07:28:38.536002 4922 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-dbntp"] Nov 28 07:28:38 crc kubenswrapper[4922]: E1128 07:28:38.536763 4922 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="84cc1469-0b62-45aa-8a99-16a6784befa6" containerName="registry-server" Nov 28 07:28:38 crc kubenswrapper[4922]: I1128 07:28:38.536780 4922 state_mem.go:107] "Deleted CPUSet assignment" podUID="84cc1469-0b62-45aa-8a99-16a6784befa6" containerName="registry-server" Nov 28 07:28:38 crc kubenswrapper[4922]: E1128 07:28:38.536804 4922 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="84cc1469-0b62-45aa-8a99-16a6784befa6" containerName="extract-content" Nov 28 07:28:38 crc kubenswrapper[4922]: I1128 07:28:38.536814 4922 state_mem.go:107] "Deleted CPUSet assignment" podUID="84cc1469-0b62-45aa-8a99-16a6784befa6" containerName="extract-content" Nov 28 07:28:38 crc kubenswrapper[4922]: E1128 07:28:38.536840 4922 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="84cc1469-0b62-45aa-8a99-16a6784befa6" containerName="extract-utilities" Nov 28 07:28:38 crc kubenswrapper[4922]: I1128 07:28:38.536847 4922 state_mem.go:107] "Deleted CPUSet assignment" podUID="84cc1469-0b62-45aa-8a99-16a6784befa6" containerName="extract-utilities" Nov 28 07:28:38 crc kubenswrapper[4922]: I1128 07:28:38.537003 4922 memory_manager.go:354] "RemoveStaleState removing state" podUID="84cc1469-0b62-45aa-8a99-16a6784befa6" containerName="registry-server" Nov 28 07:28:38 crc kubenswrapper[4922]: I1128 07:28:38.538147 4922 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-dbntp" Nov 28 07:28:38 crc kubenswrapper[4922]: I1128 07:28:38.567733 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-dbntp"] Nov 28 07:28:38 crc kubenswrapper[4922]: I1128 07:28:38.599969 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hhz7p\" (UniqueName: \"kubernetes.io/projected/b16093ea-3dea-438c-a284-723af902088c-kube-api-access-hhz7p\") pod \"redhat-operators-dbntp\" (UID: \"b16093ea-3dea-438c-a284-723af902088c\") " pod="openshift-marketplace/redhat-operators-dbntp" Nov 28 07:28:38 crc kubenswrapper[4922]: I1128 07:28:38.600311 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b16093ea-3dea-438c-a284-723af902088c-utilities\") pod \"redhat-operators-dbntp\" (UID: \"b16093ea-3dea-438c-a284-723af902088c\") " pod="openshift-marketplace/redhat-operators-dbntp" Nov 28 07:28:38 crc kubenswrapper[4922]: I1128 07:28:38.601177 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b16093ea-3dea-438c-a284-723af902088c-catalog-content\") pod \"redhat-operators-dbntp\" (UID: \"b16093ea-3dea-438c-a284-723af902088c\") " pod="openshift-marketplace/redhat-operators-dbntp" Nov 28 07:28:38 crc kubenswrapper[4922]: I1128 07:28:38.705108 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b16093ea-3dea-438c-a284-723af902088c-catalog-content\") pod \"redhat-operators-dbntp\" (UID: \"b16093ea-3dea-438c-a284-723af902088c\") " pod="openshift-marketplace/redhat-operators-dbntp" Nov 28 07:28:38 crc kubenswrapper[4922]: I1128 07:28:38.705719 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b16093ea-3dea-438c-a284-723af902088c-catalog-content\") pod \"redhat-operators-dbntp\" (UID: \"b16093ea-3dea-438c-a284-723af902088c\") " pod="openshift-marketplace/redhat-operators-dbntp" Nov 28 07:28:38 crc kubenswrapper[4922]: I1128 07:28:38.705742 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hhz7p\" (UniqueName: \"kubernetes.io/projected/b16093ea-3dea-438c-a284-723af902088c-kube-api-access-hhz7p\") pod \"redhat-operators-dbntp\" (UID: \"b16093ea-3dea-438c-a284-723af902088c\") " pod="openshift-marketplace/redhat-operators-dbntp" Nov 28 07:28:38 crc kubenswrapper[4922]: I1128 07:28:38.706414 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b16093ea-3dea-438c-a284-723af902088c-utilities\") pod \"redhat-operators-dbntp\" (UID: \"b16093ea-3dea-438c-a284-723af902088c\") " pod="openshift-marketplace/redhat-operators-dbntp" Nov 28 07:28:38 crc kubenswrapper[4922]: I1128 07:28:38.706875 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b16093ea-3dea-438c-a284-723af902088c-utilities\") pod \"redhat-operators-dbntp\" (UID: \"b16093ea-3dea-438c-a284-723af902088c\") " pod="openshift-marketplace/redhat-operators-dbntp" Nov 28 07:28:38 crc kubenswrapper[4922]: I1128 07:28:38.729126 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"kube-api-access-hhz7p\" (UniqueName: \"kubernetes.io/projected/b16093ea-3dea-438c-a284-723af902088c-kube-api-access-hhz7p\") pod \"redhat-operators-dbntp\" (UID: \"b16093ea-3dea-438c-a284-723af902088c\") " pod="openshift-marketplace/redhat-operators-dbntp" Nov 28 07:28:38 crc kubenswrapper[4922]: I1128 07:28:38.865583 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-dbntp" Nov 28 07:28:39 crc kubenswrapper[4922]: I1128 07:28:39.341586 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-dbntp"] Nov 28 07:28:39 crc kubenswrapper[4922]: W1128 07:28:39.348034 4922 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podb16093ea_3dea_438c_a284_723af902088c.slice/crio-c728603288e4d4460b08b65681111be135386a12f6523211eefee8c26692274a WatchSource:0}: Error finding container c728603288e4d4460b08b65681111be135386a12f6523211eefee8c26692274a: Status 404 returned error can't find the container with id c728603288e4d4460b08b65681111be135386a12f6523211eefee8c26692274a Nov 28 07:28:39 crc kubenswrapper[4922]: I1128 07:28:39.440234 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-dbntp" event={"ID":"b16093ea-3dea-438c-a284-723af902088c","Type":"ContainerStarted","Data":"c728603288e4d4460b08b65681111be135386a12f6523211eefee8c26692274a"} Nov 28 07:28:40 crc kubenswrapper[4922]: I1128 07:28:40.451915 4922 generic.go:334] "Generic (PLEG): container finished" podID="b16093ea-3dea-438c-a284-723af902088c" containerID="555f066b32b75594531bdc1ac37792dd30d5b8c02711ab27e240238c2ec66a00" exitCode=0 Nov 28 07:28:40 crc kubenswrapper[4922]: I1128 07:28:40.452116 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-dbntp" event={"ID":"b16093ea-3dea-438c-a284-723af902088c","Type":"ContainerDied","Data":"555f066b32b75594531bdc1ac37792dd30d5b8c02711ab27e240238c2ec66a00"} Nov 28 07:28:41 crc kubenswrapper[4922]: I1128 07:28:41.466672 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-dbntp" event={"ID":"b16093ea-3dea-438c-a284-723af902088c","Type":"ContainerStarted","Data":"fb028e22ceb3010f3826559d916b55c347dc290225f8b4ba12c58cc932bfab50"} Nov 28 07:28:42 crc kubenswrapper[4922]: I1128 07:28:42.479090 4922 generic.go:334] "Generic (PLEG): container finished" podID="b16093ea-3dea-438c-a284-723af902088c" containerID="fb028e22ceb3010f3826559d916b55c347dc290225f8b4ba12c58cc932bfab50" exitCode=0 Nov 28 07:28:42 crc kubenswrapper[4922]: I1128 07:28:42.479155 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-dbntp" event={"ID":"b16093ea-3dea-438c-a284-723af902088c","Type":"ContainerDied","Data":"fb028e22ceb3010f3826559d916b55c347dc290225f8b4ba12c58cc932bfab50"} Nov 28 07:28:43 crc kubenswrapper[4922]: I1128 07:28:43.488390 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-dbntp" event={"ID":"b16093ea-3dea-438c-a284-723af902088c","Type":"ContainerStarted","Data":"41c35484abdb2c8023f4b518c2529279e6736f1d1296dcd28fdde21288039bcd"} Nov 28 07:28:43 crc kubenswrapper[4922]: I1128 07:28:43.509014 4922 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-dbntp" podStartSLOduration=2.929253817 podStartE2EDuration="5.50899033s" podCreationTimestamp="2025-11-28 
07:28:38 +0000 UTC" firstStartedPulling="2025-11-28 07:28:40.454638415 +0000 UTC m=+2165.375034027" lastFinishedPulling="2025-11-28 07:28:43.034374948 +0000 UTC m=+2167.954770540" observedRunningTime="2025-11-28 07:28:43.504250252 +0000 UTC m=+2168.424645854" watchObservedRunningTime="2025-11-28 07:28:43.50899033 +0000 UTC m=+2168.429385922" Nov 28 07:28:48 crc kubenswrapper[4922]: I1128 07:28:48.866345 4922 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-dbntp" Nov 28 07:28:48 crc kubenswrapper[4922]: I1128 07:28:48.867115 4922 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-dbntp" Nov 28 07:28:49 crc kubenswrapper[4922]: I1128 07:28:49.944455 4922 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-dbntp" podUID="b16093ea-3dea-438c-a284-723af902088c" containerName="registry-server" probeResult="failure" output=< Nov 28 07:28:49 crc kubenswrapper[4922]: timeout: failed to connect service ":50051" within 1s Nov 28 07:28:49 crc kubenswrapper[4922]: > Nov 28 07:28:58 crc kubenswrapper[4922]: I1128 07:28:58.943419 4922 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-dbntp" Nov 28 07:28:59 crc kubenswrapper[4922]: I1128 07:28:59.025323 4922 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-dbntp" Nov 28 07:28:59 crc kubenswrapper[4922]: I1128 07:28:59.195095 4922 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-dbntp"] Nov 28 07:29:00 crc kubenswrapper[4922]: I1128 07:29:00.656497 4922 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-dbntp" podUID="b16093ea-3dea-438c-a284-723af902088c" containerName="registry-server" containerID="cri-o://41c35484abdb2c8023f4b518c2529279e6736f1d1296dcd28fdde21288039bcd" gracePeriod=2 Nov 28 07:29:01 crc kubenswrapper[4922]: I1128 07:29:01.163778 4922 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-dbntp" Nov 28 07:29:01 crc kubenswrapper[4922]: I1128 07:29:01.290615 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b16093ea-3dea-438c-a284-723af902088c-catalog-content\") pod \"b16093ea-3dea-438c-a284-723af902088c\" (UID: \"b16093ea-3dea-438c-a284-723af902088c\") " Nov 28 07:29:01 crc kubenswrapper[4922]: I1128 07:29:01.291410 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hhz7p\" (UniqueName: \"kubernetes.io/projected/b16093ea-3dea-438c-a284-723af902088c-kube-api-access-hhz7p\") pod \"b16093ea-3dea-438c-a284-723af902088c\" (UID: \"b16093ea-3dea-438c-a284-723af902088c\") " Nov 28 07:29:01 crc kubenswrapper[4922]: I1128 07:29:01.291526 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b16093ea-3dea-438c-a284-723af902088c-utilities\") pod \"b16093ea-3dea-438c-a284-723af902088c\" (UID: \"b16093ea-3dea-438c-a284-723af902088c\") " Nov 28 07:29:01 crc kubenswrapper[4922]: I1128 07:29:01.292381 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b16093ea-3dea-438c-a284-723af902088c-utilities" (OuterVolumeSpecName: "utilities") pod "b16093ea-3dea-438c-a284-723af902088c" (UID: "b16093ea-3dea-438c-a284-723af902088c"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 07:29:01 crc kubenswrapper[4922]: I1128 07:29:01.300275 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b16093ea-3dea-438c-a284-723af902088c-kube-api-access-hhz7p" (OuterVolumeSpecName: "kube-api-access-hhz7p") pod "b16093ea-3dea-438c-a284-723af902088c" (UID: "b16093ea-3dea-438c-a284-723af902088c"). InnerVolumeSpecName "kube-api-access-hhz7p". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 07:29:01 crc kubenswrapper[4922]: I1128 07:29:01.393401 4922 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b16093ea-3dea-438c-a284-723af902088c-utilities\") on node \"crc\" DevicePath \"\"" Nov 28 07:29:01 crc kubenswrapper[4922]: I1128 07:29:01.393453 4922 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hhz7p\" (UniqueName: \"kubernetes.io/projected/b16093ea-3dea-438c-a284-723af902088c-kube-api-access-hhz7p\") on node \"crc\" DevicePath \"\"" Nov 28 07:29:01 crc kubenswrapper[4922]: I1128 07:29:01.464184 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b16093ea-3dea-438c-a284-723af902088c-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "b16093ea-3dea-438c-a284-723af902088c" (UID: "b16093ea-3dea-438c-a284-723af902088c"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 07:29:01 crc kubenswrapper[4922]: I1128 07:29:01.494553 4922 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b16093ea-3dea-438c-a284-723af902088c-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 28 07:29:01 crc kubenswrapper[4922]: I1128 07:29:01.668917 4922 generic.go:334] "Generic (PLEG): container finished" podID="b16093ea-3dea-438c-a284-723af902088c" containerID="41c35484abdb2c8023f4b518c2529279e6736f1d1296dcd28fdde21288039bcd" exitCode=0 Nov 28 07:29:01 crc kubenswrapper[4922]: I1128 07:29:01.668981 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-dbntp" event={"ID":"b16093ea-3dea-438c-a284-723af902088c","Type":"ContainerDied","Data":"41c35484abdb2c8023f4b518c2529279e6736f1d1296dcd28fdde21288039bcd"} Nov 28 07:29:01 crc kubenswrapper[4922]: I1128 07:29:01.669076 4922 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-dbntp" Nov 28 07:29:01 crc kubenswrapper[4922]: I1128 07:29:01.669107 4922 scope.go:117] "RemoveContainer" containerID="41c35484abdb2c8023f4b518c2529279e6736f1d1296dcd28fdde21288039bcd" Nov 28 07:29:01 crc kubenswrapper[4922]: I1128 07:29:01.669084 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-dbntp" event={"ID":"b16093ea-3dea-438c-a284-723af902088c","Type":"ContainerDied","Data":"c728603288e4d4460b08b65681111be135386a12f6523211eefee8c26692274a"} Nov 28 07:29:01 crc kubenswrapper[4922]: I1128 07:29:01.696587 4922 scope.go:117] "RemoveContainer" containerID="fb028e22ceb3010f3826559d916b55c347dc290225f8b4ba12c58cc932bfab50" Nov 28 07:29:01 crc kubenswrapper[4922]: I1128 07:29:01.717057 4922 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-dbntp"] Nov 28 07:29:01 crc kubenswrapper[4922]: I1128 07:29:01.725424 4922 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-dbntp"] Nov 28 07:29:01 crc kubenswrapper[4922]: I1128 07:29:01.752389 4922 scope.go:117] "RemoveContainer" containerID="555f066b32b75594531bdc1ac37792dd30d5b8c02711ab27e240238c2ec66a00" Nov 28 07:29:01 crc kubenswrapper[4922]: I1128 07:29:01.777513 4922 scope.go:117] "RemoveContainer" containerID="41c35484abdb2c8023f4b518c2529279e6736f1d1296dcd28fdde21288039bcd" Nov 28 07:29:01 crc kubenswrapper[4922]: E1128 07:29:01.778038 4922 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"41c35484abdb2c8023f4b518c2529279e6736f1d1296dcd28fdde21288039bcd\": container with ID starting with 41c35484abdb2c8023f4b518c2529279e6736f1d1296dcd28fdde21288039bcd not found: ID does not exist" containerID="41c35484abdb2c8023f4b518c2529279e6736f1d1296dcd28fdde21288039bcd" Nov 28 07:29:01 crc kubenswrapper[4922]: I1128 07:29:01.778101 4922 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"41c35484abdb2c8023f4b518c2529279e6736f1d1296dcd28fdde21288039bcd"} err="failed to get container status \"41c35484abdb2c8023f4b518c2529279e6736f1d1296dcd28fdde21288039bcd\": rpc error: code = NotFound desc = could not find container \"41c35484abdb2c8023f4b518c2529279e6736f1d1296dcd28fdde21288039bcd\": container with ID starting with 41c35484abdb2c8023f4b518c2529279e6736f1d1296dcd28fdde21288039bcd not found: ID does not exist" Nov 28 07:29:01 crc 
kubenswrapper[4922]: I1128 07:29:01.778143 4922 scope.go:117] "RemoveContainer" containerID="fb028e22ceb3010f3826559d916b55c347dc290225f8b4ba12c58cc932bfab50" Nov 28 07:29:01 crc kubenswrapper[4922]: E1128 07:29:01.778694 4922 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"fb028e22ceb3010f3826559d916b55c347dc290225f8b4ba12c58cc932bfab50\": container with ID starting with fb028e22ceb3010f3826559d916b55c347dc290225f8b4ba12c58cc932bfab50 not found: ID does not exist" containerID="fb028e22ceb3010f3826559d916b55c347dc290225f8b4ba12c58cc932bfab50" Nov 28 07:29:01 crc kubenswrapper[4922]: I1128 07:29:01.778785 4922 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"fb028e22ceb3010f3826559d916b55c347dc290225f8b4ba12c58cc932bfab50"} err="failed to get container status \"fb028e22ceb3010f3826559d916b55c347dc290225f8b4ba12c58cc932bfab50\": rpc error: code = NotFound desc = could not find container \"fb028e22ceb3010f3826559d916b55c347dc290225f8b4ba12c58cc932bfab50\": container with ID starting with fb028e22ceb3010f3826559d916b55c347dc290225f8b4ba12c58cc932bfab50 not found: ID does not exist" Nov 28 07:29:01 crc kubenswrapper[4922]: I1128 07:29:01.778816 4922 scope.go:117] "RemoveContainer" containerID="555f066b32b75594531bdc1ac37792dd30d5b8c02711ab27e240238c2ec66a00" Nov 28 07:29:01 crc kubenswrapper[4922]: E1128 07:29:01.779464 4922 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"555f066b32b75594531bdc1ac37792dd30d5b8c02711ab27e240238c2ec66a00\": container with ID starting with 555f066b32b75594531bdc1ac37792dd30d5b8c02711ab27e240238c2ec66a00 not found: ID does not exist" containerID="555f066b32b75594531bdc1ac37792dd30d5b8c02711ab27e240238c2ec66a00" Nov 28 07:29:01 crc kubenswrapper[4922]: I1128 07:29:01.779525 4922 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"555f066b32b75594531bdc1ac37792dd30d5b8c02711ab27e240238c2ec66a00"} err="failed to get container status \"555f066b32b75594531bdc1ac37792dd30d5b8c02711ab27e240238c2ec66a00\": rpc error: code = NotFound desc = could not find container \"555f066b32b75594531bdc1ac37792dd30d5b8c02711ab27e240238c2ec66a00\": container with ID starting with 555f066b32b75594531bdc1ac37792dd30d5b8c02711ab27e240238c2ec66a00 not found: ID does not exist" Nov 28 07:29:03 crc kubenswrapper[4922]: I1128 07:29:03.417777 4922 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b16093ea-3dea-438c-a284-723af902088c" path="/var/lib/kubelet/pods/b16093ea-3dea-438c-a284-723af902088c/volumes" Nov 28 07:29:24 crc kubenswrapper[4922]: I1128 07:29:24.337538 4922 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-zms9z"] Nov 28 07:29:24 crc kubenswrapper[4922]: E1128 07:29:24.339644 4922 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b16093ea-3dea-438c-a284-723af902088c" containerName="extract-utilities" Nov 28 07:29:24 crc kubenswrapper[4922]: I1128 07:29:24.339676 4922 state_mem.go:107] "Deleted CPUSet assignment" podUID="b16093ea-3dea-438c-a284-723af902088c" containerName="extract-utilities" Nov 28 07:29:24 crc kubenswrapper[4922]: E1128 07:29:24.339708 4922 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b16093ea-3dea-438c-a284-723af902088c" containerName="extract-content" Nov 28 07:29:24 crc kubenswrapper[4922]: I1128 07:29:24.339718 4922 
state_mem.go:107] "Deleted CPUSet assignment" podUID="b16093ea-3dea-438c-a284-723af902088c" containerName="extract-content" Nov 28 07:29:24 crc kubenswrapper[4922]: E1128 07:29:24.339736 4922 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b16093ea-3dea-438c-a284-723af902088c" containerName="registry-server" Nov 28 07:29:24 crc kubenswrapper[4922]: I1128 07:29:24.339743 4922 state_mem.go:107] "Deleted CPUSet assignment" podUID="b16093ea-3dea-438c-a284-723af902088c" containerName="registry-server" Nov 28 07:29:24 crc kubenswrapper[4922]: I1128 07:29:24.339911 4922 memory_manager.go:354] "RemoveStaleState removing state" podUID="b16093ea-3dea-438c-a284-723af902088c" containerName="registry-server" Nov 28 07:29:24 crc kubenswrapper[4922]: I1128 07:29:24.341261 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-zms9z" Nov 28 07:29:24 crc kubenswrapper[4922]: I1128 07:29:24.351295 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-zms9z"] Nov 28 07:29:24 crc kubenswrapper[4922]: I1128 07:29:24.415503 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8ac05363-8aa1-40ac-a40d-a964662fb309-utilities\") pod \"community-operators-zms9z\" (UID: \"8ac05363-8aa1-40ac-a40d-a964662fb309\") " pod="openshift-marketplace/community-operators-zms9z" Nov 28 07:29:24 crc kubenswrapper[4922]: I1128 07:29:24.415642 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vvr4f\" (UniqueName: \"kubernetes.io/projected/8ac05363-8aa1-40ac-a40d-a964662fb309-kube-api-access-vvr4f\") pod \"community-operators-zms9z\" (UID: \"8ac05363-8aa1-40ac-a40d-a964662fb309\") " pod="openshift-marketplace/community-operators-zms9z" Nov 28 07:29:24 crc kubenswrapper[4922]: I1128 07:29:24.415747 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8ac05363-8aa1-40ac-a40d-a964662fb309-catalog-content\") pod \"community-operators-zms9z\" (UID: \"8ac05363-8aa1-40ac-a40d-a964662fb309\") " pod="openshift-marketplace/community-operators-zms9z" Nov 28 07:29:24 crc kubenswrapper[4922]: I1128 07:29:24.516753 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8ac05363-8aa1-40ac-a40d-a964662fb309-catalog-content\") pod \"community-operators-zms9z\" (UID: \"8ac05363-8aa1-40ac-a40d-a964662fb309\") " pod="openshift-marketplace/community-operators-zms9z" Nov 28 07:29:24 crc kubenswrapper[4922]: I1128 07:29:24.516879 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8ac05363-8aa1-40ac-a40d-a964662fb309-utilities\") pod \"community-operators-zms9z\" (UID: \"8ac05363-8aa1-40ac-a40d-a964662fb309\") " pod="openshift-marketplace/community-operators-zms9z" Nov 28 07:29:24 crc kubenswrapper[4922]: I1128 07:29:24.516922 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vvr4f\" (UniqueName: \"kubernetes.io/projected/8ac05363-8aa1-40ac-a40d-a964662fb309-kube-api-access-vvr4f\") pod \"community-operators-zms9z\" (UID: \"8ac05363-8aa1-40ac-a40d-a964662fb309\") " pod="openshift-marketplace/community-operators-zms9z" Nov 28 07:29:24 crc 
kubenswrapper[4922]: I1128 07:29:24.517343 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8ac05363-8aa1-40ac-a40d-a964662fb309-catalog-content\") pod \"community-operators-zms9z\" (UID: \"8ac05363-8aa1-40ac-a40d-a964662fb309\") " pod="openshift-marketplace/community-operators-zms9z" Nov 28 07:29:24 crc kubenswrapper[4922]: I1128 07:29:24.517644 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8ac05363-8aa1-40ac-a40d-a964662fb309-utilities\") pod \"community-operators-zms9z\" (UID: \"8ac05363-8aa1-40ac-a40d-a964662fb309\") " pod="openshift-marketplace/community-operators-zms9z" Nov 28 07:29:24 crc kubenswrapper[4922]: I1128 07:29:24.542925 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vvr4f\" (UniqueName: \"kubernetes.io/projected/8ac05363-8aa1-40ac-a40d-a964662fb309-kube-api-access-vvr4f\") pod \"community-operators-zms9z\" (UID: \"8ac05363-8aa1-40ac-a40d-a964662fb309\") " pod="openshift-marketplace/community-operators-zms9z" Nov 28 07:29:24 crc kubenswrapper[4922]: I1128 07:29:24.666956 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-zms9z" Nov 28 07:29:25 crc kubenswrapper[4922]: I1128 07:29:25.209748 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-zms9z"] Nov 28 07:29:25 crc kubenswrapper[4922]: W1128 07:29:25.228342 4922 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod8ac05363_8aa1_40ac_a40d_a964662fb309.slice/crio-a9f14f9fdfc98c030a78c55fa1cdfd84b79e00940adeac5e6f09864f05ff862d WatchSource:0}: Error finding container a9f14f9fdfc98c030a78c55fa1cdfd84b79e00940adeac5e6f09864f05ff862d: Status 404 returned error can't find the container with id a9f14f9fdfc98c030a78c55fa1cdfd84b79e00940adeac5e6f09864f05ff862d Nov 28 07:29:25 crc kubenswrapper[4922]: I1128 07:29:25.918789 4922 generic.go:334] "Generic (PLEG): container finished" podID="8ac05363-8aa1-40ac-a40d-a964662fb309" containerID="5c05e897b4d5209096f977c703b5ed80868e2c33edd4d8cf963c4d1c52de8a05" exitCode=0 Nov 28 07:29:25 crc kubenswrapper[4922]: I1128 07:29:25.918915 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-zms9z" event={"ID":"8ac05363-8aa1-40ac-a40d-a964662fb309","Type":"ContainerDied","Data":"5c05e897b4d5209096f977c703b5ed80868e2c33edd4d8cf963c4d1c52de8a05"} Nov 28 07:29:25 crc kubenswrapper[4922]: I1128 07:29:25.919041 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-zms9z" event={"ID":"8ac05363-8aa1-40ac-a40d-a964662fb309","Type":"ContainerStarted","Data":"a9f14f9fdfc98c030a78c55fa1cdfd84b79e00940adeac5e6f09864f05ff862d"} Nov 28 07:29:26 crc kubenswrapper[4922]: I1128 07:29:26.926238 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-zms9z" event={"ID":"8ac05363-8aa1-40ac-a40d-a964662fb309","Type":"ContainerStarted","Data":"073386c3998c1d368fcb6350ac194a9046e74f763ac6234238cbd87ae17b52cf"} Nov 28 07:29:27 crc kubenswrapper[4922]: I1128 07:29:27.936795 4922 generic.go:334] "Generic (PLEG): container finished" podID="8ac05363-8aa1-40ac-a40d-a964662fb309" containerID="073386c3998c1d368fcb6350ac194a9046e74f763ac6234238cbd87ae17b52cf" exitCode=0 Nov 28 07:29:27 crc 
kubenswrapper[4922]: I1128 07:29:27.936846 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-zms9z" event={"ID":"8ac05363-8aa1-40ac-a40d-a964662fb309","Type":"ContainerDied","Data":"073386c3998c1d368fcb6350ac194a9046e74f763ac6234238cbd87ae17b52cf"} Nov 28 07:29:28 crc kubenswrapper[4922]: I1128 07:29:28.960708 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-zms9z" event={"ID":"8ac05363-8aa1-40ac-a40d-a964662fb309","Type":"ContainerStarted","Data":"62891164cca9fe3f679efb28b898a3ed0e3ed0edca3622fefe0c1cec0be870e4"} Nov 28 07:29:28 crc kubenswrapper[4922]: I1128 07:29:28.982117 4922 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-zms9z" podStartSLOduration=2.448013712 podStartE2EDuration="4.982102661s" podCreationTimestamp="2025-11-28 07:29:24 +0000 UTC" firstStartedPulling="2025-11-28 07:29:25.920932922 +0000 UTC m=+2210.841328534" lastFinishedPulling="2025-11-28 07:29:28.455021871 +0000 UTC m=+2213.375417483" observedRunningTime="2025-11-28 07:29:28.980998472 +0000 UTC m=+2213.901394074" watchObservedRunningTime="2025-11-28 07:29:28.982102661 +0000 UTC m=+2213.902498243" Nov 28 07:29:34 crc kubenswrapper[4922]: I1128 07:29:34.668370 4922 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-zms9z" Nov 28 07:29:34 crc kubenswrapper[4922]: I1128 07:29:34.668638 4922 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-zms9z" Nov 28 07:29:35 crc kubenswrapper[4922]: I1128 07:29:34.709578 4922 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-zms9z" Nov 28 07:29:35 crc kubenswrapper[4922]: I1128 07:29:35.076467 4922 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-zms9z" Nov 28 07:29:35 crc kubenswrapper[4922]: I1128 07:29:35.134789 4922 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-zms9z"] Nov 28 07:29:37 crc kubenswrapper[4922]: I1128 07:29:37.035725 4922 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-zms9z" podUID="8ac05363-8aa1-40ac-a40d-a964662fb309" containerName="registry-server" containerID="cri-o://62891164cca9fe3f679efb28b898a3ed0e3ed0edca3622fefe0c1cec0be870e4" gracePeriod=2 Nov 28 07:29:37 crc kubenswrapper[4922]: I1128 07:29:37.997637 4922 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-zms9z" Nov 28 07:29:38 crc kubenswrapper[4922]: I1128 07:29:38.046984 4922 generic.go:334] "Generic (PLEG): container finished" podID="8ac05363-8aa1-40ac-a40d-a964662fb309" containerID="62891164cca9fe3f679efb28b898a3ed0e3ed0edca3622fefe0c1cec0be870e4" exitCode=0 Nov 28 07:29:38 crc kubenswrapper[4922]: I1128 07:29:38.047024 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-zms9z" event={"ID":"8ac05363-8aa1-40ac-a40d-a964662fb309","Type":"ContainerDied","Data":"62891164cca9fe3f679efb28b898a3ed0e3ed0edca3622fefe0c1cec0be870e4"} Nov 28 07:29:38 crc kubenswrapper[4922]: I1128 07:29:38.047050 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-zms9z" event={"ID":"8ac05363-8aa1-40ac-a40d-a964662fb309","Type":"ContainerDied","Data":"a9f14f9fdfc98c030a78c55fa1cdfd84b79e00940adeac5e6f09864f05ff862d"} Nov 28 07:29:38 crc kubenswrapper[4922]: I1128 07:29:38.047066 4922 scope.go:117] "RemoveContainer" containerID="62891164cca9fe3f679efb28b898a3ed0e3ed0edca3622fefe0c1cec0be870e4" Nov 28 07:29:38 crc kubenswrapper[4922]: I1128 07:29:38.047074 4922 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-zms9z" Nov 28 07:29:38 crc kubenswrapper[4922]: I1128 07:29:38.077072 4922 scope.go:117] "RemoveContainer" containerID="073386c3998c1d368fcb6350ac194a9046e74f763ac6234238cbd87ae17b52cf" Nov 28 07:29:38 crc kubenswrapper[4922]: I1128 07:29:38.092864 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vvr4f\" (UniqueName: \"kubernetes.io/projected/8ac05363-8aa1-40ac-a40d-a964662fb309-kube-api-access-vvr4f\") pod \"8ac05363-8aa1-40ac-a40d-a964662fb309\" (UID: \"8ac05363-8aa1-40ac-a40d-a964662fb309\") " Nov 28 07:29:38 crc kubenswrapper[4922]: I1128 07:29:38.092911 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8ac05363-8aa1-40ac-a40d-a964662fb309-utilities\") pod \"8ac05363-8aa1-40ac-a40d-a964662fb309\" (UID: \"8ac05363-8aa1-40ac-a40d-a964662fb309\") " Nov 28 07:29:38 crc kubenswrapper[4922]: I1128 07:29:38.092978 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8ac05363-8aa1-40ac-a40d-a964662fb309-catalog-content\") pod \"8ac05363-8aa1-40ac-a40d-a964662fb309\" (UID: \"8ac05363-8aa1-40ac-a40d-a964662fb309\") " Nov 28 07:29:38 crc kubenswrapper[4922]: I1128 07:29:38.094454 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8ac05363-8aa1-40ac-a40d-a964662fb309-utilities" (OuterVolumeSpecName: "utilities") pod "8ac05363-8aa1-40ac-a40d-a964662fb309" (UID: "8ac05363-8aa1-40ac-a40d-a964662fb309"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 07:29:38 crc kubenswrapper[4922]: I1128 07:29:38.097723 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8ac05363-8aa1-40ac-a40d-a964662fb309-kube-api-access-vvr4f" (OuterVolumeSpecName: "kube-api-access-vvr4f") pod "8ac05363-8aa1-40ac-a40d-a964662fb309" (UID: "8ac05363-8aa1-40ac-a40d-a964662fb309"). InnerVolumeSpecName "kube-api-access-vvr4f". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 07:29:38 crc kubenswrapper[4922]: I1128 07:29:38.108282 4922 scope.go:117] "RemoveContainer" containerID="5c05e897b4d5209096f977c703b5ed80868e2c33edd4d8cf963c4d1c52de8a05" Nov 28 07:29:38 crc kubenswrapper[4922]: I1128 07:29:38.142911 4922 scope.go:117] "RemoveContainer" containerID="62891164cca9fe3f679efb28b898a3ed0e3ed0edca3622fefe0c1cec0be870e4" Nov 28 07:29:38 crc kubenswrapper[4922]: E1128 07:29:38.143498 4922 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"62891164cca9fe3f679efb28b898a3ed0e3ed0edca3622fefe0c1cec0be870e4\": container with ID starting with 62891164cca9fe3f679efb28b898a3ed0e3ed0edca3622fefe0c1cec0be870e4 not found: ID does not exist" containerID="62891164cca9fe3f679efb28b898a3ed0e3ed0edca3622fefe0c1cec0be870e4" Nov 28 07:29:38 crc kubenswrapper[4922]: I1128 07:29:38.143533 4922 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"62891164cca9fe3f679efb28b898a3ed0e3ed0edca3622fefe0c1cec0be870e4"} err="failed to get container status \"62891164cca9fe3f679efb28b898a3ed0e3ed0edca3622fefe0c1cec0be870e4\": rpc error: code = NotFound desc = could not find container \"62891164cca9fe3f679efb28b898a3ed0e3ed0edca3622fefe0c1cec0be870e4\": container with ID starting with 62891164cca9fe3f679efb28b898a3ed0e3ed0edca3622fefe0c1cec0be870e4 not found: ID does not exist" Nov 28 07:29:38 crc kubenswrapper[4922]: I1128 07:29:38.143556 4922 scope.go:117] "RemoveContainer" containerID="073386c3998c1d368fcb6350ac194a9046e74f763ac6234238cbd87ae17b52cf" Nov 28 07:29:38 crc kubenswrapper[4922]: E1128 07:29:38.143895 4922 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"073386c3998c1d368fcb6350ac194a9046e74f763ac6234238cbd87ae17b52cf\": container with ID starting with 073386c3998c1d368fcb6350ac194a9046e74f763ac6234238cbd87ae17b52cf not found: ID does not exist" containerID="073386c3998c1d368fcb6350ac194a9046e74f763ac6234238cbd87ae17b52cf" Nov 28 07:29:38 crc kubenswrapper[4922]: I1128 07:29:38.143937 4922 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"073386c3998c1d368fcb6350ac194a9046e74f763ac6234238cbd87ae17b52cf"} err="failed to get container status \"073386c3998c1d368fcb6350ac194a9046e74f763ac6234238cbd87ae17b52cf\": rpc error: code = NotFound desc = could not find container \"073386c3998c1d368fcb6350ac194a9046e74f763ac6234238cbd87ae17b52cf\": container with ID starting with 073386c3998c1d368fcb6350ac194a9046e74f763ac6234238cbd87ae17b52cf not found: ID does not exist" Nov 28 07:29:38 crc kubenswrapper[4922]: I1128 07:29:38.143964 4922 scope.go:117] "RemoveContainer" containerID="5c05e897b4d5209096f977c703b5ed80868e2c33edd4d8cf963c4d1c52de8a05" Nov 28 07:29:38 crc kubenswrapper[4922]: E1128 07:29:38.144362 4922 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"5c05e897b4d5209096f977c703b5ed80868e2c33edd4d8cf963c4d1c52de8a05\": container with ID starting with 5c05e897b4d5209096f977c703b5ed80868e2c33edd4d8cf963c4d1c52de8a05 not found: ID does not exist" containerID="5c05e897b4d5209096f977c703b5ed80868e2c33edd4d8cf963c4d1c52de8a05" Nov 28 07:29:38 crc kubenswrapper[4922]: I1128 07:29:38.144386 4922 pod_container_deletor.go:53] "DeleteContainer returned error" 
containerID={"Type":"cri-o","ID":"5c05e897b4d5209096f977c703b5ed80868e2c33edd4d8cf963c4d1c52de8a05"} err="failed to get container status \"5c05e897b4d5209096f977c703b5ed80868e2c33edd4d8cf963c4d1c52de8a05\": rpc error: code = NotFound desc = could not find container \"5c05e897b4d5209096f977c703b5ed80868e2c33edd4d8cf963c4d1c52de8a05\": container with ID starting with 5c05e897b4d5209096f977c703b5ed80868e2c33edd4d8cf963c4d1c52de8a05 not found: ID does not exist" Nov 28 07:29:38 crc kubenswrapper[4922]: I1128 07:29:38.151145 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8ac05363-8aa1-40ac-a40d-a964662fb309-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "8ac05363-8aa1-40ac-a40d-a964662fb309" (UID: "8ac05363-8aa1-40ac-a40d-a964662fb309"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 07:29:38 crc kubenswrapper[4922]: I1128 07:29:38.194768 4922 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vvr4f\" (UniqueName: \"kubernetes.io/projected/8ac05363-8aa1-40ac-a40d-a964662fb309-kube-api-access-vvr4f\") on node \"crc\" DevicePath \"\"" Nov 28 07:29:38 crc kubenswrapper[4922]: I1128 07:29:38.195001 4922 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8ac05363-8aa1-40ac-a40d-a964662fb309-utilities\") on node \"crc\" DevicePath \"\"" Nov 28 07:29:38 crc kubenswrapper[4922]: I1128 07:29:38.195155 4922 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8ac05363-8aa1-40ac-a40d-a964662fb309-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 28 07:29:38 crc kubenswrapper[4922]: I1128 07:29:38.386538 4922 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-zms9z"] Nov 28 07:29:38 crc kubenswrapper[4922]: I1128 07:29:38.398475 4922 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-zms9z"] Nov 28 07:29:39 crc kubenswrapper[4922]: I1128 07:29:39.410668 4922 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8ac05363-8aa1-40ac-a40d-a964662fb309" path="/var/lib/kubelet/pods/8ac05363-8aa1-40ac-a40d-a964662fb309/volumes" Nov 28 07:29:57 crc kubenswrapper[4922]: I1128 07:29:57.311711 4922 patch_prober.go:28] interesting pod/machine-config-daemon-h8wk6 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 07:29:57 crc kubenswrapper[4922]: I1128 07:29:57.312492 4922 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-h8wk6" podUID="0498340a-5e95-42bf-a0a6-8ac89a6b8858" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 07:30:00 crc kubenswrapper[4922]: I1128 07:30:00.179761 4922 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29405250-blblh"] Nov 28 07:30:00 crc kubenswrapper[4922]: E1128 07:30:00.180717 4922 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8ac05363-8aa1-40ac-a40d-a964662fb309" containerName="registry-server" Nov 28 07:30:00 crc kubenswrapper[4922]: I1128 07:30:00.180752 4922 state_mem.go:107] "Deleted 
CPUSet assignment" podUID="8ac05363-8aa1-40ac-a40d-a964662fb309" containerName="registry-server" Nov 28 07:30:00 crc kubenswrapper[4922]: E1128 07:30:00.180790 4922 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8ac05363-8aa1-40ac-a40d-a964662fb309" containerName="extract-content" Nov 28 07:30:00 crc kubenswrapper[4922]: I1128 07:30:00.180803 4922 state_mem.go:107] "Deleted CPUSet assignment" podUID="8ac05363-8aa1-40ac-a40d-a964662fb309" containerName="extract-content" Nov 28 07:30:00 crc kubenswrapper[4922]: E1128 07:30:00.180827 4922 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8ac05363-8aa1-40ac-a40d-a964662fb309" containerName="extract-utilities" Nov 28 07:30:00 crc kubenswrapper[4922]: I1128 07:30:00.180841 4922 state_mem.go:107] "Deleted CPUSet assignment" podUID="8ac05363-8aa1-40ac-a40d-a964662fb309" containerName="extract-utilities" Nov 28 07:30:00 crc kubenswrapper[4922]: I1128 07:30:00.181096 4922 memory_manager.go:354] "RemoveStaleState removing state" podUID="8ac05363-8aa1-40ac-a40d-a964662fb309" containerName="registry-server" Nov 28 07:30:00 crc kubenswrapper[4922]: I1128 07:30:00.182138 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29405250-blblh" Nov 28 07:30:00 crc kubenswrapper[4922]: I1128 07:30:00.186271 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Nov 28 07:30:00 crc kubenswrapper[4922]: I1128 07:30:00.188275 4922 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Nov 28 07:30:00 crc kubenswrapper[4922]: I1128 07:30:00.194386 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29405250-blblh"] Nov 28 07:30:00 crc kubenswrapper[4922]: I1128 07:30:00.229295 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/69bc3824-6347-4236-aeb7-73bb720c3bb4-secret-volume\") pod \"collect-profiles-29405250-blblh\" (UID: \"69bc3824-6347-4236-aeb7-73bb720c3bb4\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405250-blblh" Nov 28 07:30:00 crc kubenswrapper[4922]: I1128 07:30:00.229360 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xmld6\" (UniqueName: \"kubernetes.io/projected/69bc3824-6347-4236-aeb7-73bb720c3bb4-kube-api-access-xmld6\") pod \"collect-profiles-29405250-blblh\" (UID: \"69bc3824-6347-4236-aeb7-73bb720c3bb4\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405250-blblh" Nov 28 07:30:00 crc kubenswrapper[4922]: I1128 07:30:00.229797 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/69bc3824-6347-4236-aeb7-73bb720c3bb4-config-volume\") pod \"collect-profiles-29405250-blblh\" (UID: \"69bc3824-6347-4236-aeb7-73bb720c3bb4\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405250-blblh" Nov 28 07:30:00 crc kubenswrapper[4922]: I1128 07:30:00.331119 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/69bc3824-6347-4236-aeb7-73bb720c3bb4-config-volume\") pod \"collect-profiles-29405250-blblh\" (UID: 
\"69bc3824-6347-4236-aeb7-73bb720c3bb4\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405250-blblh" Nov 28 07:30:00 crc kubenswrapper[4922]: I1128 07:30:00.331172 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/69bc3824-6347-4236-aeb7-73bb720c3bb4-secret-volume\") pod \"collect-profiles-29405250-blblh\" (UID: \"69bc3824-6347-4236-aeb7-73bb720c3bb4\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405250-blblh" Nov 28 07:30:00 crc kubenswrapper[4922]: I1128 07:30:00.331198 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xmld6\" (UniqueName: \"kubernetes.io/projected/69bc3824-6347-4236-aeb7-73bb720c3bb4-kube-api-access-xmld6\") pod \"collect-profiles-29405250-blblh\" (UID: \"69bc3824-6347-4236-aeb7-73bb720c3bb4\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405250-blblh" Nov 28 07:30:00 crc kubenswrapper[4922]: I1128 07:30:00.332015 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/69bc3824-6347-4236-aeb7-73bb720c3bb4-config-volume\") pod \"collect-profiles-29405250-blblh\" (UID: \"69bc3824-6347-4236-aeb7-73bb720c3bb4\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405250-blblh" Nov 28 07:30:00 crc kubenswrapper[4922]: I1128 07:30:00.340841 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/69bc3824-6347-4236-aeb7-73bb720c3bb4-secret-volume\") pod \"collect-profiles-29405250-blblh\" (UID: \"69bc3824-6347-4236-aeb7-73bb720c3bb4\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405250-blblh" Nov 28 07:30:00 crc kubenswrapper[4922]: I1128 07:30:00.350537 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xmld6\" (UniqueName: \"kubernetes.io/projected/69bc3824-6347-4236-aeb7-73bb720c3bb4-kube-api-access-xmld6\") pod \"collect-profiles-29405250-blblh\" (UID: \"69bc3824-6347-4236-aeb7-73bb720c3bb4\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405250-blblh" Nov 28 07:30:00 crc kubenswrapper[4922]: I1128 07:30:00.510412 4922 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29405250-blblh" Nov 28 07:30:01 crc kubenswrapper[4922]: I1128 07:30:01.039420 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29405250-blblh"] Nov 28 07:30:01 crc kubenswrapper[4922]: W1128 07:30:01.047112 4922 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod69bc3824_6347_4236_aeb7_73bb720c3bb4.slice/crio-c2c5142e14e8ee5b70ba91492ffc98866c2e257d82b044f5b56348d69e3ea723 WatchSource:0}: Error finding container c2c5142e14e8ee5b70ba91492ffc98866c2e257d82b044f5b56348d69e3ea723: Status 404 returned error can't find the container with id c2c5142e14e8ee5b70ba91492ffc98866c2e257d82b044f5b56348d69e3ea723 Nov 28 07:30:01 crc kubenswrapper[4922]: I1128 07:30:01.257811 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29405250-blblh" event={"ID":"69bc3824-6347-4236-aeb7-73bb720c3bb4","Type":"ContainerStarted","Data":"1fab70576a508b782a5973981dbaa8746c5f124b9f3d39c261ab3da527bb2a45"} Nov 28 07:30:01 crc kubenswrapper[4922]: I1128 07:30:01.257866 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29405250-blblh" event={"ID":"69bc3824-6347-4236-aeb7-73bb720c3bb4","Type":"ContainerStarted","Data":"c2c5142e14e8ee5b70ba91492ffc98866c2e257d82b044f5b56348d69e3ea723"} Nov 28 07:30:01 crc kubenswrapper[4922]: I1128 07:30:01.304698 4922 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/collect-profiles-29405250-blblh" podStartSLOduration=1.304675725 podStartE2EDuration="1.304675725s" podCreationTimestamp="2025-11-28 07:30:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 07:30:01.297710248 +0000 UTC m=+2246.218105850" watchObservedRunningTime="2025-11-28 07:30:01.304675725 +0000 UTC m=+2246.225071327" Nov 28 07:30:02 crc kubenswrapper[4922]: I1128 07:30:02.275076 4922 generic.go:334] "Generic (PLEG): container finished" podID="69bc3824-6347-4236-aeb7-73bb720c3bb4" containerID="1fab70576a508b782a5973981dbaa8746c5f124b9f3d39c261ab3da527bb2a45" exitCode=0 Nov 28 07:30:02 crc kubenswrapper[4922]: I1128 07:30:02.275423 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29405250-blblh" event={"ID":"69bc3824-6347-4236-aeb7-73bb720c3bb4","Type":"ContainerDied","Data":"1fab70576a508b782a5973981dbaa8746c5f124b9f3d39c261ab3da527bb2a45"} Nov 28 07:30:03 crc kubenswrapper[4922]: I1128 07:30:03.655206 4922 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29405250-blblh" Nov 28 07:30:03 crc kubenswrapper[4922]: I1128 07:30:03.690687 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/69bc3824-6347-4236-aeb7-73bb720c3bb4-config-volume\") pod \"69bc3824-6347-4236-aeb7-73bb720c3bb4\" (UID: \"69bc3824-6347-4236-aeb7-73bb720c3bb4\") " Nov 28 07:30:03 crc kubenswrapper[4922]: I1128 07:30:03.690842 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xmld6\" (UniqueName: \"kubernetes.io/projected/69bc3824-6347-4236-aeb7-73bb720c3bb4-kube-api-access-xmld6\") pod \"69bc3824-6347-4236-aeb7-73bb720c3bb4\" (UID: \"69bc3824-6347-4236-aeb7-73bb720c3bb4\") " Nov 28 07:30:03 crc kubenswrapper[4922]: I1128 07:30:03.690868 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/69bc3824-6347-4236-aeb7-73bb720c3bb4-secret-volume\") pod \"69bc3824-6347-4236-aeb7-73bb720c3bb4\" (UID: \"69bc3824-6347-4236-aeb7-73bb720c3bb4\") " Nov 28 07:30:03 crc kubenswrapper[4922]: I1128 07:30:03.692205 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/69bc3824-6347-4236-aeb7-73bb720c3bb4-config-volume" (OuterVolumeSpecName: "config-volume") pod "69bc3824-6347-4236-aeb7-73bb720c3bb4" (UID: "69bc3824-6347-4236-aeb7-73bb720c3bb4"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 07:30:03 crc kubenswrapper[4922]: I1128 07:30:03.699553 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/69bc3824-6347-4236-aeb7-73bb720c3bb4-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "69bc3824-6347-4236-aeb7-73bb720c3bb4" (UID: "69bc3824-6347-4236-aeb7-73bb720c3bb4"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 07:30:03 crc kubenswrapper[4922]: I1128 07:30:03.699749 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/69bc3824-6347-4236-aeb7-73bb720c3bb4-kube-api-access-xmld6" (OuterVolumeSpecName: "kube-api-access-xmld6") pod "69bc3824-6347-4236-aeb7-73bb720c3bb4" (UID: "69bc3824-6347-4236-aeb7-73bb720c3bb4"). InnerVolumeSpecName "kube-api-access-xmld6". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 07:30:03 crc kubenswrapper[4922]: I1128 07:30:03.793856 4922 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xmld6\" (UniqueName: \"kubernetes.io/projected/69bc3824-6347-4236-aeb7-73bb720c3bb4-kube-api-access-xmld6\") on node \"crc\" DevicePath \"\"" Nov 28 07:30:03 crc kubenswrapper[4922]: I1128 07:30:03.793950 4922 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/69bc3824-6347-4236-aeb7-73bb720c3bb4-secret-volume\") on node \"crc\" DevicePath \"\"" Nov 28 07:30:03 crc kubenswrapper[4922]: I1128 07:30:03.794013 4922 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/69bc3824-6347-4236-aeb7-73bb720c3bb4-config-volume\") on node \"crc\" DevicePath \"\"" Nov 28 07:30:04 crc kubenswrapper[4922]: I1128 07:30:04.295566 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29405250-blblh" event={"ID":"69bc3824-6347-4236-aeb7-73bb720c3bb4","Type":"ContainerDied","Data":"c2c5142e14e8ee5b70ba91492ffc98866c2e257d82b044f5b56348d69e3ea723"} Nov 28 07:30:04 crc kubenswrapper[4922]: I1128 07:30:04.295644 4922 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="c2c5142e14e8ee5b70ba91492ffc98866c2e257d82b044f5b56348d69e3ea723" Nov 28 07:30:04 crc kubenswrapper[4922]: I1128 07:30:04.295673 4922 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29405250-blblh" Nov 28 07:30:04 crc kubenswrapper[4922]: I1128 07:30:04.768982 4922 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29405205-gln9g"] Nov 28 07:30:04 crc kubenswrapper[4922]: I1128 07:30:04.789035 4922 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29405205-gln9g"] Nov 28 07:30:05 crc kubenswrapper[4922]: I1128 07:30:05.415525 4922 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="765b0f79-4316-4aab-90fa-a2aaa84380f0" path="/var/lib/kubelet/pods/765b0f79-4316-4aab-90fa-a2aaa84380f0/volumes" Nov 28 07:30:27 crc kubenswrapper[4922]: I1128 07:30:27.312009 4922 patch_prober.go:28] interesting pod/machine-config-daemon-h8wk6 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 07:30:27 crc kubenswrapper[4922]: I1128 07:30:27.312735 4922 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-h8wk6" podUID="0498340a-5e95-42bf-a0a6-8ac89a6b8858" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 07:30:51 crc kubenswrapper[4922]: I1128 07:30:51.500353 4922 scope.go:117] "RemoveContainer" containerID="0cb5619676c403739d55e20f2d790f7119e146f00eb6cb6ab6e58f20f205afb0" Nov 28 07:30:57 crc kubenswrapper[4922]: I1128 07:30:57.312762 4922 patch_prober.go:28] interesting pod/machine-config-daemon-h8wk6 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" 
start-of-body= Nov 28 07:30:57 crc kubenswrapper[4922]: I1128 07:30:57.313602 4922 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-h8wk6" podUID="0498340a-5e95-42bf-a0a6-8ac89a6b8858" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 07:30:57 crc kubenswrapper[4922]: I1128 07:30:57.313691 4922 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-h8wk6" Nov 28 07:30:57 crc kubenswrapper[4922]: I1128 07:30:57.314625 4922 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"cdad39abe9139ea49bf9818f51213625cb00da5121e90b9df2fb542689a47216"} pod="openshift-machine-config-operator/machine-config-daemon-h8wk6" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 28 07:30:57 crc kubenswrapper[4922]: I1128 07:30:57.314712 4922 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-h8wk6" podUID="0498340a-5e95-42bf-a0a6-8ac89a6b8858" containerName="machine-config-daemon" containerID="cri-o://cdad39abe9139ea49bf9818f51213625cb00da5121e90b9df2fb542689a47216" gracePeriod=600 Nov 28 07:30:57 crc kubenswrapper[4922]: E1128 07:30:57.453699 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-h8wk6_openshift-machine-config-operator(0498340a-5e95-42bf-a0a6-8ac89a6b8858)\"" pod="openshift-machine-config-operator/machine-config-daemon-h8wk6" podUID="0498340a-5e95-42bf-a0a6-8ac89a6b8858" Nov 28 07:30:57 crc kubenswrapper[4922]: I1128 07:30:57.821609 4922 generic.go:334] "Generic (PLEG): container finished" podID="0498340a-5e95-42bf-a0a6-8ac89a6b8858" containerID="cdad39abe9139ea49bf9818f51213625cb00da5121e90b9df2fb542689a47216" exitCode=0 Nov 28 07:30:57 crc kubenswrapper[4922]: I1128 07:30:57.821772 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-h8wk6" event={"ID":"0498340a-5e95-42bf-a0a6-8ac89a6b8858","Type":"ContainerDied","Data":"cdad39abe9139ea49bf9818f51213625cb00da5121e90b9df2fb542689a47216"} Nov 28 07:30:57 crc kubenswrapper[4922]: I1128 07:30:57.822261 4922 scope.go:117] "RemoveContainer" containerID="97cf555779e0029b2b84ed5757414d94cd98ef367047f53959a1e37429ebd992" Nov 28 07:30:57 crc kubenswrapper[4922]: I1128 07:30:57.823047 4922 scope.go:117] "RemoveContainer" containerID="cdad39abe9139ea49bf9818f51213625cb00da5121e90b9df2fb542689a47216" Nov 28 07:30:57 crc kubenswrapper[4922]: E1128 07:30:57.823522 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-h8wk6_openshift-machine-config-operator(0498340a-5e95-42bf-a0a6-8ac89a6b8858)\"" pod="openshift-machine-config-operator/machine-config-daemon-h8wk6" podUID="0498340a-5e95-42bf-a0a6-8ac89a6b8858" Nov 28 07:31:12 crc kubenswrapper[4922]: I1128 07:31:12.398798 4922 scope.go:117] "RemoveContainer" 
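The probe failures above show the kubelet issuing an HTTP GET against 127.0.0.1:8798/health and, after repeated refusals, killing the container. A sketch of the liveness probe spec those records imply, using k8s.io/api types (field layout per recent k8s.io/api releases); host, port, and path come from the probe output, while period and threshold are illustrative guesses:

```go
package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/util/intstr"
)

func main() {
	probe := &corev1.Probe{
		ProbeHandler: corev1.ProbeHandler{
			HTTPGet: &corev1.HTTPGetAction{
				Host: "127.0.0.1",       // from the probe output above
				Path: "/health",          // from the probe output above
				Port: intstr.FromInt(8798),
			},
		},
		PeriodSeconds:    30, // the failure records arrive ~30s apart
		FailureThreshold: 3,  // assumption; not visible in the log
	}
	fmt.Printf("%+v\n", probe)
}
```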
containerID="cdad39abe9139ea49bf9818f51213625cb00da5121e90b9df2fb542689a47216" Nov 28 07:31:12 crc kubenswrapper[4922]: E1128 07:31:12.399949 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-h8wk6_openshift-machine-config-operator(0498340a-5e95-42bf-a0a6-8ac89a6b8858)\"" pod="openshift-machine-config-operator/machine-config-daemon-h8wk6" podUID="0498340a-5e95-42bf-a0a6-8ac89a6b8858" Nov 28 07:31:25 crc kubenswrapper[4922]: I1128 07:31:25.427053 4922 scope.go:117] "RemoveContainer" containerID="cdad39abe9139ea49bf9818f51213625cb00da5121e90b9df2fb542689a47216" Nov 28 07:31:25 crc kubenswrapper[4922]: E1128 07:31:25.427717 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-h8wk6_openshift-machine-config-operator(0498340a-5e95-42bf-a0a6-8ac89a6b8858)\"" pod="openshift-machine-config-operator/machine-config-daemon-h8wk6" podUID="0498340a-5e95-42bf-a0a6-8ac89a6b8858" Nov 28 07:31:36 crc kubenswrapper[4922]: I1128 07:31:36.398803 4922 scope.go:117] "RemoveContainer" containerID="cdad39abe9139ea49bf9818f51213625cb00da5121e90b9df2fb542689a47216" Nov 28 07:31:36 crc kubenswrapper[4922]: E1128 07:31:36.399915 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-h8wk6_openshift-machine-config-operator(0498340a-5e95-42bf-a0a6-8ac89a6b8858)\"" pod="openshift-machine-config-operator/machine-config-daemon-h8wk6" podUID="0498340a-5e95-42bf-a0a6-8ac89a6b8858" Nov 28 07:31:47 crc kubenswrapper[4922]: I1128 07:31:47.398624 4922 scope.go:117] "RemoveContainer" containerID="cdad39abe9139ea49bf9818f51213625cb00da5121e90b9df2fb542689a47216" Nov 28 07:31:47 crc kubenswrapper[4922]: E1128 07:31:47.399647 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-h8wk6_openshift-machine-config-operator(0498340a-5e95-42bf-a0a6-8ac89a6b8858)\"" pod="openshift-machine-config-operator/machine-config-daemon-h8wk6" podUID="0498340a-5e95-42bf-a0a6-8ac89a6b8858" Nov 28 07:31:59 crc kubenswrapper[4922]: I1128 07:31:59.398567 4922 scope.go:117] "RemoveContainer" containerID="cdad39abe9139ea49bf9818f51213625cb00da5121e90b9df2fb542689a47216" Nov 28 07:31:59 crc kubenswrapper[4922]: E1128 07:31:59.399520 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-h8wk6_openshift-machine-config-operator(0498340a-5e95-42bf-a0a6-8ac89a6b8858)\"" pod="openshift-machine-config-operator/machine-config-daemon-h8wk6" podUID="0498340a-5e95-42bf-a0a6-8ac89a6b8858" Nov 28 07:32:11 crc kubenswrapper[4922]: I1128 07:32:11.398624 4922 scope.go:117] "RemoveContainer" containerID="cdad39abe9139ea49bf9818f51213625cb00da5121e90b9df2fb542689a47216" Nov 28 07:32:11 crc kubenswrapper[4922]: E1128 07:32:11.399656 4922 pod_workers.go:1301] "Error syncing pod, 
skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-h8wk6_openshift-machine-config-operator(0498340a-5e95-42bf-a0a6-8ac89a6b8858)\"" pod="openshift-machine-config-operator/machine-config-daemon-h8wk6" podUID="0498340a-5e95-42bf-a0a6-8ac89a6b8858" Nov 28 07:32:24 crc kubenswrapper[4922]: I1128 07:32:24.399413 4922 scope.go:117] "RemoveContainer" containerID="cdad39abe9139ea49bf9818f51213625cb00da5121e90b9df2fb542689a47216" Nov 28 07:32:24 crc kubenswrapper[4922]: E1128 07:32:24.400532 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-h8wk6_openshift-machine-config-operator(0498340a-5e95-42bf-a0a6-8ac89a6b8858)\"" pod="openshift-machine-config-operator/machine-config-daemon-h8wk6" podUID="0498340a-5e95-42bf-a0a6-8ac89a6b8858" Nov 28 07:32:37 crc kubenswrapper[4922]: I1128 07:32:37.399256 4922 scope.go:117] "RemoveContainer" containerID="cdad39abe9139ea49bf9818f51213625cb00da5121e90b9df2fb542689a47216" Nov 28 07:32:37 crc kubenswrapper[4922]: E1128 07:32:37.400015 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-h8wk6_openshift-machine-config-operator(0498340a-5e95-42bf-a0a6-8ac89a6b8858)\"" pod="openshift-machine-config-operator/machine-config-daemon-h8wk6" podUID="0498340a-5e95-42bf-a0a6-8ac89a6b8858" Nov 28 07:32:50 crc kubenswrapper[4922]: I1128 07:32:50.399171 4922 scope.go:117] "RemoveContainer" containerID="cdad39abe9139ea49bf9818f51213625cb00da5121e90b9df2fb542689a47216" Nov 28 07:32:50 crc kubenswrapper[4922]: E1128 07:32:50.401944 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-h8wk6_openshift-machine-config-operator(0498340a-5e95-42bf-a0a6-8ac89a6b8858)\"" pod="openshift-machine-config-operator/machine-config-daemon-h8wk6" podUID="0498340a-5e95-42bf-a0a6-8ac89a6b8858" Nov 28 07:33:01 crc kubenswrapper[4922]: I1128 07:33:01.398657 4922 scope.go:117] "RemoveContainer" containerID="cdad39abe9139ea49bf9818f51213625cb00da5121e90b9df2fb542689a47216" Nov 28 07:33:01 crc kubenswrapper[4922]: E1128 07:33:01.399991 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-h8wk6_openshift-machine-config-operator(0498340a-5e95-42bf-a0a6-8ac89a6b8858)\"" pod="openshift-machine-config-operator/machine-config-daemon-h8wk6" podUID="0498340a-5e95-42bf-a0a6-8ac89a6b8858" Nov 28 07:33:13 crc kubenswrapper[4922]: I1128 07:33:13.399093 4922 scope.go:117] "RemoveContainer" containerID="cdad39abe9139ea49bf9818f51213625cb00da5121e90b9df2fb542689a47216" Nov 28 07:33:13 crc kubenswrapper[4922]: E1128 07:33:13.400080 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon 
pod=machine-config-daemon-h8wk6_openshift-machine-config-operator(0498340a-5e95-42bf-a0a6-8ac89a6b8858)\"" pod="openshift-machine-config-operator/machine-config-daemon-h8wk6" podUID="0498340a-5e95-42bf-a0a6-8ac89a6b8858" Nov 28 07:33:25 crc kubenswrapper[4922]: I1128 07:33:25.406671 4922 scope.go:117] "RemoveContainer" containerID="cdad39abe9139ea49bf9818f51213625cb00da5121e90b9df2fb542689a47216" Nov 28 07:33:25 crc kubenswrapper[4922]: E1128 07:33:25.408981 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-h8wk6_openshift-machine-config-operator(0498340a-5e95-42bf-a0a6-8ac89a6b8858)\"" pod="openshift-machine-config-operator/machine-config-daemon-h8wk6" podUID="0498340a-5e95-42bf-a0a6-8ac89a6b8858" Nov 28 07:33:39 crc kubenswrapper[4922]: I1128 07:33:39.399528 4922 scope.go:117] "RemoveContainer" containerID="cdad39abe9139ea49bf9818f51213625cb00da5121e90b9df2fb542689a47216" Nov 28 07:33:39 crc kubenswrapper[4922]: E1128 07:33:39.402158 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-h8wk6_openshift-machine-config-operator(0498340a-5e95-42bf-a0a6-8ac89a6b8858)\"" pod="openshift-machine-config-operator/machine-config-daemon-h8wk6" podUID="0498340a-5e95-42bf-a0a6-8ac89a6b8858" Nov 28 07:33:51 crc kubenswrapper[4922]: I1128 07:33:51.400392 4922 scope.go:117] "RemoveContainer" containerID="cdad39abe9139ea49bf9818f51213625cb00da5121e90b9df2fb542689a47216" Nov 28 07:33:51 crc kubenswrapper[4922]: E1128 07:33:51.402066 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-h8wk6_openshift-machine-config-operator(0498340a-5e95-42bf-a0a6-8ac89a6b8858)\"" pod="openshift-machine-config-operator/machine-config-daemon-h8wk6" podUID="0498340a-5e95-42bf-a0a6-8ac89a6b8858" Nov 28 07:34:04 crc kubenswrapper[4922]: I1128 07:34:04.398846 4922 scope.go:117] "RemoveContainer" containerID="cdad39abe9139ea49bf9818f51213625cb00da5121e90b9df2fb542689a47216" Nov 28 07:34:04 crc kubenswrapper[4922]: E1128 07:34:04.400004 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-h8wk6_openshift-machine-config-operator(0498340a-5e95-42bf-a0a6-8ac89a6b8858)\"" pod="openshift-machine-config-operator/machine-config-daemon-h8wk6" podUID="0498340a-5e95-42bf-a0a6-8ac89a6b8858" Nov 28 07:34:17 crc kubenswrapper[4922]: I1128 07:34:17.399525 4922 scope.go:117] "RemoveContainer" containerID="cdad39abe9139ea49bf9818f51213625cb00da5121e90b9df2fb542689a47216" Nov 28 07:34:17 crc kubenswrapper[4922]: E1128 07:34:17.401869 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-h8wk6_openshift-machine-config-operator(0498340a-5e95-42bf-a0a6-8ac89a6b8858)\"" pod="openshift-machine-config-operator/machine-config-daemon-h8wk6" 
podUID="0498340a-5e95-42bf-a0a6-8ac89a6b8858" Nov 28 07:34:28 crc kubenswrapper[4922]: I1128 07:34:28.398687 4922 scope.go:117] "RemoveContainer" containerID="cdad39abe9139ea49bf9818f51213625cb00da5121e90b9df2fb542689a47216" Nov 28 07:34:28 crc kubenswrapper[4922]: E1128 07:34:28.399588 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-h8wk6_openshift-machine-config-operator(0498340a-5e95-42bf-a0a6-8ac89a6b8858)\"" pod="openshift-machine-config-operator/machine-config-daemon-h8wk6" podUID="0498340a-5e95-42bf-a0a6-8ac89a6b8858" Nov 28 07:34:40 crc kubenswrapper[4922]: I1128 07:34:40.398772 4922 scope.go:117] "RemoveContainer" containerID="cdad39abe9139ea49bf9818f51213625cb00da5121e90b9df2fb542689a47216" Nov 28 07:34:40 crc kubenswrapper[4922]: E1128 07:34:40.399783 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-h8wk6_openshift-machine-config-operator(0498340a-5e95-42bf-a0a6-8ac89a6b8858)\"" pod="openshift-machine-config-operator/machine-config-daemon-h8wk6" podUID="0498340a-5e95-42bf-a0a6-8ac89a6b8858" Nov 28 07:34:52 crc kubenswrapper[4922]: I1128 07:34:52.398958 4922 scope.go:117] "RemoveContainer" containerID="cdad39abe9139ea49bf9818f51213625cb00da5121e90b9df2fb542689a47216" Nov 28 07:34:52 crc kubenswrapper[4922]: E1128 07:34:52.399799 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-h8wk6_openshift-machine-config-operator(0498340a-5e95-42bf-a0a6-8ac89a6b8858)\"" pod="openshift-machine-config-operator/machine-config-daemon-h8wk6" podUID="0498340a-5e95-42bf-a0a6-8ac89a6b8858" Nov 28 07:35:04 crc kubenswrapper[4922]: I1128 07:35:04.398170 4922 scope.go:117] "RemoveContainer" containerID="cdad39abe9139ea49bf9818f51213625cb00da5121e90b9df2fb542689a47216" Nov 28 07:35:04 crc kubenswrapper[4922]: E1128 07:35:04.399351 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-h8wk6_openshift-machine-config-operator(0498340a-5e95-42bf-a0a6-8ac89a6b8858)\"" pod="openshift-machine-config-operator/machine-config-daemon-h8wk6" podUID="0498340a-5e95-42bf-a0a6-8ac89a6b8858" Nov 28 07:35:17 crc kubenswrapper[4922]: I1128 07:35:17.398939 4922 scope.go:117] "RemoveContainer" containerID="cdad39abe9139ea49bf9818f51213625cb00da5121e90b9df2fb542689a47216" Nov 28 07:35:17 crc kubenswrapper[4922]: E1128 07:35:17.400034 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-h8wk6_openshift-machine-config-operator(0498340a-5e95-42bf-a0a6-8ac89a6b8858)\"" pod="openshift-machine-config-operator/machine-config-daemon-h8wk6" podUID="0498340a-5e95-42bf-a0a6-8ac89a6b8858" Nov 28 07:35:31 crc kubenswrapper[4922]: I1128 07:35:31.401041 4922 scope.go:117] "RemoveContainer" 
containerID="cdad39abe9139ea49bf9818f51213625cb00da5121e90b9df2fb542689a47216" Nov 28 07:35:31 crc kubenswrapper[4922]: E1128 07:35:31.401965 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-h8wk6_openshift-machine-config-operator(0498340a-5e95-42bf-a0a6-8ac89a6b8858)\"" pod="openshift-machine-config-operator/machine-config-daemon-h8wk6" podUID="0498340a-5e95-42bf-a0a6-8ac89a6b8858" Nov 28 07:35:45 crc kubenswrapper[4922]: I1128 07:35:45.406572 4922 scope.go:117] "RemoveContainer" containerID="cdad39abe9139ea49bf9818f51213625cb00da5121e90b9df2fb542689a47216" Nov 28 07:35:45 crc kubenswrapper[4922]: E1128 07:35:45.407524 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-h8wk6_openshift-machine-config-operator(0498340a-5e95-42bf-a0a6-8ac89a6b8858)\"" pod="openshift-machine-config-operator/machine-config-daemon-h8wk6" podUID="0498340a-5e95-42bf-a0a6-8ac89a6b8858" Nov 28 07:35:57 crc kubenswrapper[4922]: I1128 07:35:57.399885 4922 scope.go:117] "RemoveContainer" containerID="cdad39abe9139ea49bf9818f51213625cb00da5121e90b9df2fb542689a47216" Nov 28 07:35:57 crc kubenswrapper[4922]: I1128 07:35:57.866654 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-h8wk6" event={"ID":"0498340a-5e95-42bf-a0a6-8ac89a6b8858","Type":"ContainerStarted","Data":"bdd4b7e3a8a1bb8ee8f0c20c17ff0cfd5a46f195485e9f2be9c45464dabf6d09"} Nov 28 07:37:33 crc kubenswrapper[4922]: I1128 07:37:33.017674 4922 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-vshwv"] Nov 28 07:37:33 crc kubenswrapper[4922]: E1128 07:37:33.018805 4922 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="69bc3824-6347-4236-aeb7-73bb720c3bb4" containerName="collect-profiles" Nov 28 07:37:33 crc kubenswrapper[4922]: I1128 07:37:33.018830 4922 state_mem.go:107] "Deleted CPUSet assignment" podUID="69bc3824-6347-4236-aeb7-73bb720c3bb4" containerName="collect-profiles" Nov 28 07:37:33 crc kubenswrapper[4922]: I1128 07:37:33.019141 4922 memory_manager.go:354] "RemoveStaleState removing state" podUID="69bc3824-6347-4236-aeb7-73bb720c3bb4" containerName="collect-profiles" Nov 28 07:37:33 crc kubenswrapper[4922]: I1128 07:37:33.020863 4922 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-vshwv" Nov 28 07:37:33 crc kubenswrapper[4922]: I1128 07:37:33.044201 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-vshwv"] Nov 28 07:37:33 crc kubenswrapper[4922]: I1128 07:37:33.085881 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-r986d\" (UniqueName: \"kubernetes.io/projected/bf620c7f-343a-4960-b391-dcee0ff291d7-kube-api-access-r986d\") pod \"redhat-marketplace-vshwv\" (UID: \"bf620c7f-343a-4960-b391-dcee0ff291d7\") " pod="openshift-marketplace/redhat-marketplace-vshwv" Nov 28 07:37:33 crc kubenswrapper[4922]: I1128 07:37:33.086151 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/bf620c7f-343a-4960-b391-dcee0ff291d7-catalog-content\") pod \"redhat-marketplace-vshwv\" (UID: \"bf620c7f-343a-4960-b391-dcee0ff291d7\") " pod="openshift-marketplace/redhat-marketplace-vshwv" Nov 28 07:37:33 crc kubenswrapper[4922]: I1128 07:37:33.086238 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/bf620c7f-343a-4960-b391-dcee0ff291d7-utilities\") pod \"redhat-marketplace-vshwv\" (UID: \"bf620c7f-343a-4960-b391-dcee0ff291d7\") " pod="openshift-marketplace/redhat-marketplace-vshwv" Nov 28 07:37:33 crc kubenswrapper[4922]: I1128 07:37:33.188081 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-r986d\" (UniqueName: \"kubernetes.io/projected/bf620c7f-343a-4960-b391-dcee0ff291d7-kube-api-access-r986d\") pod \"redhat-marketplace-vshwv\" (UID: \"bf620c7f-343a-4960-b391-dcee0ff291d7\") " pod="openshift-marketplace/redhat-marketplace-vshwv" Nov 28 07:37:33 crc kubenswrapper[4922]: I1128 07:37:33.188273 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/bf620c7f-343a-4960-b391-dcee0ff291d7-catalog-content\") pod \"redhat-marketplace-vshwv\" (UID: \"bf620c7f-343a-4960-b391-dcee0ff291d7\") " pod="openshift-marketplace/redhat-marketplace-vshwv" Nov 28 07:37:33 crc kubenswrapper[4922]: I1128 07:37:33.188321 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/bf620c7f-343a-4960-b391-dcee0ff291d7-utilities\") pod \"redhat-marketplace-vshwv\" (UID: \"bf620c7f-343a-4960-b391-dcee0ff291d7\") " pod="openshift-marketplace/redhat-marketplace-vshwv" Nov 28 07:37:33 crc kubenswrapper[4922]: I1128 07:37:33.189173 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/bf620c7f-343a-4960-b391-dcee0ff291d7-utilities\") pod \"redhat-marketplace-vshwv\" (UID: \"bf620c7f-343a-4960-b391-dcee0ff291d7\") " pod="openshift-marketplace/redhat-marketplace-vshwv" Nov 28 07:37:33 crc kubenswrapper[4922]: I1128 07:37:33.189348 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/bf620c7f-343a-4960-b391-dcee0ff291d7-catalog-content\") pod \"redhat-marketplace-vshwv\" (UID: \"bf620c7f-343a-4960-b391-dcee0ff291d7\") " pod="openshift-marketplace/redhat-marketplace-vshwv" Nov 28 07:37:33 crc kubenswrapper[4922]: I1128 07:37:33.210878 4922 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"kube-api-access-r986d\" (UniqueName: \"kubernetes.io/projected/bf620c7f-343a-4960-b391-dcee0ff291d7-kube-api-access-r986d\") pod \"redhat-marketplace-vshwv\" (UID: \"bf620c7f-343a-4960-b391-dcee0ff291d7\") " pod="openshift-marketplace/redhat-marketplace-vshwv" Nov 28 07:37:33 crc kubenswrapper[4922]: I1128 07:37:33.364724 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-vshwv" Nov 28 07:37:33 crc kubenswrapper[4922]: I1128 07:37:33.861933 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-vshwv"] Nov 28 07:37:34 crc kubenswrapper[4922]: I1128 07:37:34.852905 4922 generic.go:334] "Generic (PLEG): container finished" podID="bf620c7f-343a-4960-b391-dcee0ff291d7" containerID="d97675ad11618a173eb90b4b40efbcada9a579c37253a40bc0b35e35067d8022" exitCode=0 Nov 28 07:37:34 crc kubenswrapper[4922]: I1128 07:37:34.852966 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-vshwv" event={"ID":"bf620c7f-343a-4960-b391-dcee0ff291d7","Type":"ContainerDied","Data":"d97675ad11618a173eb90b4b40efbcada9a579c37253a40bc0b35e35067d8022"} Nov 28 07:37:34 crc kubenswrapper[4922]: I1128 07:37:34.853007 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-vshwv" event={"ID":"bf620c7f-343a-4960-b391-dcee0ff291d7","Type":"ContainerStarted","Data":"eaa7bcd5fb86a646cea821038ce0a7de88a2d94c7d37e16acdffa77b35188d6e"} Nov 28 07:37:34 crc kubenswrapper[4922]: I1128 07:37:34.856190 4922 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 28 07:37:35 crc kubenswrapper[4922]: I1128 07:37:35.864358 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-vshwv" event={"ID":"bf620c7f-343a-4960-b391-dcee0ff291d7","Type":"ContainerStarted","Data":"c6eab2abf11ff37648879a76af4216b3760d120e5b45c9c86ba6b61b776ad559"} Nov 28 07:37:36 crc kubenswrapper[4922]: I1128 07:37:36.884317 4922 generic.go:334] "Generic (PLEG): container finished" podID="bf620c7f-343a-4960-b391-dcee0ff291d7" containerID="c6eab2abf11ff37648879a76af4216b3760d120e5b45c9c86ba6b61b776ad559" exitCode=0 Nov 28 07:37:36 crc kubenswrapper[4922]: I1128 07:37:36.885721 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-vshwv" event={"ID":"bf620c7f-343a-4960-b391-dcee0ff291d7","Type":"ContainerDied","Data":"c6eab2abf11ff37648879a76af4216b3760d120e5b45c9c86ba6b61b776ad559"} Nov 28 07:37:37 crc kubenswrapper[4922]: I1128 07:37:37.899703 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-vshwv" event={"ID":"bf620c7f-343a-4960-b391-dcee0ff291d7","Type":"ContainerStarted","Data":"7cb26321705c5993c12140d9542527dcb7a7a1f42fb49e70eed1b7c781b01149"} Nov 28 07:37:37 crc kubenswrapper[4922]: I1128 07:37:37.936620 4922 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-vshwv" podStartSLOduration=3.267697531 podStartE2EDuration="5.936415159s" podCreationTimestamp="2025-11-28 07:37:32 +0000 UTC" firstStartedPulling="2025-11-28 07:37:34.855809121 +0000 UTC m=+2699.776204743" lastFinishedPulling="2025-11-28 07:37:37.524526749 +0000 UTC m=+2702.444922371" observedRunningTime="2025-11-28 07:37:37.926903882 +0000 UTC m=+2702.847299504" watchObservedRunningTime="2025-11-28 07:37:37.936415159 +0000 UTC 
m=+2702.856810781" Nov 28 07:37:43 crc kubenswrapper[4922]: I1128 07:37:43.365409 4922 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-vshwv" Nov 28 07:37:43 crc kubenswrapper[4922]: I1128 07:37:43.365920 4922 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-vshwv" Nov 28 07:37:43 crc kubenswrapper[4922]: I1128 07:37:43.437632 4922 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-vshwv" Nov 28 07:37:44 crc kubenswrapper[4922]: I1128 07:37:44.028150 4922 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-vshwv" Nov 28 07:37:44 crc kubenswrapper[4922]: I1128 07:37:44.104879 4922 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-vshwv"] Nov 28 07:37:45 crc kubenswrapper[4922]: I1128 07:37:45.972757 4922 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-vshwv" podUID="bf620c7f-343a-4960-b391-dcee0ff291d7" containerName="registry-server" containerID="cri-o://7cb26321705c5993c12140d9542527dcb7a7a1f42fb49e70eed1b7c781b01149" gracePeriod=2 Nov 28 07:37:46 crc kubenswrapper[4922]: I1128 07:37:46.439003 4922 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-vshwv" Nov 28 07:37:46 crc kubenswrapper[4922]: I1128 07:37:46.595486 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/bf620c7f-343a-4960-b391-dcee0ff291d7-catalog-content\") pod \"bf620c7f-343a-4960-b391-dcee0ff291d7\" (UID: \"bf620c7f-343a-4960-b391-dcee0ff291d7\") " Nov 28 07:37:46 crc kubenswrapper[4922]: I1128 07:37:46.595575 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-r986d\" (UniqueName: \"kubernetes.io/projected/bf620c7f-343a-4960-b391-dcee0ff291d7-kube-api-access-r986d\") pod \"bf620c7f-343a-4960-b391-dcee0ff291d7\" (UID: \"bf620c7f-343a-4960-b391-dcee0ff291d7\") " Nov 28 07:37:46 crc kubenswrapper[4922]: I1128 07:37:46.595642 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/bf620c7f-343a-4960-b391-dcee0ff291d7-utilities\") pod \"bf620c7f-343a-4960-b391-dcee0ff291d7\" (UID: \"bf620c7f-343a-4960-b391-dcee0ff291d7\") " Nov 28 07:37:46 crc kubenswrapper[4922]: I1128 07:37:46.597623 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/bf620c7f-343a-4960-b391-dcee0ff291d7-utilities" (OuterVolumeSpecName: "utilities") pod "bf620c7f-343a-4960-b391-dcee0ff291d7" (UID: "bf620c7f-343a-4960-b391-dcee0ff291d7"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 07:37:46 crc kubenswrapper[4922]: I1128 07:37:46.604969 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bf620c7f-343a-4960-b391-dcee0ff291d7-kube-api-access-r986d" (OuterVolumeSpecName: "kube-api-access-r986d") pod "bf620c7f-343a-4960-b391-dcee0ff291d7" (UID: "bf620c7f-343a-4960-b391-dcee0ff291d7"). InnerVolumeSpecName "kube-api-access-r986d". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 07:37:46 crc kubenswrapper[4922]: I1128 07:37:46.632418 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/bf620c7f-343a-4960-b391-dcee0ff291d7-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "bf620c7f-343a-4960-b391-dcee0ff291d7" (UID: "bf620c7f-343a-4960-b391-dcee0ff291d7"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 07:37:46 crc kubenswrapper[4922]: I1128 07:37:46.697553 4922 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/bf620c7f-343a-4960-b391-dcee0ff291d7-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 28 07:37:46 crc kubenswrapper[4922]: I1128 07:37:46.697605 4922 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-r986d\" (UniqueName: \"kubernetes.io/projected/bf620c7f-343a-4960-b391-dcee0ff291d7-kube-api-access-r986d\") on node \"crc\" DevicePath \"\"" Nov 28 07:37:46 crc kubenswrapper[4922]: I1128 07:37:46.697634 4922 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/bf620c7f-343a-4960-b391-dcee0ff291d7-utilities\") on node \"crc\" DevicePath \"\"" Nov 28 07:37:46 crc kubenswrapper[4922]: I1128 07:37:46.988870 4922 generic.go:334] "Generic (PLEG): container finished" podID="bf620c7f-343a-4960-b391-dcee0ff291d7" containerID="7cb26321705c5993c12140d9542527dcb7a7a1f42fb49e70eed1b7c781b01149" exitCode=0 Nov 28 07:37:46 crc kubenswrapper[4922]: I1128 07:37:46.988936 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-vshwv" event={"ID":"bf620c7f-343a-4960-b391-dcee0ff291d7","Type":"ContainerDied","Data":"7cb26321705c5993c12140d9542527dcb7a7a1f42fb49e70eed1b7c781b01149"} Nov 28 07:37:46 crc kubenswrapper[4922]: I1128 07:37:46.988981 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-vshwv" event={"ID":"bf620c7f-343a-4960-b391-dcee0ff291d7","Type":"ContainerDied","Data":"eaa7bcd5fb86a646cea821038ce0a7de88a2d94c7d37e16acdffa77b35188d6e"} Nov 28 07:37:46 crc kubenswrapper[4922]: I1128 07:37:46.988982 4922 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-vshwv" Nov 28 07:37:46 crc kubenswrapper[4922]: I1128 07:37:46.989010 4922 scope.go:117] "RemoveContainer" containerID="7cb26321705c5993c12140d9542527dcb7a7a1f42fb49e70eed1b7c781b01149" Nov 28 07:37:47 crc kubenswrapper[4922]: I1128 07:37:47.038152 4922 scope.go:117] "RemoveContainer" containerID="c6eab2abf11ff37648879a76af4216b3760d120e5b45c9c86ba6b61b776ad559" Nov 28 07:37:47 crc kubenswrapper[4922]: I1128 07:37:47.055353 4922 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-vshwv"] Nov 28 07:37:47 crc kubenswrapper[4922]: I1128 07:37:47.062486 4922 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-vshwv"] Nov 28 07:37:47 crc kubenswrapper[4922]: I1128 07:37:47.074710 4922 scope.go:117] "RemoveContainer" containerID="d97675ad11618a173eb90b4b40efbcada9a579c37253a40bc0b35e35067d8022" Nov 28 07:37:47 crc kubenswrapper[4922]: I1128 07:37:47.112105 4922 scope.go:117] "RemoveContainer" containerID="7cb26321705c5993c12140d9542527dcb7a7a1f42fb49e70eed1b7c781b01149" Nov 28 07:37:47 crc kubenswrapper[4922]: E1128 07:37:47.112964 4922 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"7cb26321705c5993c12140d9542527dcb7a7a1f42fb49e70eed1b7c781b01149\": container with ID starting with 7cb26321705c5993c12140d9542527dcb7a7a1f42fb49e70eed1b7c781b01149 not found: ID does not exist" containerID="7cb26321705c5993c12140d9542527dcb7a7a1f42fb49e70eed1b7c781b01149" Nov 28 07:37:47 crc kubenswrapper[4922]: I1128 07:37:47.113055 4922 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7cb26321705c5993c12140d9542527dcb7a7a1f42fb49e70eed1b7c781b01149"} err="failed to get container status \"7cb26321705c5993c12140d9542527dcb7a7a1f42fb49e70eed1b7c781b01149\": rpc error: code = NotFound desc = could not find container \"7cb26321705c5993c12140d9542527dcb7a7a1f42fb49e70eed1b7c781b01149\": container with ID starting with 7cb26321705c5993c12140d9542527dcb7a7a1f42fb49e70eed1b7c781b01149 not found: ID does not exist" Nov 28 07:37:47 crc kubenswrapper[4922]: I1128 07:37:47.113096 4922 scope.go:117] "RemoveContainer" containerID="c6eab2abf11ff37648879a76af4216b3760d120e5b45c9c86ba6b61b776ad559" Nov 28 07:37:47 crc kubenswrapper[4922]: E1128 07:37:47.114091 4922 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c6eab2abf11ff37648879a76af4216b3760d120e5b45c9c86ba6b61b776ad559\": container with ID starting with c6eab2abf11ff37648879a76af4216b3760d120e5b45c9c86ba6b61b776ad559 not found: ID does not exist" containerID="c6eab2abf11ff37648879a76af4216b3760d120e5b45c9c86ba6b61b776ad559" Nov 28 07:37:47 crc kubenswrapper[4922]: I1128 07:37:47.114194 4922 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c6eab2abf11ff37648879a76af4216b3760d120e5b45c9c86ba6b61b776ad559"} err="failed to get container status \"c6eab2abf11ff37648879a76af4216b3760d120e5b45c9c86ba6b61b776ad559\": rpc error: code = NotFound desc = could not find container \"c6eab2abf11ff37648879a76af4216b3760d120e5b45c9c86ba6b61b776ad559\": container with ID starting with c6eab2abf11ff37648879a76af4216b3760d120e5b45c9c86ba6b61b776ad559 not found: ID does not exist" Nov 28 07:37:47 crc kubenswrapper[4922]: I1128 07:37:47.114285 4922 scope.go:117] "RemoveContainer" 
containerID="d97675ad11618a173eb90b4b40efbcada9a579c37253a40bc0b35e35067d8022" Nov 28 07:37:47 crc kubenswrapper[4922]: E1128 07:37:47.115483 4922 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d97675ad11618a173eb90b4b40efbcada9a579c37253a40bc0b35e35067d8022\": container with ID starting with d97675ad11618a173eb90b4b40efbcada9a579c37253a40bc0b35e35067d8022 not found: ID does not exist" containerID="d97675ad11618a173eb90b4b40efbcada9a579c37253a40bc0b35e35067d8022" Nov 28 07:37:47 crc kubenswrapper[4922]: I1128 07:37:47.115562 4922 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d97675ad11618a173eb90b4b40efbcada9a579c37253a40bc0b35e35067d8022"} err="failed to get container status \"d97675ad11618a173eb90b4b40efbcada9a579c37253a40bc0b35e35067d8022\": rpc error: code = NotFound desc = could not find container \"d97675ad11618a173eb90b4b40efbcada9a579c37253a40bc0b35e35067d8022\": container with ID starting with d97675ad11618a173eb90b4b40efbcada9a579c37253a40bc0b35e35067d8022 not found: ID does not exist" Nov 28 07:37:47 crc kubenswrapper[4922]: I1128 07:37:47.418566 4922 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bf620c7f-343a-4960-b391-dcee0ff291d7" path="/var/lib/kubelet/pods/bf620c7f-343a-4960-b391-dcee0ff291d7/volumes" Nov 28 07:37:57 crc kubenswrapper[4922]: I1128 07:37:57.312273 4922 patch_prober.go:28] interesting pod/machine-config-daemon-h8wk6 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 07:37:57 crc kubenswrapper[4922]: I1128 07:37:57.313084 4922 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-h8wk6" podUID="0498340a-5e95-42bf-a0a6-8ac89a6b8858" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 07:38:27 crc kubenswrapper[4922]: I1128 07:38:27.311981 4922 patch_prober.go:28] interesting pod/machine-config-daemon-h8wk6 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 07:38:27 crc kubenswrapper[4922]: I1128 07:38:27.312706 4922 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-h8wk6" podUID="0498340a-5e95-42bf-a0a6-8ac89a6b8858" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 07:38:49 crc kubenswrapper[4922]: I1128 07:38:49.807744 4922 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-kqv64"] Nov 28 07:38:49 crc kubenswrapper[4922]: E1128 07:38:49.809328 4922 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bf620c7f-343a-4960-b391-dcee0ff291d7" containerName="registry-server" Nov 28 07:38:49 crc kubenswrapper[4922]: I1128 07:38:49.809390 4922 state_mem.go:107] "Deleted CPUSet assignment" podUID="bf620c7f-343a-4960-b391-dcee0ff291d7" containerName="registry-server" Nov 28 07:38:49 crc kubenswrapper[4922]: E1128 07:38:49.809416 4922 cpu_manager.go:410] "RemoveStaleState: 
removing container" podUID="bf620c7f-343a-4960-b391-dcee0ff291d7" containerName="extract-utilities" Nov 28 07:38:49 crc kubenswrapper[4922]: I1128 07:38:49.809433 4922 state_mem.go:107] "Deleted CPUSet assignment" podUID="bf620c7f-343a-4960-b391-dcee0ff291d7" containerName="extract-utilities" Nov 28 07:38:49 crc kubenswrapper[4922]: E1128 07:38:49.809472 4922 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bf620c7f-343a-4960-b391-dcee0ff291d7" containerName="extract-content" Nov 28 07:38:49 crc kubenswrapper[4922]: I1128 07:38:49.809492 4922 state_mem.go:107] "Deleted CPUSet assignment" podUID="bf620c7f-343a-4960-b391-dcee0ff291d7" containerName="extract-content" Nov 28 07:38:49 crc kubenswrapper[4922]: I1128 07:38:49.809839 4922 memory_manager.go:354] "RemoveStaleState removing state" podUID="bf620c7f-343a-4960-b391-dcee0ff291d7" containerName="registry-server" Nov 28 07:38:49 crc kubenswrapper[4922]: I1128 07:38:49.812213 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-kqv64" Nov 28 07:38:49 crc kubenswrapper[4922]: I1128 07:38:49.821948 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-kqv64"] Nov 28 07:38:49 crc kubenswrapper[4922]: I1128 07:38:49.841806 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/39b21e2d-6a77-41e5-9e96-3217bbec4892-utilities\") pod \"certified-operators-kqv64\" (UID: \"39b21e2d-6a77-41e5-9e96-3217bbec4892\") " pod="openshift-marketplace/certified-operators-kqv64" Nov 28 07:38:49 crc kubenswrapper[4922]: I1128 07:38:49.841907 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/39b21e2d-6a77-41e5-9e96-3217bbec4892-catalog-content\") pod \"certified-operators-kqv64\" (UID: \"39b21e2d-6a77-41e5-9e96-3217bbec4892\") " pod="openshift-marketplace/certified-operators-kqv64" Nov 28 07:38:49 crc kubenswrapper[4922]: I1128 07:38:49.842403 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-x7g67\" (UniqueName: \"kubernetes.io/projected/39b21e2d-6a77-41e5-9e96-3217bbec4892-kube-api-access-x7g67\") pod \"certified-operators-kqv64\" (UID: \"39b21e2d-6a77-41e5-9e96-3217bbec4892\") " pod="openshift-marketplace/certified-operators-kqv64" Nov 28 07:38:49 crc kubenswrapper[4922]: I1128 07:38:49.943789 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-x7g67\" (UniqueName: \"kubernetes.io/projected/39b21e2d-6a77-41e5-9e96-3217bbec4892-kube-api-access-x7g67\") pod \"certified-operators-kqv64\" (UID: \"39b21e2d-6a77-41e5-9e96-3217bbec4892\") " pod="openshift-marketplace/certified-operators-kqv64" Nov 28 07:38:49 crc kubenswrapper[4922]: I1128 07:38:49.944155 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/39b21e2d-6a77-41e5-9e96-3217bbec4892-utilities\") pod \"certified-operators-kqv64\" (UID: \"39b21e2d-6a77-41e5-9e96-3217bbec4892\") " pod="openshift-marketplace/certified-operators-kqv64" Nov 28 07:38:49 crc kubenswrapper[4922]: I1128 07:38:49.944358 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: 
\"kubernetes.io/empty-dir/39b21e2d-6a77-41e5-9e96-3217bbec4892-catalog-content\") pod \"certified-operators-kqv64\" (UID: \"39b21e2d-6a77-41e5-9e96-3217bbec4892\") " pod="openshift-marketplace/certified-operators-kqv64" Nov 28 07:38:49 crc kubenswrapper[4922]: I1128 07:38:49.944822 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/39b21e2d-6a77-41e5-9e96-3217bbec4892-utilities\") pod \"certified-operators-kqv64\" (UID: \"39b21e2d-6a77-41e5-9e96-3217bbec4892\") " pod="openshift-marketplace/certified-operators-kqv64" Nov 28 07:38:49 crc kubenswrapper[4922]: I1128 07:38:49.944875 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/39b21e2d-6a77-41e5-9e96-3217bbec4892-catalog-content\") pod \"certified-operators-kqv64\" (UID: \"39b21e2d-6a77-41e5-9e96-3217bbec4892\") " pod="openshift-marketplace/certified-operators-kqv64" Nov 28 07:38:49 crc kubenswrapper[4922]: I1128 07:38:49.967463 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-x7g67\" (UniqueName: \"kubernetes.io/projected/39b21e2d-6a77-41e5-9e96-3217bbec4892-kube-api-access-x7g67\") pod \"certified-operators-kqv64\" (UID: \"39b21e2d-6a77-41e5-9e96-3217bbec4892\") " pod="openshift-marketplace/certified-operators-kqv64" Nov 28 07:38:50 crc kubenswrapper[4922]: I1128 07:38:50.173943 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-kqv64" Nov 28 07:38:50 crc kubenswrapper[4922]: I1128 07:38:50.667312 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-kqv64"] Nov 28 07:38:51 crc kubenswrapper[4922]: I1128 07:38:51.261314 4922 generic.go:334] "Generic (PLEG): container finished" podID="39b21e2d-6a77-41e5-9e96-3217bbec4892" containerID="c8311e5d9002d4f334946952641adf047d5da25c4af3160a0d5fd0d736e3da6f" exitCode=0 Nov 28 07:38:51 crc kubenswrapper[4922]: I1128 07:38:51.261415 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-kqv64" event={"ID":"39b21e2d-6a77-41e5-9e96-3217bbec4892","Type":"ContainerDied","Data":"c8311e5d9002d4f334946952641adf047d5da25c4af3160a0d5fd0d736e3da6f"} Nov 28 07:38:51 crc kubenswrapper[4922]: I1128 07:38:51.261843 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-kqv64" event={"ID":"39b21e2d-6a77-41e5-9e96-3217bbec4892","Type":"ContainerStarted","Data":"083ea532789a39e10a7c685ab1f6a4f7d5195754861cd9c161fdb4afb17b7ff6"} Nov 28 07:38:52 crc kubenswrapper[4922]: I1128 07:38:52.277430 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-kqv64" event={"ID":"39b21e2d-6a77-41e5-9e96-3217bbec4892","Type":"ContainerStarted","Data":"519815f86d76c4d50197edf715a76c2ffd880d66f9398604ef38eb9764847bb7"} Nov 28 07:38:53 crc kubenswrapper[4922]: I1128 07:38:53.288260 4922 generic.go:334] "Generic (PLEG): container finished" podID="39b21e2d-6a77-41e5-9e96-3217bbec4892" containerID="519815f86d76c4d50197edf715a76c2ffd880d66f9398604ef38eb9764847bb7" exitCode=0 Nov 28 07:38:53 crc kubenswrapper[4922]: I1128 07:38:53.288345 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-kqv64" event={"ID":"39b21e2d-6a77-41e5-9e96-3217bbec4892","Type":"ContainerDied","Data":"519815f86d76c4d50197edf715a76c2ffd880d66f9398604ef38eb9764847bb7"} 
Nov 28 07:38:54 crc kubenswrapper[4922]: I1128 07:38:54.301108 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-kqv64" event={"ID":"39b21e2d-6a77-41e5-9e96-3217bbec4892","Type":"ContainerStarted","Data":"e71dc4514a0a44740d2773515fac0a2f153c5ad32ac0e91f835275d3df92dd1c"} Nov 28 07:38:54 crc kubenswrapper[4922]: I1128 07:38:54.329989 4922 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-kqv64" podStartSLOduration=2.8436342740000002 podStartE2EDuration="5.32996418s" podCreationTimestamp="2025-11-28 07:38:49 +0000 UTC" firstStartedPulling="2025-11-28 07:38:51.264871503 +0000 UTC m=+2776.185267125" lastFinishedPulling="2025-11-28 07:38:53.751201439 +0000 UTC m=+2778.671597031" observedRunningTime="2025-11-28 07:38:54.32663756 +0000 UTC m=+2779.247033172" watchObservedRunningTime="2025-11-28 07:38:54.32996418 +0000 UTC m=+2779.250359792" Nov 28 07:38:57 crc kubenswrapper[4922]: I1128 07:38:57.312752 4922 patch_prober.go:28] interesting pod/machine-config-daemon-h8wk6 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 07:38:57 crc kubenswrapper[4922]: I1128 07:38:57.313296 4922 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-h8wk6" podUID="0498340a-5e95-42bf-a0a6-8ac89a6b8858" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 07:38:57 crc kubenswrapper[4922]: I1128 07:38:57.313379 4922 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-h8wk6" Nov 28 07:38:57 crc kubenswrapper[4922]: I1128 07:38:57.314454 4922 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"bdd4b7e3a8a1bb8ee8f0c20c17ff0cfd5a46f195485e9f2be9c45464dabf6d09"} pod="openshift-machine-config-operator/machine-config-daemon-h8wk6" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 28 07:38:57 crc kubenswrapper[4922]: I1128 07:38:57.314580 4922 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-h8wk6" podUID="0498340a-5e95-42bf-a0a6-8ac89a6b8858" containerName="machine-config-daemon" containerID="cri-o://bdd4b7e3a8a1bb8ee8f0c20c17ff0cfd5a46f195485e9f2be9c45464dabf6d09" gracePeriod=600 Nov 28 07:38:58 crc kubenswrapper[4922]: I1128 07:38:58.341546 4922 generic.go:334] "Generic (PLEG): container finished" podID="0498340a-5e95-42bf-a0a6-8ac89a6b8858" containerID="bdd4b7e3a8a1bb8ee8f0c20c17ff0cfd5a46f195485e9f2be9c45464dabf6d09" exitCode=0 Nov 28 07:38:58 crc kubenswrapper[4922]: I1128 07:38:58.341602 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-h8wk6" event={"ID":"0498340a-5e95-42bf-a0a6-8ac89a6b8858","Type":"ContainerDied","Data":"bdd4b7e3a8a1bb8ee8f0c20c17ff0cfd5a46f195485e9f2be9c45464dabf6d09"} Nov 28 07:38:58 crc kubenswrapper[4922]: I1128 07:38:58.341851 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-h8wk6" 
event={"ID":"0498340a-5e95-42bf-a0a6-8ac89a6b8858","Type":"ContainerStarted","Data":"fd445a80f0133782577e38815133e01b704316845be72e79195e07ba29f4777b"} Nov 28 07:38:58 crc kubenswrapper[4922]: I1128 07:38:58.341872 4922 scope.go:117] "RemoveContainer" containerID="cdad39abe9139ea49bf9818f51213625cb00da5121e90b9df2fb542689a47216" Nov 28 07:39:00 crc kubenswrapper[4922]: I1128 07:39:00.174824 4922 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-kqv64" Nov 28 07:39:00 crc kubenswrapper[4922]: I1128 07:39:00.175269 4922 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-kqv64" Nov 28 07:39:00 crc kubenswrapper[4922]: I1128 07:39:00.246766 4922 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-kqv64" Nov 28 07:39:00 crc kubenswrapper[4922]: I1128 07:39:00.418551 4922 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-kqv64" Nov 28 07:39:00 crc kubenswrapper[4922]: I1128 07:39:00.484697 4922 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-kqv64"] Nov 28 07:39:02 crc kubenswrapper[4922]: I1128 07:39:02.382866 4922 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-kqv64" podUID="39b21e2d-6a77-41e5-9e96-3217bbec4892" containerName="registry-server" containerID="cri-o://e71dc4514a0a44740d2773515fac0a2f153c5ad32ac0e91f835275d3df92dd1c" gracePeriod=2 Nov 28 07:39:03 crc kubenswrapper[4922]: I1128 07:39:03.221676 4922 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-jf7ss"] Nov 28 07:39:03 crc kubenswrapper[4922]: I1128 07:39:03.224852 4922 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-jf7ss" Nov 28 07:39:03 crc kubenswrapper[4922]: I1128 07:39:03.236952 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-jf7ss"] Nov 28 07:39:03 crc kubenswrapper[4922]: I1128 07:39:03.278173 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a59d4927-1399-47de-8667-8902a25be055-catalog-content\") pod \"redhat-operators-jf7ss\" (UID: \"a59d4927-1399-47de-8667-8902a25be055\") " pod="openshift-marketplace/redhat-operators-jf7ss" Nov 28 07:39:03 crc kubenswrapper[4922]: I1128 07:39:03.278258 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rgcsp\" (UniqueName: \"kubernetes.io/projected/a59d4927-1399-47de-8667-8902a25be055-kube-api-access-rgcsp\") pod \"redhat-operators-jf7ss\" (UID: \"a59d4927-1399-47de-8667-8902a25be055\") " pod="openshift-marketplace/redhat-operators-jf7ss" Nov 28 07:39:03 crc kubenswrapper[4922]: I1128 07:39:03.278394 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a59d4927-1399-47de-8667-8902a25be055-utilities\") pod \"redhat-operators-jf7ss\" (UID: \"a59d4927-1399-47de-8667-8902a25be055\") " pod="openshift-marketplace/redhat-operators-jf7ss" Nov 28 07:39:03 crc kubenswrapper[4922]: I1128 07:39:03.379640 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a59d4927-1399-47de-8667-8902a25be055-utilities\") pod \"redhat-operators-jf7ss\" (UID: \"a59d4927-1399-47de-8667-8902a25be055\") " pod="openshift-marketplace/redhat-operators-jf7ss" Nov 28 07:39:03 crc kubenswrapper[4922]: I1128 07:39:03.380026 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a59d4927-1399-47de-8667-8902a25be055-catalog-content\") pod \"redhat-operators-jf7ss\" (UID: \"a59d4927-1399-47de-8667-8902a25be055\") " pod="openshift-marketplace/redhat-operators-jf7ss" Nov 28 07:39:03 crc kubenswrapper[4922]: I1128 07:39:03.380077 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rgcsp\" (UniqueName: \"kubernetes.io/projected/a59d4927-1399-47de-8667-8902a25be055-kube-api-access-rgcsp\") pod \"redhat-operators-jf7ss\" (UID: \"a59d4927-1399-47de-8667-8902a25be055\") " pod="openshift-marketplace/redhat-operators-jf7ss" Nov 28 07:39:03 crc kubenswrapper[4922]: I1128 07:39:03.380295 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a59d4927-1399-47de-8667-8902a25be055-utilities\") pod \"redhat-operators-jf7ss\" (UID: \"a59d4927-1399-47de-8667-8902a25be055\") " pod="openshift-marketplace/redhat-operators-jf7ss" Nov 28 07:39:03 crc kubenswrapper[4922]: I1128 07:39:03.380486 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a59d4927-1399-47de-8667-8902a25be055-catalog-content\") pod \"redhat-operators-jf7ss\" (UID: \"a59d4927-1399-47de-8667-8902a25be055\") " pod="openshift-marketplace/redhat-operators-jf7ss" Nov 28 07:39:03 crc kubenswrapper[4922]: I1128 07:39:03.392033 4922 generic.go:334] "Generic (PLEG): container finished" 
podID="39b21e2d-6a77-41e5-9e96-3217bbec4892" containerID="e71dc4514a0a44740d2773515fac0a2f153c5ad32ac0e91f835275d3df92dd1c" exitCode=0 Nov 28 07:39:03 crc kubenswrapper[4922]: I1128 07:39:03.392084 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-kqv64" event={"ID":"39b21e2d-6a77-41e5-9e96-3217bbec4892","Type":"ContainerDied","Data":"e71dc4514a0a44740d2773515fac0a2f153c5ad32ac0e91f835275d3df92dd1c"} Nov 28 07:39:03 crc kubenswrapper[4922]: I1128 07:39:03.392117 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-kqv64" event={"ID":"39b21e2d-6a77-41e5-9e96-3217bbec4892","Type":"ContainerDied","Data":"083ea532789a39e10a7c685ab1f6a4f7d5195754861cd9c161fdb4afb17b7ff6"} Nov 28 07:39:03 crc kubenswrapper[4922]: I1128 07:39:03.392130 4922 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="083ea532789a39e10a7c685ab1f6a4f7d5195754861cd9c161fdb4afb17b7ff6" Nov 28 07:39:03 crc kubenswrapper[4922]: I1128 07:39:03.398928 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rgcsp\" (UniqueName: \"kubernetes.io/projected/a59d4927-1399-47de-8667-8902a25be055-kube-api-access-rgcsp\") pod \"redhat-operators-jf7ss\" (UID: \"a59d4927-1399-47de-8667-8902a25be055\") " pod="openshift-marketplace/redhat-operators-jf7ss" Nov 28 07:39:03 crc kubenswrapper[4922]: I1128 07:39:03.445580 4922 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-kqv64" Nov 28 07:39:03 crc kubenswrapper[4922]: I1128 07:39:03.482763 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x7g67\" (UniqueName: \"kubernetes.io/projected/39b21e2d-6a77-41e5-9e96-3217bbec4892-kube-api-access-x7g67\") pod \"39b21e2d-6a77-41e5-9e96-3217bbec4892\" (UID: \"39b21e2d-6a77-41e5-9e96-3217bbec4892\") " Nov 28 07:39:03 crc kubenswrapper[4922]: I1128 07:39:03.482965 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/39b21e2d-6a77-41e5-9e96-3217bbec4892-utilities\") pod \"39b21e2d-6a77-41e5-9e96-3217bbec4892\" (UID: \"39b21e2d-6a77-41e5-9e96-3217bbec4892\") " Nov 28 07:39:03 crc kubenswrapper[4922]: I1128 07:39:03.483004 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/39b21e2d-6a77-41e5-9e96-3217bbec4892-catalog-content\") pod \"39b21e2d-6a77-41e5-9e96-3217bbec4892\" (UID: \"39b21e2d-6a77-41e5-9e96-3217bbec4892\") " Nov 28 07:39:03 crc kubenswrapper[4922]: I1128 07:39:03.485459 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/39b21e2d-6a77-41e5-9e96-3217bbec4892-utilities" (OuterVolumeSpecName: "utilities") pod "39b21e2d-6a77-41e5-9e96-3217bbec4892" (UID: "39b21e2d-6a77-41e5-9e96-3217bbec4892"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 07:39:03 crc kubenswrapper[4922]: I1128 07:39:03.489365 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/39b21e2d-6a77-41e5-9e96-3217bbec4892-kube-api-access-x7g67" (OuterVolumeSpecName: "kube-api-access-x7g67") pod "39b21e2d-6a77-41e5-9e96-3217bbec4892" (UID: "39b21e2d-6a77-41e5-9e96-3217bbec4892"). InnerVolumeSpecName "kube-api-access-x7g67". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 07:39:03 crc kubenswrapper[4922]: I1128 07:39:03.535994 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/39b21e2d-6a77-41e5-9e96-3217bbec4892-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "39b21e2d-6a77-41e5-9e96-3217bbec4892" (UID: "39b21e2d-6a77-41e5-9e96-3217bbec4892"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 07:39:03 crc kubenswrapper[4922]: I1128 07:39:03.565241 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-jf7ss" Nov 28 07:39:03 crc kubenswrapper[4922]: I1128 07:39:03.584023 4922 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x7g67\" (UniqueName: \"kubernetes.io/projected/39b21e2d-6a77-41e5-9e96-3217bbec4892-kube-api-access-x7g67\") on node \"crc\" DevicePath \"\"" Nov 28 07:39:03 crc kubenswrapper[4922]: I1128 07:39:03.584046 4922 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/39b21e2d-6a77-41e5-9e96-3217bbec4892-utilities\") on node \"crc\" DevicePath \"\"" Nov 28 07:39:03 crc kubenswrapper[4922]: I1128 07:39:03.584055 4922 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/39b21e2d-6a77-41e5-9e96-3217bbec4892-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 28 07:39:03 crc kubenswrapper[4922]: I1128 07:39:03.775036 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-jf7ss"] Nov 28 07:39:04 crc kubenswrapper[4922]: I1128 07:39:04.407390 4922 generic.go:334] "Generic (PLEG): container finished" podID="a59d4927-1399-47de-8667-8902a25be055" containerID="27d0b5c9eea714aba1c7d9a7acdcbeb3a7a51579513a676e9aa6b9e23d7659dc" exitCode=0 Nov 28 07:39:04 crc kubenswrapper[4922]: I1128 07:39:04.407461 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-jf7ss" event={"ID":"a59d4927-1399-47de-8667-8902a25be055","Type":"ContainerDied","Data":"27d0b5c9eea714aba1c7d9a7acdcbeb3a7a51579513a676e9aa6b9e23d7659dc"} Nov 28 07:39:04 crc kubenswrapper[4922]: I1128 07:39:04.407773 4922 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-kqv64" Nov 28 07:39:04 crc kubenswrapper[4922]: I1128 07:39:04.407790 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-jf7ss" event={"ID":"a59d4927-1399-47de-8667-8902a25be055","Type":"ContainerStarted","Data":"7be6558d06e1812cb13193e14d34a8aa473bd2e424b20e632584fb3b41c6f594"} Nov 28 07:39:04 crc kubenswrapper[4922]: I1128 07:39:04.455713 4922 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-kqv64"] Nov 28 07:39:04 crc kubenswrapper[4922]: I1128 07:39:04.460479 4922 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-kqv64"] Nov 28 07:39:05 crc kubenswrapper[4922]: I1128 07:39:05.412673 4922 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="39b21e2d-6a77-41e5-9e96-3217bbec4892" path="/var/lib/kubelet/pods/39b21e2d-6a77-41e5-9e96-3217bbec4892/volumes" Nov 28 07:39:06 crc kubenswrapper[4922]: I1128 07:39:06.438844 4922 generic.go:334] "Generic (PLEG): container finished" podID="a59d4927-1399-47de-8667-8902a25be055" containerID="1eb0daea7953f426b592ca4afe622c186447ce96d7d448ad083f61b0fabbdb3a" exitCode=0 Nov 28 07:39:06 crc kubenswrapper[4922]: I1128 07:39:06.438903 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-jf7ss" event={"ID":"a59d4927-1399-47de-8667-8902a25be055","Type":"ContainerDied","Data":"1eb0daea7953f426b592ca4afe622c186447ce96d7d448ad083f61b0fabbdb3a"} Nov 28 07:39:07 crc kubenswrapper[4922]: I1128 07:39:07.455393 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-jf7ss" event={"ID":"a59d4927-1399-47de-8667-8902a25be055","Type":"ContainerStarted","Data":"76b921a90fd13ab1de09964eb86f14a4d2ffa4cc570a7cbce3115ec44932eae3"} Nov 28 07:39:07 crc kubenswrapper[4922]: I1128 07:39:07.487246 4922 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-jf7ss" podStartSLOduration=1.952208355 podStartE2EDuration="4.487194599s" podCreationTimestamp="2025-11-28 07:39:03 +0000 UTC" firstStartedPulling="2025-11-28 07:39:04.413386196 +0000 UTC m=+2789.333781778" lastFinishedPulling="2025-11-28 07:39:06.94837242 +0000 UTC m=+2791.868768022" observedRunningTime="2025-11-28 07:39:07.479726226 +0000 UTC m=+2792.400121838" watchObservedRunningTime="2025-11-28 07:39:07.487194599 +0000 UTC m=+2792.407590211" Nov 28 07:39:13 crc kubenswrapper[4922]: I1128 07:39:13.566524 4922 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-jf7ss" Nov 28 07:39:13 crc kubenswrapper[4922]: I1128 07:39:13.567275 4922 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-jf7ss" Nov 28 07:39:14 crc kubenswrapper[4922]: I1128 07:39:14.621162 4922 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-jf7ss" podUID="a59d4927-1399-47de-8667-8902a25be055" containerName="registry-server" probeResult="failure" output=< Nov 28 07:39:14 crc kubenswrapper[4922]: timeout: failed to connect service ":50051" within 1s Nov 28 07:39:14 crc kubenswrapper[4922]: > Nov 28 07:39:23 crc kubenswrapper[4922]: I1128 07:39:23.647837 4922 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-jf7ss" Nov 28 07:39:23 crc kubenswrapper[4922]: I1128 
07:39:23.734201 4922 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-jf7ss" Nov 28 07:39:23 crc kubenswrapper[4922]: I1128 07:39:23.900287 4922 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-jf7ss"] Nov 28 07:39:25 crc kubenswrapper[4922]: I1128 07:39:25.628857 4922 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-jf7ss" podUID="a59d4927-1399-47de-8667-8902a25be055" containerName="registry-server" containerID="cri-o://76b921a90fd13ab1de09964eb86f14a4d2ffa4cc570a7cbce3115ec44932eae3" gracePeriod=2 Nov 28 07:39:26 crc kubenswrapper[4922]: I1128 07:39:26.065065 4922 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-jf7ss" Nov 28 07:39:26 crc kubenswrapper[4922]: I1128 07:39:26.155635 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a59d4927-1399-47de-8667-8902a25be055-catalog-content\") pod \"a59d4927-1399-47de-8667-8902a25be055\" (UID: \"a59d4927-1399-47de-8667-8902a25be055\") " Nov 28 07:39:26 crc kubenswrapper[4922]: I1128 07:39:26.155954 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a59d4927-1399-47de-8667-8902a25be055-utilities\") pod \"a59d4927-1399-47de-8667-8902a25be055\" (UID: \"a59d4927-1399-47de-8667-8902a25be055\") " Nov 28 07:39:26 crc kubenswrapper[4922]: I1128 07:39:26.155993 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rgcsp\" (UniqueName: \"kubernetes.io/projected/a59d4927-1399-47de-8667-8902a25be055-kube-api-access-rgcsp\") pod \"a59d4927-1399-47de-8667-8902a25be055\" (UID: \"a59d4927-1399-47de-8667-8902a25be055\") " Nov 28 07:39:26 crc kubenswrapper[4922]: I1128 07:39:26.157522 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a59d4927-1399-47de-8667-8902a25be055-utilities" (OuterVolumeSpecName: "utilities") pod "a59d4927-1399-47de-8667-8902a25be055" (UID: "a59d4927-1399-47de-8667-8902a25be055"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 07:39:26 crc kubenswrapper[4922]: I1128 07:39:26.161708 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a59d4927-1399-47de-8667-8902a25be055-kube-api-access-rgcsp" (OuterVolumeSpecName: "kube-api-access-rgcsp") pod "a59d4927-1399-47de-8667-8902a25be055" (UID: "a59d4927-1399-47de-8667-8902a25be055"). InnerVolumeSpecName "kube-api-access-rgcsp". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 07:39:26 crc kubenswrapper[4922]: I1128 07:39:26.257400 4922 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a59d4927-1399-47de-8667-8902a25be055-utilities\") on node \"crc\" DevicePath \"\"" Nov 28 07:39:26 crc kubenswrapper[4922]: I1128 07:39:26.257438 4922 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rgcsp\" (UniqueName: \"kubernetes.io/projected/a59d4927-1399-47de-8667-8902a25be055-kube-api-access-rgcsp\") on node \"crc\" DevicePath \"\"" Nov 28 07:39:26 crc kubenswrapper[4922]: I1128 07:39:26.271412 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a59d4927-1399-47de-8667-8902a25be055-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "a59d4927-1399-47de-8667-8902a25be055" (UID: "a59d4927-1399-47de-8667-8902a25be055"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 07:39:26 crc kubenswrapper[4922]: I1128 07:39:26.359209 4922 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a59d4927-1399-47de-8667-8902a25be055-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 28 07:39:26 crc kubenswrapper[4922]: I1128 07:39:26.650079 4922 generic.go:334] "Generic (PLEG): container finished" podID="a59d4927-1399-47de-8667-8902a25be055" containerID="76b921a90fd13ab1de09964eb86f14a4d2ffa4cc570a7cbce3115ec44932eae3" exitCode=0 Nov 28 07:39:26 crc kubenswrapper[4922]: I1128 07:39:26.650142 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-jf7ss" event={"ID":"a59d4927-1399-47de-8667-8902a25be055","Type":"ContainerDied","Data":"76b921a90fd13ab1de09964eb86f14a4d2ffa4cc570a7cbce3115ec44932eae3"} Nov 28 07:39:26 crc kubenswrapper[4922]: I1128 07:39:26.650179 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-jf7ss" event={"ID":"a59d4927-1399-47de-8667-8902a25be055","Type":"ContainerDied","Data":"7be6558d06e1812cb13193e14d34a8aa473bd2e424b20e632584fb3b41c6f594"} Nov 28 07:39:26 crc kubenswrapper[4922]: I1128 07:39:26.650205 4922 scope.go:117] "RemoveContainer" containerID="76b921a90fd13ab1de09964eb86f14a4d2ffa4cc570a7cbce3115ec44932eae3" Nov 28 07:39:26 crc kubenswrapper[4922]: I1128 07:39:26.650433 4922 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-jf7ss" Nov 28 07:39:26 crc kubenswrapper[4922]: I1128 07:39:26.677719 4922 scope.go:117] "RemoveContainer" containerID="1eb0daea7953f426b592ca4afe622c186447ce96d7d448ad083f61b0fabbdb3a" Nov 28 07:39:26 crc kubenswrapper[4922]: I1128 07:39:26.711523 4922 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-jf7ss"] Nov 28 07:39:26 crc kubenswrapper[4922]: I1128 07:39:26.719021 4922 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-jf7ss"] Nov 28 07:39:26 crc kubenswrapper[4922]: I1128 07:39:26.725438 4922 scope.go:117] "RemoveContainer" containerID="27d0b5c9eea714aba1c7d9a7acdcbeb3a7a51579513a676e9aa6b9e23d7659dc" Nov 28 07:39:26 crc kubenswrapper[4922]: I1128 07:39:26.746029 4922 scope.go:117] "RemoveContainer" containerID="76b921a90fd13ab1de09964eb86f14a4d2ffa4cc570a7cbce3115ec44932eae3" Nov 28 07:39:26 crc kubenswrapper[4922]: E1128 07:39:26.747029 4922 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"76b921a90fd13ab1de09964eb86f14a4d2ffa4cc570a7cbce3115ec44932eae3\": container with ID starting with 76b921a90fd13ab1de09964eb86f14a4d2ffa4cc570a7cbce3115ec44932eae3 not found: ID does not exist" containerID="76b921a90fd13ab1de09964eb86f14a4d2ffa4cc570a7cbce3115ec44932eae3" Nov 28 07:39:26 crc kubenswrapper[4922]: I1128 07:39:26.747111 4922 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"76b921a90fd13ab1de09964eb86f14a4d2ffa4cc570a7cbce3115ec44932eae3"} err="failed to get container status \"76b921a90fd13ab1de09964eb86f14a4d2ffa4cc570a7cbce3115ec44932eae3\": rpc error: code = NotFound desc = could not find container \"76b921a90fd13ab1de09964eb86f14a4d2ffa4cc570a7cbce3115ec44932eae3\": container with ID starting with 76b921a90fd13ab1de09964eb86f14a4d2ffa4cc570a7cbce3115ec44932eae3 not found: ID does not exist" Nov 28 07:39:26 crc kubenswrapper[4922]: I1128 07:39:26.747149 4922 scope.go:117] "RemoveContainer" containerID="1eb0daea7953f426b592ca4afe622c186447ce96d7d448ad083f61b0fabbdb3a" Nov 28 07:39:26 crc kubenswrapper[4922]: E1128 07:39:26.747922 4922 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1eb0daea7953f426b592ca4afe622c186447ce96d7d448ad083f61b0fabbdb3a\": container with ID starting with 1eb0daea7953f426b592ca4afe622c186447ce96d7d448ad083f61b0fabbdb3a not found: ID does not exist" containerID="1eb0daea7953f426b592ca4afe622c186447ce96d7d448ad083f61b0fabbdb3a" Nov 28 07:39:26 crc kubenswrapper[4922]: I1128 07:39:26.747991 4922 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1eb0daea7953f426b592ca4afe622c186447ce96d7d448ad083f61b0fabbdb3a"} err="failed to get container status \"1eb0daea7953f426b592ca4afe622c186447ce96d7d448ad083f61b0fabbdb3a\": rpc error: code = NotFound desc = could not find container \"1eb0daea7953f426b592ca4afe622c186447ce96d7d448ad083f61b0fabbdb3a\": container with ID starting with 1eb0daea7953f426b592ca4afe622c186447ce96d7d448ad083f61b0fabbdb3a not found: ID does not exist" Nov 28 07:39:26 crc kubenswrapper[4922]: I1128 07:39:26.748031 4922 scope.go:117] "RemoveContainer" containerID="27d0b5c9eea714aba1c7d9a7acdcbeb3a7a51579513a676e9aa6b9e23d7659dc" Nov 28 07:39:26 crc kubenswrapper[4922]: E1128 07:39:26.748512 4922 log.go:32] "ContainerStatus from runtime service failed" 
err="rpc error: code = NotFound desc = could not find container \"27d0b5c9eea714aba1c7d9a7acdcbeb3a7a51579513a676e9aa6b9e23d7659dc\": container with ID starting with 27d0b5c9eea714aba1c7d9a7acdcbeb3a7a51579513a676e9aa6b9e23d7659dc not found: ID does not exist" containerID="27d0b5c9eea714aba1c7d9a7acdcbeb3a7a51579513a676e9aa6b9e23d7659dc" Nov 28 07:39:26 crc kubenswrapper[4922]: I1128 07:39:26.748596 4922 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"27d0b5c9eea714aba1c7d9a7acdcbeb3a7a51579513a676e9aa6b9e23d7659dc"} err="failed to get container status \"27d0b5c9eea714aba1c7d9a7acdcbeb3a7a51579513a676e9aa6b9e23d7659dc\": rpc error: code = NotFound desc = could not find container \"27d0b5c9eea714aba1c7d9a7acdcbeb3a7a51579513a676e9aa6b9e23d7659dc\": container with ID starting with 27d0b5c9eea714aba1c7d9a7acdcbeb3a7a51579513a676e9aa6b9e23d7659dc not found: ID does not exist" Nov 28 07:39:27 crc kubenswrapper[4922]: I1128 07:39:27.415091 4922 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a59d4927-1399-47de-8667-8902a25be055" path="/var/lib/kubelet/pods/a59d4927-1399-47de-8667-8902a25be055/volumes" Nov 28 07:40:03 crc kubenswrapper[4922]: I1128 07:40:03.602396 4922 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-4wtcs"] Nov 28 07:40:03 crc kubenswrapper[4922]: E1128 07:40:03.603703 4922 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="39b21e2d-6a77-41e5-9e96-3217bbec4892" containerName="extract-content" Nov 28 07:40:03 crc kubenswrapper[4922]: I1128 07:40:03.603730 4922 state_mem.go:107] "Deleted CPUSet assignment" podUID="39b21e2d-6a77-41e5-9e96-3217bbec4892" containerName="extract-content" Nov 28 07:40:03 crc kubenswrapper[4922]: E1128 07:40:03.603759 4922 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a59d4927-1399-47de-8667-8902a25be055" containerName="extract-content" Nov 28 07:40:03 crc kubenswrapper[4922]: I1128 07:40:03.603774 4922 state_mem.go:107] "Deleted CPUSet assignment" podUID="a59d4927-1399-47de-8667-8902a25be055" containerName="extract-content" Nov 28 07:40:03 crc kubenswrapper[4922]: E1128 07:40:03.603806 4922 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="39b21e2d-6a77-41e5-9e96-3217bbec4892" containerName="registry-server" Nov 28 07:40:03 crc kubenswrapper[4922]: I1128 07:40:03.603820 4922 state_mem.go:107] "Deleted CPUSet assignment" podUID="39b21e2d-6a77-41e5-9e96-3217bbec4892" containerName="registry-server" Nov 28 07:40:03 crc kubenswrapper[4922]: E1128 07:40:03.603854 4922 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="39b21e2d-6a77-41e5-9e96-3217bbec4892" containerName="extract-utilities" Nov 28 07:40:03 crc kubenswrapper[4922]: I1128 07:40:03.603867 4922 state_mem.go:107] "Deleted CPUSet assignment" podUID="39b21e2d-6a77-41e5-9e96-3217bbec4892" containerName="extract-utilities" Nov 28 07:40:03 crc kubenswrapper[4922]: E1128 07:40:03.603885 4922 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a59d4927-1399-47de-8667-8902a25be055" containerName="extract-utilities" Nov 28 07:40:03 crc kubenswrapper[4922]: I1128 07:40:03.603897 4922 state_mem.go:107] "Deleted CPUSet assignment" podUID="a59d4927-1399-47de-8667-8902a25be055" containerName="extract-utilities" Nov 28 07:40:03 crc kubenswrapper[4922]: E1128 07:40:03.603914 4922 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a59d4927-1399-47de-8667-8902a25be055" containerName="registry-server" Nov 
28 07:40:03 crc kubenswrapper[4922]: I1128 07:40:03.603926 4922 state_mem.go:107] "Deleted CPUSet assignment" podUID="a59d4927-1399-47de-8667-8902a25be055" containerName="registry-server" Nov 28 07:40:03 crc kubenswrapper[4922]: I1128 07:40:03.604170 4922 memory_manager.go:354] "RemoveStaleState removing state" podUID="a59d4927-1399-47de-8667-8902a25be055" containerName="registry-server" Nov 28 07:40:03 crc kubenswrapper[4922]: I1128 07:40:03.604201 4922 memory_manager.go:354] "RemoveStaleState removing state" podUID="39b21e2d-6a77-41e5-9e96-3217bbec4892" containerName="registry-server" Nov 28 07:40:03 crc kubenswrapper[4922]: I1128 07:40:03.606718 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-4wtcs" Nov 28 07:40:03 crc kubenswrapper[4922]: I1128 07:40:03.613629 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-4wtcs"] Nov 28 07:40:03 crc kubenswrapper[4922]: I1128 07:40:03.776329 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fp7wd\" (UniqueName: \"kubernetes.io/projected/b7918d4d-1d77-4f21-b7f3-7d7eff8b2824-kube-api-access-fp7wd\") pod \"community-operators-4wtcs\" (UID: \"b7918d4d-1d77-4f21-b7f3-7d7eff8b2824\") " pod="openshift-marketplace/community-operators-4wtcs" Nov 28 07:40:03 crc kubenswrapper[4922]: I1128 07:40:03.776505 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b7918d4d-1d77-4f21-b7f3-7d7eff8b2824-catalog-content\") pod \"community-operators-4wtcs\" (UID: \"b7918d4d-1d77-4f21-b7f3-7d7eff8b2824\") " pod="openshift-marketplace/community-operators-4wtcs" Nov 28 07:40:03 crc kubenswrapper[4922]: I1128 07:40:03.776724 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b7918d4d-1d77-4f21-b7f3-7d7eff8b2824-utilities\") pod \"community-operators-4wtcs\" (UID: \"b7918d4d-1d77-4f21-b7f3-7d7eff8b2824\") " pod="openshift-marketplace/community-operators-4wtcs" Nov 28 07:40:03 crc kubenswrapper[4922]: I1128 07:40:03.878441 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b7918d4d-1d77-4f21-b7f3-7d7eff8b2824-utilities\") pod \"community-operators-4wtcs\" (UID: \"b7918d4d-1d77-4f21-b7f3-7d7eff8b2824\") " pod="openshift-marketplace/community-operators-4wtcs" Nov 28 07:40:03 crc kubenswrapper[4922]: I1128 07:40:03.878539 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fp7wd\" (UniqueName: \"kubernetes.io/projected/b7918d4d-1d77-4f21-b7f3-7d7eff8b2824-kube-api-access-fp7wd\") pod \"community-operators-4wtcs\" (UID: \"b7918d4d-1d77-4f21-b7f3-7d7eff8b2824\") " pod="openshift-marketplace/community-operators-4wtcs" Nov 28 07:40:03 crc kubenswrapper[4922]: I1128 07:40:03.878638 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b7918d4d-1d77-4f21-b7f3-7d7eff8b2824-catalog-content\") pod \"community-operators-4wtcs\" (UID: \"b7918d4d-1d77-4f21-b7f3-7d7eff8b2824\") " pod="openshift-marketplace/community-operators-4wtcs" Nov 28 07:40:03 crc kubenswrapper[4922]: I1128 07:40:03.878938 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" 
(UniqueName: \"kubernetes.io/empty-dir/b7918d4d-1d77-4f21-b7f3-7d7eff8b2824-utilities\") pod \"community-operators-4wtcs\" (UID: \"b7918d4d-1d77-4f21-b7f3-7d7eff8b2824\") " pod="openshift-marketplace/community-operators-4wtcs" Nov 28 07:40:03 crc kubenswrapper[4922]: I1128 07:40:03.879298 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b7918d4d-1d77-4f21-b7f3-7d7eff8b2824-catalog-content\") pod \"community-operators-4wtcs\" (UID: \"b7918d4d-1d77-4f21-b7f3-7d7eff8b2824\") " pod="openshift-marketplace/community-operators-4wtcs" Nov 28 07:40:03 crc kubenswrapper[4922]: I1128 07:40:03.906365 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fp7wd\" (UniqueName: \"kubernetes.io/projected/b7918d4d-1d77-4f21-b7f3-7d7eff8b2824-kube-api-access-fp7wd\") pod \"community-operators-4wtcs\" (UID: \"b7918d4d-1d77-4f21-b7f3-7d7eff8b2824\") " pod="openshift-marketplace/community-operators-4wtcs" Nov 28 07:40:03 crc kubenswrapper[4922]: I1128 07:40:03.944109 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-4wtcs" Nov 28 07:40:04 crc kubenswrapper[4922]: I1128 07:40:04.226276 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-4wtcs"] Nov 28 07:40:05 crc kubenswrapper[4922]: I1128 07:40:05.038873 4922 generic.go:334] "Generic (PLEG): container finished" podID="b7918d4d-1d77-4f21-b7f3-7d7eff8b2824" containerID="1db3f758464db647eeea3ef9251d13cb09279bf4ab1e94b816b2bdee6e5efec5" exitCode=0 Nov 28 07:40:05 crc kubenswrapper[4922]: I1128 07:40:05.038916 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-4wtcs" event={"ID":"b7918d4d-1d77-4f21-b7f3-7d7eff8b2824","Type":"ContainerDied","Data":"1db3f758464db647eeea3ef9251d13cb09279bf4ab1e94b816b2bdee6e5efec5"} Nov 28 07:40:05 crc kubenswrapper[4922]: I1128 07:40:05.039338 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-4wtcs" event={"ID":"b7918d4d-1d77-4f21-b7f3-7d7eff8b2824","Type":"ContainerStarted","Data":"c9c6ff7d73cbae55b66b011f4be92abb561e233678e6482733a3653fb41c1a11"} Nov 28 07:40:07 crc kubenswrapper[4922]: I1128 07:40:07.057024 4922 generic.go:334] "Generic (PLEG): container finished" podID="b7918d4d-1d77-4f21-b7f3-7d7eff8b2824" containerID="413977799b7dfb5fa6f97b75fcc94ff638dfa29d6d296fab3063ec2a12495aad" exitCode=0 Nov 28 07:40:07 crc kubenswrapper[4922]: I1128 07:40:07.057254 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-4wtcs" event={"ID":"b7918d4d-1d77-4f21-b7f3-7d7eff8b2824","Type":"ContainerDied","Data":"413977799b7dfb5fa6f97b75fcc94ff638dfa29d6d296fab3063ec2a12495aad"} Nov 28 07:40:09 crc kubenswrapper[4922]: I1128 07:40:09.073794 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-4wtcs" event={"ID":"b7918d4d-1d77-4f21-b7f3-7d7eff8b2824","Type":"ContainerStarted","Data":"2ff9320e1967fd44079a416bf2a4e6daa0081ecb3fb4f9aae1e13e9d1fa80191"} Nov 28 07:40:09 crc kubenswrapper[4922]: I1128 07:40:09.098938 4922 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-4wtcs" podStartSLOduration=3.228408323 podStartE2EDuration="6.098918548s" podCreationTimestamp="2025-11-28 07:40:03 +0000 UTC" firstStartedPulling="2025-11-28 07:40:05.04132652 +0000 UTC 
m=+2849.961722102" lastFinishedPulling="2025-11-28 07:40:07.911836705 +0000 UTC m=+2852.832232327" observedRunningTime="2025-11-28 07:40:09.095310121 +0000 UTC m=+2854.015705743" watchObservedRunningTime="2025-11-28 07:40:09.098918548 +0000 UTC m=+2854.019314130" Nov 28 07:40:13 crc kubenswrapper[4922]: I1128 07:40:13.945319 4922 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-4wtcs" Nov 28 07:40:13 crc kubenswrapper[4922]: I1128 07:40:13.945954 4922 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-4wtcs" Nov 28 07:40:14 crc kubenswrapper[4922]: I1128 07:40:14.015301 4922 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-4wtcs" Nov 28 07:40:14 crc kubenswrapper[4922]: I1128 07:40:14.228766 4922 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-4wtcs" Nov 28 07:40:14 crc kubenswrapper[4922]: I1128 07:40:14.288513 4922 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-4wtcs"] Nov 28 07:40:16 crc kubenswrapper[4922]: I1128 07:40:16.145295 4922 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-4wtcs" podUID="b7918d4d-1d77-4f21-b7f3-7d7eff8b2824" containerName="registry-server" containerID="cri-o://2ff9320e1967fd44079a416bf2a4e6daa0081ecb3fb4f9aae1e13e9d1fa80191" gracePeriod=2 Nov 28 07:40:17 crc kubenswrapper[4922]: I1128 07:40:17.161046 4922 generic.go:334] "Generic (PLEG): container finished" podID="b7918d4d-1d77-4f21-b7f3-7d7eff8b2824" containerID="2ff9320e1967fd44079a416bf2a4e6daa0081ecb3fb4f9aae1e13e9d1fa80191" exitCode=0 Nov 28 07:40:17 crc kubenswrapper[4922]: I1128 07:40:17.161156 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-4wtcs" event={"ID":"b7918d4d-1d77-4f21-b7f3-7d7eff8b2824","Type":"ContainerDied","Data":"2ff9320e1967fd44079a416bf2a4e6daa0081ecb3fb4f9aae1e13e9d1fa80191"} Nov 28 07:40:17 crc kubenswrapper[4922]: I1128 07:40:17.269005 4922 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-4wtcs" Nov 28 07:40:17 crc kubenswrapper[4922]: I1128 07:40:17.398688 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fp7wd\" (UniqueName: \"kubernetes.io/projected/b7918d4d-1d77-4f21-b7f3-7d7eff8b2824-kube-api-access-fp7wd\") pod \"b7918d4d-1d77-4f21-b7f3-7d7eff8b2824\" (UID: \"b7918d4d-1d77-4f21-b7f3-7d7eff8b2824\") " Nov 28 07:40:17 crc kubenswrapper[4922]: I1128 07:40:17.399081 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b7918d4d-1d77-4f21-b7f3-7d7eff8b2824-utilities\") pod \"b7918d4d-1d77-4f21-b7f3-7d7eff8b2824\" (UID: \"b7918d4d-1d77-4f21-b7f3-7d7eff8b2824\") " Nov 28 07:40:17 crc kubenswrapper[4922]: I1128 07:40:17.399139 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b7918d4d-1d77-4f21-b7f3-7d7eff8b2824-catalog-content\") pod \"b7918d4d-1d77-4f21-b7f3-7d7eff8b2824\" (UID: \"b7918d4d-1d77-4f21-b7f3-7d7eff8b2824\") " Nov 28 07:40:17 crc kubenswrapper[4922]: I1128 07:40:17.400450 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b7918d4d-1d77-4f21-b7f3-7d7eff8b2824-utilities" (OuterVolumeSpecName: "utilities") pod "b7918d4d-1d77-4f21-b7f3-7d7eff8b2824" (UID: "b7918d4d-1d77-4f21-b7f3-7d7eff8b2824"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 07:40:17 crc kubenswrapper[4922]: I1128 07:40:17.407365 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b7918d4d-1d77-4f21-b7f3-7d7eff8b2824-kube-api-access-fp7wd" (OuterVolumeSpecName: "kube-api-access-fp7wd") pod "b7918d4d-1d77-4f21-b7f3-7d7eff8b2824" (UID: "b7918d4d-1d77-4f21-b7f3-7d7eff8b2824"). InnerVolumeSpecName "kube-api-access-fp7wd". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 07:40:17 crc kubenswrapper[4922]: I1128 07:40:17.487936 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b7918d4d-1d77-4f21-b7f3-7d7eff8b2824-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "b7918d4d-1d77-4f21-b7f3-7d7eff8b2824" (UID: "b7918d4d-1d77-4f21-b7f3-7d7eff8b2824"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 07:40:17 crc kubenswrapper[4922]: I1128 07:40:17.501303 4922 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fp7wd\" (UniqueName: \"kubernetes.io/projected/b7918d4d-1d77-4f21-b7f3-7d7eff8b2824-kube-api-access-fp7wd\") on node \"crc\" DevicePath \"\"" Nov 28 07:40:17 crc kubenswrapper[4922]: I1128 07:40:17.501349 4922 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b7918d4d-1d77-4f21-b7f3-7d7eff8b2824-utilities\") on node \"crc\" DevicePath \"\"" Nov 28 07:40:17 crc kubenswrapper[4922]: I1128 07:40:17.501362 4922 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b7918d4d-1d77-4f21-b7f3-7d7eff8b2824-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 28 07:40:18 crc kubenswrapper[4922]: I1128 07:40:18.178610 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-4wtcs" event={"ID":"b7918d4d-1d77-4f21-b7f3-7d7eff8b2824","Type":"ContainerDied","Data":"c9c6ff7d73cbae55b66b011f4be92abb561e233678e6482733a3653fb41c1a11"} Nov 28 07:40:18 crc kubenswrapper[4922]: I1128 07:40:18.178676 4922 scope.go:117] "RemoveContainer" containerID="2ff9320e1967fd44079a416bf2a4e6daa0081ecb3fb4f9aae1e13e9d1fa80191" Nov 28 07:40:18 crc kubenswrapper[4922]: I1128 07:40:18.178747 4922 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-4wtcs" Nov 28 07:40:18 crc kubenswrapper[4922]: I1128 07:40:18.204741 4922 scope.go:117] "RemoveContainer" containerID="413977799b7dfb5fa6f97b75fcc94ff638dfa29d6d296fab3063ec2a12495aad" Nov 28 07:40:18 crc kubenswrapper[4922]: I1128 07:40:18.231037 4922 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-4wtcs"] Nov 28 07:40:18 crc kubenswrapper[4922]: I1128 07:40:18.237718 4922 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-4wtcs"] Nov 28 07:40:18 crc kubenswrapper[4922]: I1128 07:40:18.256816 4922 scope.go:117] "RemoveContainer" containerID="1db3f758464db647eeea3ef9251d13cb09279bf4ab1e94b816b2bdee6e5efec5" Nov 28 07:40:19 crc kubenswrapper[4922]: I1128 07:40:19.414955 4922 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b7918d4d-1d77-4f21-b7f3-7d7eff8b2824" path="/var/lib/kubelet/pods/b7918d4d-1d77-4f21-b7f3-7d7eff8b2824/volumes" Nov 28 07:41:27 crc kubenswrapper[4922]: I1128 07:41:27.312500 4922 patch_prober.go:28] interesting pod/machine-config-daemon-h8wk6 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 07:41:27 crc kubenswrapper[4922]: I1128 07:41:27.313312 4922 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-h8wk6" podUID="0498340a-5e95-42bf-a0a6-8ac89a6b8858" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 07:41:57 crc kubenswrapper[4922]: I1128 07:41:57.311870 4922 patch_prober.go:28] interesting pod/machine-config-daemon-h8wk6 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": 
dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 07:41:57 crc kubenswrapper[4922]: I1128 07:41:57.312642 4922 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-h8wk6" podUID="0498340a-5e95-42bf-a0a6-8ac89a6b8858" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 07:42:27 crc kubenswrapper[4922]: I1128 07:42:27.312235 4922 patch_prober.go:28] interesting pod/machine-config-daemon-h8wk6 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 07:42:27 crc kubenswrapper[4922]: I1128 07:42:27.312870 4922 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-h8wk6" podUID="0498340a-5e95-42bf-a0a6-8ac89a6b8858" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 07:42:27 crc kubenswrapper[4922]: I1128 07:42:27.312932 4922 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-h8wk6" Nov 28 07:42:27 crc kubenswrapper[4922]: I1128 07:42:27.313786 4922 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"fd445a80f0133782577e38815133e01b704316845be72e79195e07ba29f4777b"} pod="openshift-machine-config-operator/machine-config-daemon-h8wk6" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 28 07:42:27 crc kubenswrapper[4922]: I1128 07:42:27.313873 4922 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-h8wk6" podUID="0498340a-5e95-42bf-a0a6-8ac89a6b8858" containerName="machine-config-daemon" containerID="cri-o://fd445a80f0133782577e38815133e01b704316845be72e79195e07ba29f4777b" gracePeriod=600 Nov 28 07:42:27 crc kubenswrapper[4922]: E1128 07:42:27.444025 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-h8wk6_openshift-machine-config-operator(0498340a-5e95-42bf-a0a6-8ac89a6b8858)\"" pod="openshift-machine-config-operator/machine-config-daemon-h8wk6" podUID="0498340a-5e95-42bf-a0a6-8ac89a6b8858" Nov 28 07:42:27 crc kubenswrapper[4922]: I1128 07:42:27.480922 4922 generic.go:334] "Generic (PLEG): container finished" podID="0498340a-5e95-42bf-a0a6-8ac89a6b8858" containerID="fd445a80f0133782577e38815133e01b704316845be72e79195e07ba29f4777b" exitCode=0 Nov 28 07:42:27 crc kubenswrapper[4922]: I1128 07:42:27.480967 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-h8wk6" event={"ID":"0498340a-5e95-42bf-a0a6-8ac89a6b8858","Type":"ContainerDied","Data":"fd445a80f0133782577e38815133e01b704316845be72e79195e07ba29f4777b"} Nov 28 07:42:27 crc kubenswrapper[4922]: I1128 07:42:27.480999 4922 scope.go:117] "RemoveContainer" containerID="bdd4b7e3a8a1bb8ee8f0c20c17ff0cfd5a46f195485e9f2be9c45464dabf6d09" Nov 28 07:42:27 crc kubenswrapper[4922]: I1128 
07:42:27.482029 4922 scope.go:117] "RemoveContainer" containerID="fd445a80f0133782577e38815133e01b704316845be72e79195e07ba29f4777b" Nov 28 07:42:27 crc kubenswrapper[4922]: E1128 07:42:27.482541 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-h8wk6_openshift-machine-config-operator(0498340a-5e95-42bf-a0a6-8ac89a6b8858)\"" pod="openshift-machine-config-operator/machine-config-daemon-h8wk6" podUID="0498340a-5e95-42bf-a0a6-8ac89a6b8858" Nov 28 07:42:38 crc kubenswrapper[4922]: I1128 07:42:38.399042 4922 scope.go:117] "RemoveContainer" containerID="fd445a80f0133782577e38815133e01b704316845be72e79195e07ba29f4777b" Nov 28 07:42:38 crc kubenswrapper[4922]: E1128 07:42:38.400544 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-h8wk6_openshift-machine-config-operator(0498340a-5e95-42bf-a0a6-8ac89a6b8858)\"" pod="openshift-machine-config-operator/machine-config-daemon-h8wk6" podUID="0498340a-5e95-42bf-a0a6-8ac89a6b8858" Nov 28 07:42:49 crc kubenswrapper[4922]: I1128 07:42:49.400418 4922 scope.go:117] "RemoveContainer" containerID="fd445a80f0133782577e38815133e01b704316845be72e79195e07ba29f4777b" Nov 28 07:42:49 crc kubenswrapper[4922]: E1128 07:42:49.401430 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-h8wk6_openshift-machine-config-operator(0498340a-5e95-42bf-a0a6-8ac89a6b8858)\"" pod="openshift-machine-config-operator/machine-config-daemon-h8wk6" podUID="0498340a-5e95-42bf-a0a6-8ac89a6b8858" Nov 28 07:43:04 crc kubenswrapper[4922]: I1128 07:43:04.398645 4922 scope.go:117] "RemoveContainer" containerID="fd445a80f0133782577e38815133e01b704316845be72e79195e07ba29f4777b" Nov 28 07:43:04 crc kubenswrapper[4922]: E1128 07:43:04.399773 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-h8wk6_openshift-machine-config-operator(0498340a-5e95-42bf-a0a6-8ac89a6b8858)\"" pod="openshift-machine-config-operator/machine-config-daemon-h8wk6" podUID="0498340a-5e95-42bf-a0a6-8ac89a6b8858" Nov 28 07:43:15 crc kubenswrapper[4922]: I1128 07:43:15.403616 4922 scope.go:117] "RemoveContainer" containerID="fd445a80f0133782577e38815133e01b704316845be72e79195e07ba29f4777b" Nov 28 07:43:15 crc kubenswrapper[4922]: E1128 07:43:15.404383 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-h8wk6_openshift-machine-config-operator(0498340a-5e95-42bf-a0a6-8ac89a6b8858)\"" pod="openshift-machine-config-operator/machine-config-daemon-h8wk6" podUID="0498340a-5e95-42bf-a0a6-8ac89a6b8858" Nov 28 07:43:26 crc kubenswrapper[4922]: I1128 07:43:26.398797 4922 scope.go:117] "RemoveContainer" containerID="fd445a80f0133782577e38815133e01b704316845be72e79195e07ba29f4777b" Nov 28 07:43:26 crc kubenswrapper[4922]: E1128 07:43:26.399760 
4922 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-h8wk6_openshift-machine-config-operator(0498340a-5e95-42bf-a0a6-8ac89a6b8858)\"" pod="openshift-machine-config-operator/machine-config-daemon-h8wk6" podUID="0498340a-5e95-42bf-a0a6-8ac89a6b8858" Nov 28 07:43:39 crc kubenswrapper[4922]: I1128 07:43:39.399385 4922 scope.go:117] "RemoveContainer" containerID="fd445a80f0133782577e38815133e01b704316845be72e79195e07ba29f4777b" Nov 28 07:43:39 crc kubenswrapper[4922]: E1128 07:43:39.400596 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-h8wk6_openshift-machine-config-operator(0498340a-5e95-42bf-a0a6-8ac89a6b8858)\"" pod="openshift-machine-config-operator/machine-config-daemon-h8wk6" podUID="0498340a-5e95-42bf-a0a6-8ac89a6b8858" Nov 28 07:43:51 crc kubenswrapper[4922]: I1128 07:43:51.398753 4922 scope.go:117] "RemoveContainer" containerID="fd445a80f0133782577e38815133e01b704316845be72e79195e07ba29f4777b" Nov 28 07:43:51 crc kubenswrapper[4922]: E1128 07:43:51.399972 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-h8wk6_openshift-machine-config-operator(0498340a-5e95-42bf-a0a6-8ac89a6b8858)\"" pod="openshift-machine-config-operator/machine-config-daemon-h8wk6" podUID="0498340a-5e95-42bf-a0a6-8ac89a6b8858" Nov 28 07:44:06 crc kubenswrapper[4922]: I1128 07:44:06.398801 4922 scope.go:117] "RemoveContainer" containerID="fd445a80f0133782577e38815133e01b704316845be72e79195e07ba29f4777b" Nov 28 07:44:06 crc kubenswrapper[4922]: E1128 07:44:06.401520 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-h8wk6_openshift-machine-config-operator(0498340a-5e95-42bf-a0a6-8ac89a6b8858)\"" pod="openshift-machine-config-operator/machine-config-daemon-h8wk6" podUID="0498340a-5e95-42bf-a0a6-8ac89a6b8858" Nov 28 07:44:19 crc kubenswrapper[4922]: I1128 07:44:19.398724 4922 scope.go:117] "RemoveContainer" containerID="fd445a80f0133782577e38815133e01b704316845be72e79195e07ba29f4777b" Nov 28 07:44:19 crc kubenswrapper[4922]: E1128 07:44:19.399943 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-h8wk6_openshift-machine-config-operator(0498340a-5e95-42bf-a0a6-8ac89a6b8858)\"" pod="openshift-machine-config-operator/machine-config-daemon-h8wk6" podUID="0498340a-5e95-42bf-a0a6-8ac89a6b8858" Nov 28 07:44:32 crc kubenswrapper[4922]: I1128 07:44:32.398771 4922 scope.go:117] "RemoveContainer" containerID="fd445a80f0133782577e38815133e01b704316845be72e79195e07ba29f4777b" Nov 28 07:44:32 crc kubenswrapper[4922]: E1128 07:44:32.399823 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed 
container=machine-config-daemon pod=machine-config-daemon-h8wk6_openshift-machine-config-operator(0498340a-5e95-42bf-a0a6-8ac89a6b8858)\"" pod="openshift-machine-config-operator/machine-config-daemon-h8wk6" podUID="0498340a-5e95-42bf-a0a6-8ac89a6b8858"
Nov 28 07:44:46 crc kubenswrapper[4922]: I1128 07:44:46.398693 4922 scope.go:117] "RemoveContainer" containerID="fd445a80f0133782577e38815133e01b704316845be72e79195e07ba29f4777b"
Nov 28 07:44:46 crc kubenswrapper[4922]: E1128 07:44:46.399722 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-h8wk6_openshift-machine-config-operator(0498340a-5e95-42bf-a0a6-8ac89a6b8858)\"" pod="openshift-machine-config-operator/machine-config-daemon-h8wk6" podUID="0498340a-5e95-42bf-a0a6-8ac89a6b8858"
Nov 28 07:44:51 crc kubenswrapper[4922]: I1128 07:44:51.926431 4922 scope.go:117] "RemoveContainer" containerID="c8311e5d9002d4f334946952641adf047d5da25c4af3160a0d5fd0d736e3da6f"
Nov 28 07:44:51 crc kubenswrapper[4922]: I1128 07:44:51.954655 4922 scope.go:117] "RemoveContainer" containerID="519815f86d76c4d50197edf715a76c2ffd880d66f9398604ef38eb9764847bb7"
Nov 28 07:45:00 crc kubenswrapper[4922]: I1128 07:45:00.170709 4922 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29405265-bqdxl"]
Nov 28 07:45:00 crc kubenswrapper[4922]: E1128 07:45:00.171801 4922 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b7918d4d-1d77-4f21-b7f3-7d7eff8b2824" containerName="registry-server"
Nov 28 07:45:00 crc kubenswrapper[4922]: I1128 07:45:00.171825 4922 state_mem.go:107] "Deleted CPUSet assignment" podUID="b7918d4d-1d77-4f21-b7f3-7d7eff8b2824" containerName="registry-server"
Nov 28 07:45:00 crc kubenswrapper[4922]: E1128 07:45:00.171847 4922 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b7918d4d-1d77-4f21-b7f3-7d7eff8b2824" containerName="extract-utilities"
Nov 28 07:45:00 crc kubenswrapper[4922]: I1128 07:45:00.171859 4922 state_mem.go:107] "Deleted CPUSet assignment" podUID="b7918d4d-1d77-4f21-b7f3-7d7eff8b2824" containerName="extract-utilities"
Nov 28 07:45:00 crc kubenswrapper[4922]: E1128 07:45:00.171883 4922 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b7918d4d-1d77-4f21-b7f3-7d7eff8b2824" containerName="extract-content"
Nov 28 07:45:00 crc kubenswrapper[4922]: I1128 07:45:00.171893 4922 state_mem.go:107] "Deleted CPUSet assignment" podUID="b7918d4d-1d77-4f21-b7f3-7d7eff8b2824" containerName="extract-content"
Nov 28 07:45:00 crc kubenswrapper[4922]: I1128 07:45:00.172138 4922 memory_manager.go:354] "RemoveStaleState removing state" podUID="b7918d4d-1d77-4f21-b7f3-7d7eff8b2824" containerName="registry-server"
Nov 28 07:45:00 crc kubenswrapper[4922]: I1128 07:45:00.172924 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29405265-bqdxl"
Nov 28 07:45:00 crc kubenswrapper[4922]: I1128 07:45:00.175775 4922 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t"
Nov 28 07:45:00 crc kubenswrapper[4922]: I1128 07:45:00.175857 4922 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config"
Nov 28 07:45:00 crc kubenswrapper[4922]: I1128 07:45:00.191206 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29405265-bqdxl"]
Nov 28 07:45:00 crc kubenswrapper[4922]: I1128 07:45:00.346070 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/df1b9cc6-d4a0-46ce-a18a-e8c76d770038-config-volume\") pod \"collect-profiles-29405265-bqdxl\" (UID: \"df1b9cc6-d4a0-46ce-a18a-e8c76d770038\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405265-bqdxl"
Nov 28 07:45:00 crc kubenswrapper[4922]: I1128 07:45:00.346166 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8bsft\" (UniqueName: \"kubernetes.io/projected/df1b9cc6-d4a0-46ce-a18a-e8c76d770038-kube-api-access-8bsft\") pod \"collect-profiles-29405265-bqdxl\" (UID: \"df1b9cc6-d4a0-46ce-a18a-e8c76d770038\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405265-bqdxl"
Nov 28 07:45:00 crc kubenswrapper[4922]: I1128 07:45:00.346262 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/df1b9cc6-d4a0-46ce-a18a-e8c76d770038-secret-volume\") pod \"collect-profiles-29405265-bqdxl\" (UID: \"df1b9cc6-d4a0-46ce-a18a-e8c76d770038\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405265-bqdxl"
Nov 28 07:45:00 crc kubenswrapper[4922]: I1128 07:45:00.448296 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/df1b9cc6-d4a0-46ce-a18a-e8c76d770038-config-volume\") pod \"collect-profiles-29405265-bqdxl\" (UID: \"df1b9cc6-d4a0-46ce-a18a-e8c76d770038\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405265-bqdxl"
Nov 28 07:45:00 crc kubenswrapper[4922]: I1128 07:45:00.448392 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8bsft\" (UniqueName: \"kubernetes.io/projected/df1b9cc6-d4a0-46ce-a18a-e8c76d770038-kube-api-access-8bsft\") pod \"collect-profiles-29405265-bqdxl\" (UID: \"df1b9cc6-d4a0-46ce-a18a-e8c76d770038\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405265-bqdxl"
Nov 28 07:45:00 crc kubenswrapper[4922]: I1128 07:45:00.448561 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/df1b9cc6-d4a0-46ce-a18a-e8c76d770038-secret-volume\") pod \"collect-profiles-29405265-bqdxl\" (UID: \"df1b9cc6-d4a0-46ce-a18a-e8c76d770038\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405265-bqdxl"
Nov 28 07:45:00 crc kubenswrapper[4922]: I1128 07:45:00.450461 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/df1b9cc6-d4a0-46ce-a18a-e8c76d770038-config-volume\") pod \"collect-profiles-29405265-bqdxl\" (UID: \"df1b9cc6-d4a0-46ce-a18a-e8c76d770038\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405265-bqdxl"
Nov 28 07:45:00 crc kubenswrapper[4922]: I1128 07:45:00.464306 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/df1b9cc6-d4a0-46ce-a18a-e8c76d770038-secret-volume\") pod \"collect-profiles-29405265-bqdxl\" (UID: \"df1b9cc6-d4a0-46ce-a18a-e8c76d770038\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405265-bqdxl"
Nov 28 07:45:00 crc kubenswrapper[4922]: I1128 07:45:00.481908 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8bsft\" (UniqueName: \"kubernetes.io/projected/df1b9cc6-d4a0-46ce-a18a-e8c76d770038-kube-api-access-8bsft\") pod \"collect-profiles-29405265-bqdxl\" (UID: \"df1b9cc6-d4a0-46ce-a18a-e8c76d770038\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405265-bqdxl"
Nov 28 07:45:00 crc kubenswrapper[4922]: I1128 07:45:00.498374 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29405265-bqdxl"
Nov 28 07:45:00 crc kubenswrapper[4922]: I1128 07:45:00.792975 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29405265-bqdxl"]
Nov 28 07:45:01 crc kubenswrapper[4922]: I1128 07:45:01.399083 4922 scope.go:117] "RemoveContainer" containerID="fd445a80f0133782577e38815133e01b704316845be72e79195e07ba29f4777b"
Nov 28 07:45:01 crc kubenswrapper[4922]: E1128 07:45:01.399877 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-h8wk6_openshift-machine-config-operator(0498340a-5e95-42bf-a0a6-8ac89a6b8858)\"" pod="openshift-machine-config-operator/machine-config-daemon-h8wk6" podUID="0498340a-5e95-42bf-a0a6-8ac89a6b8858"
Nov 28 07:45:01 crc kubenswrapper[4922]: I1128 07:45:01.422544 4922 generic.go:334] "Generic (PLEG): container finished" podID="df1b9cc6-d4a0-46ce-a18a-e8c76d770038" containerID="31dff3e794c56c6d19d2a6efc279a28cce859f9609fd15d0edfcb9b1fa793ff3" exitCode=0
Nov 28 07:45:01 crc kubenswrapper[4922]: I1128 07:45:01.422607 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29405265-bqdxl" event={"ID":"df1b9cc6-d4a0-46ce-a18a-e8c76d770038","Type":"ContainerDied","Data":"31dff3e794c56c6d19d2a6efc279a28cce859f9609fd15d0edfcb9b1fa793ff3"}
Nov 28 07:45:01 crc kubenswrapper[4922]: I1128 07:45:01.422646 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29405265-bqdxl" event={"ID":"df1b9cc6-d4a0-46ce-a18a-e8c76d770038","Type":"ContainerStarted","Data":"714ed53a494ab88a109669c19b0fe475f063fe79633f57b7ef2fe30226be67d2"}
Nov 28 07:45:02 crc kubenswrapper[4922]: I1128 07:45:02.803626 4922 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29405265-bqdxl"
Nov 28 07:45:02 crc kubenswrapper[4922]: I1128 07:45:02.988295 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/df1b9cc6-d4a0-46ce-a18a-e8c76d770038-config-volume\") pod \"df1b9cc6-d4a0-46ce-a18a-e8c76d770038\" (UID: \"df1b9cc6-d4a0-46ce-a18a-e8c76d770038\") "
Nov 28 07:45:02 crc kubenswrapper[4922]: I1128 07:45:02.988415 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/df1b9cc6-d4a0-46ce-a18a-e8c76d770038-secret-volume\") pod \"df1b9cc6-d4a0-46ce-a18a-e8c76d770038\" (UID: \"df1b9cc6-d4a0-46ce-a18a-e8c76d770038\") "
Nov 28 07:45:02 crc kubenswrapper[4922]: I1128 07:45:02.988551 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8bsft\" (UniqueName: \"kubernetes.io/projected/df1b9cc6-d4a0-46ce-a18a-e8c76d770038-kube-api-access-8bsft\") pod \"df1b9cc6-d4a0-46ce-a18a-e8c76d770038\" (UID: \"df1b9cc6-d4a0-46ce-a18a-e8c76d770038\") "
Nov 28 07:45:02 crc kubenswrapper[4922]: I1128 07:45:02.989971 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/df1b9cc6-d4a0-46ce-a18a-e8c76d770038-config-volume" (OuterVolumeSpecName: "config-volume") pod "df1b9cc6-d4a0-46ce-a18a-e8c76d770038" (UID: "df1b9cc6-d4a0-46ce-a18a-e8c76d770038"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 28 07:45:02 crc kubenswrapper[4922]: I1128 07:45:02.994513 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/df1b9cc6-d4a0-46ce-a18a-e8c76d770038-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "df1b9cc6-d4a0-46ce-a18a-e8c76d770038" (UID: "df1b9cc6-d4a0-46ce-a18a-e8c76d770038"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 07:45:02 crc kubenswrapper[4922]: I1128 07:45:02.995386 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/df1b9cc6-d4a0-46ce-a18a-e8c76d770038-kube-api-access-8bsft" (OuterVolumeSpecName: "kube-api-access-8bsft") pod "df1b9cc6-d4a0-46ce-a18a-e8c76d770038" (UID: "df1b9cc6-d4a0-46ce-a18a-e8c76d770038"). InnerVolumeSpecName "kube-api-access-8bsft". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 28 07:45:03 crc kubenswrapper[4922]: I1128 07:45:03.090152 4922 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8bsft\" (UniqueName: \"kubernetes.io/projected/df1b9cc6-d4a0-46ce-a18a-e8c76d770038-kube-api-access-8bsft\") on node \"crc\" DevicePath \"\""
Nov 28 07:45:03 crc kubenswrapper[4922]: I1128 07:45:03.090201 4922 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/df1b9cc6-d4a0-46ce-a18a-e8c76d770038-config-volume\") on node \"crc\" DevicePath \"\""
Nov 28 07:45:03 crc kubenswrapper[4922]: I1128 07:45:03.090245 4922 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/df1b9cc6-d4a0-46ce-a18a-e8c76d770038-secret-volume\") on node \"crc\" DevicePath \"\""
Nov 28 07:45:03 crc kubenswrapper[4922]: I1128 07:45:03.454009 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29405265-bqdxl" event={"ID":"df1b9cc6-d4a0-46ce-a18a-e8c76d770038","Type":"ContainerDied","Data":"714ed53a494ab88a109669c19b0fe475f063fe79633f57b7ef2fe30226be67d2"}
Nov 28 07:45:03 crc kubenswrapper[4922]: I1128 07:45:03.454695 4922 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="714ed53a494ab88a109669c19b0fe475f063fe79633f57b7ef2fe30226be67d2"
Nov 28 07:45:03 crc kubenswrapper[4922]: I1128 07:45:03.454115 4922 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29405265-bqdxl"
Nov 28 07:45:03 crc kubenswrapper[4922]: I1128 07:45:03.918033 4922 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29405220-bpqvm"]
Nov 28 07:45:03 crc kubenswrapper[4922]: I1128 07:45:03.930031 4922 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29405220-bpqvm"]
Nov 28 07:45:05 crc kubenswrapper[4922]: I1128 07:45:05.414665 4922 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b9c226e4-e4fe-426b-9243-9ff96f72c93a" path="/var/lib/kubelet/pods/b9c226e4-e4fe-426b-9243-9ff96f72c93a/volumes"
Nov 28 07:45:14 crc kubenswrapper[4922]: I1128 07:45:14.399094 4922 scope.go:117] "RemoveContainer" containerID="fd445a80f0133782577e38815133e01b704316845be72e79195e07ba29f4777b"
Nov 28 07:45:14 crc kubenswrapper[4922]: E1128 07:45:14.400604 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-h8wk6_openshift-machine-config-operator(0498340a-5e95-42bf-a0a6-8ac89a6b8858)\"" pod="openshift-machine-config-operator/machine-config-daemon-h8wk6" podUID="0498340a-5e95-42bf-a0a6-8ac89a6b8858"
Nov 28 07:45:29 crc kubenswrapper[4922]: I1128 07:45:29.398760 4922 scope.go:117] "RemoveContainer" containerID="fd445a80f0133782577e38815133e01b704316845be72e79195e07ba29f4777b"
Nov 28 07:45:29 crc kubenswrapper[4922]: E1128 07:45:29.399812 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-h8wk6_openshift-machine-config-operator(0498340a-5e95-42bf-a0a6-8ac89a6b8858)\"" pod="openshift-machine-config-operator/machine-config-daemon-h8wk6" podUID="0498340a-5e95-42bf-a0a6-8ac89a6b8858"
Nov 28 07:45:42 crc kubenswrapper[4922]: I1128 07:45:42.398421 4922 scope.go:117] "RemoveContainer" containerID="fd445a80f0133782577e38815133e01b704316845be72e79195e07ba29f4777b"
Nov 28 07:45:42 crc kubenswrapper[4922]: E1128 07:45:42.399358 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-h8wk6_openshift-machine-config-operator(0498340a-5e95-42bf-a0a6-8ac89a6b8858)\"" pod="openshift-machine-config-operator/machine-config-daemon-h8wk6" podUID="0498340a-5e95-42bf-a0a6-8ac89a6b8858"
Nov 28 07:45:52 crc kubenswrapper[4922]: I1128 07:45:52.020019 4922 scope.go:117] "RemoveContainer" containerID="e71dc4514a0a44740d2773515fac0a2f153c5ad32ac0e91f835275d3df92dd1c"
Nov 28 07:45:52 crc kubenswrapper[4922]: I1128 07:45:52.053383 4922 scope.go:117] "RemoveContainer" containerID="af64004bafd385c4d8d14c353710b2eb3556a86e73425b07063fb732991dc44b"
Nov 28 07:45:53 crc kubenswrapper[4922]: I1128 07:45:53.398838 4922 scope.go:117] "RemoveContainer" containerID="fd445a80f0133782577e38815133e01b704316845be72e79195e07ba29f4777b"
Nov 28 07:45:53 crc kubenswrapper[4922]: E1128 07:45:53.399627 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-h8wk6_openshift-machine-config-operator(0498340a-5e95-42bf-a0a6-8ac89a6b8858)\"" pod="openshift-machine-config-operator/machine-config-daemon-h8wk6" podUID="0498340a-5e95-42bf-a0a6-8ac89a6b8858"
Nov 28 07:46:04 crc kubenswrapper[4922]: I1128 07:46:04.398205 4922 scope.go:117] "RemoveContainer" containerID="fd445a80f0133782577e38815133e01b704316845be72e79195e07ba29f4777b"
Nov 28 07:46:04 crc kubenswrapper[4922]: E1128 07:46:04.398658 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-h8wk6_openshift-machine-config-operator(0498340a-5e95-42bf-a0a6-8ac89a6b8858)\"" pod="openshift-machine-config-operator/machine-config-daemon-h8wk6" podUID="0498340a-5e95-42bf-a0a6-8ac89a6b8858"
Nov 28 07:46:18 crc kubenswrapper[4922]: I1128 07:46:18.398413 4922 scope.go:117] "RemoveContainer" containerID="fd445a80f0133782577e38815133e01b704316845be72e79195e07ba29f4777b"
Nov 28 07:46:18 crc kubenswrapper[4922]: E1128 07:46:18.400708 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-h8wk6_openshift-machine-config-operator(0498340a-5e95-42bf-a0a6-8ac89a6b8858)\"" pod="openshift-machine-config-operator/machine-config-daemon-h8wk6" podUID="0498340a-5e95-42bf-a0a6-8ac89a6b8858"
Nov 28 07:46:31 crc kubenswrapper[4922]: I1128 07:46:31.398601 4922 scope.go:117] "RemoveContainer" containerID="fd445a80f0133782577e38815133e01b704316845be72e79195e07ba29f4777b"
Nov 28 07:46:31 crc kubenswrapper[4922]: E1128 07:46:31.399620 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-h8wk6_openshift-machine-config-operator(0498340a-5e95-42bf-a0a6-8ac89a6b8858)\"" pod="openshift-machine-config-operator/machine-config-daemon-h8wk6" podUID="0498340a-5e95-42bf-a0a6-8ac89a6b8858"
Nov 28 07:46:45 crc kubenswrapper[4922]: I1128 07:46:45.403502 4922 scope.go:117] "RemoveContainer" containerID="fd445a80f0133782577e38815133e01b704316845be72e79195e07ba29f4777b"
Nov 28 07:46:45 crc kubenswrapper[4922]: E1128 07:46:45.404477 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-h8wk6_openshift-machine-config-operator(0498340a-5e95-42bf-a0a6-8ac89a6b8858)\"" pod="openshift-machine-config-operator/machine-config-daemon-h8wk6" podUID="0498340a-5e95-42bf-a0a6-8ac89a6b8858"
Nov 28 07:46:56 crc kubenswrapper[4922]: I1128 07:46:56.398760 4922 scope.go:117] "RemoveContainer" containerID="fd445a80f0133782577e38815133e01b704316845be72e79195e07ba29f4777b"
Nov 28 07:46:56 crc kubenswrapper[4922]: E1128 07:46:56.399756 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-h8wk6_openshift-machine-config-operator(0498340a-5e95-42bf-a0a6-8ac89a6b8858)\"" pod="openshift-machine-config-operator/machine-config-daemon-h8wk6" podUID="0498340a-5e95-42bf-a0a6-8ac89a6b8858"
Nov 28 07:47:03 crc kubenswrapper[4922]: I1128 07:47:03.327688 4922 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-must-gather-ftntt/must-gather-285z7"]
Nov 28 07:47:03 crc kubenswrapper[4922]: E1128 07:47:03.328681 4922 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="df1b9cc6-d4a0-46ce-a18a-e8c76d770038" containerName="collect-profiles"
Nov 28 07:47:03 crc kubenswrapper[4922]: I1128 07:47:03.328697 4922 state_mem.go:107] "Deleted CPUSet assignment" podUID="df1b9cc6-d4a0-46ce-a18a-e8c76d770038" containerName="collect-profiles"
Nov 28 07:47:03 crc kubenswrapper[4922]: I1128 07:47:03.328932 4922 memory_manager.go:354] "RemoveStaleState removing state" podUID="df1b9cc6-d4a0-46ce-a18a-e8c76d770038" containerName="collect-profiles"
Nov 28 07:47:03 crc kubenswrapper[4922]: I1128 07:47:03.329987 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-ftntt/must-gather-285z7"
Nov 28 07:47:03 crc kubenswrapper[4922]: I1128 07:47:03.332065 4922 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-must-gather-ftntt"/"kube-root-ca.crt"
Nov 28 07:47:03 crc kubenswrapper[4922]: I1128 07:47:03.332782 4922 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-must-gather-ftntt"/"openshift-service-ca.crt"
Nov 28 07:47:03 crc kubenswrapper[4922]: I1128 07:47:03.351064 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-must-gather-ftntt/must-gather-285z7"]
Nov 28 07:47:03 crc kubenswrapper[4922]: I1128 07:47:03.408352 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jn9b4\" (UniqueName: \"kubernetes.io/projected/3f397631-c6cf-4382-8761-fe6c1c3d1eba-kube-api-access-jn9b4\") pod \"must-gather-285z7\" (UID: \"3f397631-c6cf-4382-8761-fe6c1c3d1eba\") " pod="openshift-must-gather-ftntt/must-gather-285z7"
Nov 28 07:47:03 crc kubenswrapper[4922]: I1128 07:47:03.408400 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/3f397631-c6cf-4382-8761-fe6c1c3d1eba-must-gather-output\") pod \"must-gather-285z7\" (UID: \"3f397631-c6cf-4382-8761-fe6c1c3d1eba\") " pod="openshift-must-gather-ftntt/must-gather-285z7"
Nov 28 07:47:03 crc kubenswrapper[4922]: I1128 07:47:03.509464 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jn9b4\" (UniqueName: \"kubernetes.io/projected/3f397631-c6cf-4382-8761-fe6c1c3d1eba-kube-api-access-jn9b4\") pod \"must-gather-285z7\" (UID: \"3f397631-c6cf-4382-8761-fe6c1c3d1eba\") " pod="openshift-must-gather-ftntt/must-gather-285z7"
Nov 28 07:47:03 crc kubenswrapper[4922]: I1128 07:47:03.509524 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/3f397631-c6cf-4382-8761-fe6c1c3d1eba-must-gather-output\") pod \"must-gather-285z7\" (UID: \"3f397631-c6cf-4382-8761-fe6c1c3d1eba\") " pod="openshift-must-gather-ftntt/must-gather-285z7"
Nov 28 07:47:03 crc kubenswrapper[4922]: I1128 07:47:03.509969 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/3f397631-c6cf-4382-8761-fe6c1c3d1eba-must-gather-output\") pod \"must-gather-285z7\" (UID: \"3f397631-c6cf-4382-8761-fe6c1c3d1eba\") " pod="openshift-must-gather-ftntt/must-gather-285z7"
Nov 28 07:47:03 crc kubenswrapper[4922]: I1128 07:47:03.539649 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jn9b4\" (UniqueName: \"kubernetes.io/projected/3f397631-c6cf-4382-8761-fe6c1c3d1eba-kube-api-access-jn9b4\") pod \"must-gather-285z7\" (UID: \"3f397631-c6cf-4382-8761-fe6c1c3d1eba\") " pod="openshift-must-gather-ftntt/must-gather-285z7"
Nov 28 07:47:03 crc kubenswrapper[4922]: I1128 07:47:03.646880 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-ftntt/must-gather-285z7"
Nov 28 07:47:03 crc kubenswrapper[4922]: I1128 07:47:03.923456 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-must-gather-ftntt/must-gather-285z7"]
Nov 28 07:47:03 crc kubenswrapper[4922]: I1128 07:47:03.927273 4922 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider
Nov 28 07:47:04 crc kubenswrapper[4922]: I1128 07:47:04.581471 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-ftntt/must-gather-285z7" event={"ID":"3f397631-c6cf-4382-8761-fe6c1c3d1eba","Type":"ContainerStarted","Data":"e028d74df5c2336ed06a0dfe77d5de20204d66219e6fe23085d36c3b132fd824"}
Nov 28 07:47:09 crc kubenswrapper[4922]: I1128 07:47:09.398757 4922 scope.go:117] "RemoveContainer" containerID="fd445a80f0133782577e38815133e01b704316845be72e79195e07ba29f4777b"
Nov 28 07:47:09 crc kubenswrapper[4922]: E1128 07:47:09.400275 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-h8wk6_openshift-machine-config-operator(0498340a-5e95-42bf-a0a6-8ac89a6b8858)\"" pod="openshift-machine-config-operator/machine-config-daemon-h8wk6" podUID="0498340a-5e95-42bf-a0a6-8ac89a6b8858"
Nov 28 07:47:11 crc kubenswrapper[4922]: I1128 07:47:11.642635 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-ftntt/must-gather-285z7" event={"ID":"3f397631-c6cf-4382-8761-fe6c1c3d1eba","Type":"ContainerStarted","Data":"fd472eebc3f8062dd87907d673c1bf80cd8a36f314a541838af56551a88f239b"}
Nov 28 07:47:11 crc kubenswrapper[4922]: I1128 07:47:11.643017 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-ftntt/must-gather-285z7" event={"ID":"3f397631-c6cf-4382-8761-fe6c1c3d1eba","Type":"ContainerStarted","Data":"b68b3684879f21d363b41ce50a176601da5f515c8fdb708f92552a1db98e227a"}
Nov 28 07:47:22 crc kubenswrapper[4922]: I1128 07:47:22.398908 4922 scope.go:117] "RemoveContainer" containerID="fd445a80f0133782577e38815133e01b704316845be72e79195e07ba29f4777b"
Nov 28 07:47:22 crc kubenswrapper[4922]: E1128 07:47:22.400934 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-h8wk6_openshift-machine-config-operator(0498340a-5e95-42bf-a0a6-8ac89a6b8858)\"" pod="openshift-machine-config-operator/machine-config-daemon-h8wk6" podUID="0498340a-5e95-42bf-a0a6-8ac89a6b8858"
Nov 28 07:47:33 crc kubenswrapper[4922]: I1128 07:47:33.398489 4922 scope.go:117] "RemoveContainer" containerID="fd445a80f0133782577e38815133e01b704316845be72e79195e07ba29f4777b"
Nov 28 07:47:33 crc kubenswrapper[4922]: I1128 07:47:33.822296 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-h8wk6" event={"ID":"0498340a-5e95-42bf-a0a6-8ac89a6b8858","Type":"ContainerStarted","Data":"3f72ca2cbcc03c66bce9f3d447e83761baf9c3a351e079449e2d091afcaa328e"}
Nov 28 07:47:33 crc kubenswrapper[4922]: I1128 07:47:33.844355 4922 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-must-gather-ftntt/must-gather-285z7" podStartSLOduration=24.262549959 podStartE2EDuration="30.84432754s" podCreationTimestamp="2025-11-28 07:47:03 +0000 UTC" firstStartedPulling="2025-11-28 07:47:03.927234558 +0000 UTC m=+3268.847630150" lastFinishedPulling="2025-11-28 07:47:10.509012149 +0000 UTC m=+3275.429407731" observedRunningTime="2025-11-28 07:47:11.674859314 +0000 UTC m=+3276.595254906" watchObservedRunningTime="2025-11-28 07:47:33.84432754 +0000 UTC m=+3298.764723162"
Nov 28 07:48:11 crc kubenswrapper[4922]: I1128 07:48:11.451904 4922 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_b45623fa79426ee6d52ccc2f61ed894b37aa2fb70e5ce0cf390950ffbe22f72_960497bd-3a43-4bb6-b3b6-b8bdf0ddd451/util/0.log"
Nov 28 07:48:11 crc kubenswrapper[4922]: I1128 07:48:11.641114 4922 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_b45623fa79426ee6d52ccc2f61ed894b37aa2fb70e5ce0cf390950ffbe22f72_960497bd-3a43-4bb6-b3b6-b8bdf0ddd451/pull/0.log"
Nov 28 07:48:11 crc kubenswrapper[4922]: I1128 07:48:11.647936 4922 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_b45623fa79426ee6d52ccc2f61ed894b37aa2fb70e5ce0cf390950ffbe22f72_960497bd-3a43-4bb6-b3b6-b8bdf0ddd451/util/0.log"
Nov 28 07:48:11 crc kubenswrapper[4922]: I1128 07:48:11.676002 4922 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_b45623fa79426ee6d52ccc2f61ed894b37aa2fb70e5ce0cf390950ffbe22f72_960497bd-3a43-4bb6-b3b6-b8bdf0ddd451/pull/0.log"
Nov 28 07:48:11 crc kubenswrapper[4922]: I1128 07:48:11.859799 4922 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_b45623fa79426ee6d52ccc2f61ed894b37aa2fb70e5ce0cf390950ffbe22f72_960497bd-3a43-4bb6-b3b6-b8bdf0ddd451/util/0.log"
Nov 28 07:48:11 crc kubenswrapper[4922]: I1128 07:48:11.894398 4922 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_b45623fa79426ee6d52ccc2f61ed894b37aa2fb70e5ce0cf390950ffbe22f72_960497bd-3a43-4bb6-b3b6-b8bdf0ddd451/pull/0.log"
Nov 28 07:48:11 crc kubenswrapper[4922]: I1128 07:48:11.901778 4922 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_b45623fa79426ee6d52ccc2f61ed894b37aa2fb70e5ce0cf390950ffbe22f72_960497bd-3a43-4bb6-b3b6-b8bdf0ddd451/extract/0.log"
Nov 28 07:48:12 crc kubenswrapper[4922]: I1128 07:48:12.018152 4922 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_barbican-operator-controller-manager-7b64f4fb85-7wc2c_743984bb-d44f-4721-b9a0-d12f71feb3e9/kube-rbac-proxy/0.log"
Nov 28 07:48:12 crc kubenswrapper[4922]: I1128 07:48:12.082115 4922 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_cinder-operator-controller-manager-6b7f75547b-njqq9_5b5e379b-d7c5-45ae-81f8-cebce05059a0/kube-rbac-proxy/0.log"
Nov 28 07:48:12 crc kubenswrapper[4922]: I1128 07:48:12.137769 4922 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_barbican-operator-controller-manager-7b64f4fb85-7wc2c_743984bb-d44f-4721-b9a0-d12f71feb3e9/manager/0.log"
Nov 28 07:48:12 crc kubenswrapper[4922]: I1128 07:48:12.251952 4922 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_cinder-operator-controller-manager-6b7f75547b-njqq9_5b5e379b-d7c5-45ae-81f8-cebce05059a0/manager/0.log"
Nov 28 07:48:12 crc kubenswrapper[4922]: I1128 07:48:12.298916 4922 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_designate-operator-controller-manager-955677c94-6kwvm_8577290f-ffdd-49ed-8666-6d5ca2323102/kube-rbac-proxy/0.log"
Nov 28 07:48:12 crc kubenswrapper[4922]: I1128 07:48:12.342140 4922 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_designate-operator-controller-manager-955677c94-6kwvm_8577290f-ffdd-49ed-8666-6d5ca2323102/manager/0.log"
Nov 28 07:48:12 crc kubenswrapper[4922]: I1128 07:48:12.462401 4922 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_glance-operator-controller-manager-589cbd6b5b-89z98_3ec4d3b2-efb1-4aa0-a5dc-a130ffc887be/kube-rbac-proxy/0.log"
Nov 28 07:48:12 crc kubenswrapper[4922]: I1128 07:48:12.503402 4922 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_glance-operator-controller-manager-589cbd6b5b-89z98_3ec4d3b2-efb1-4aa0-a5dc-a130ffc887be/manager/0.log"
Nov 28 07:48:12 crc kubenswrapper[4922]: I1128 07:48:12.625679 4922 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_heat-operator-controller-manager-5b77f656f-vggwt_a428f994-d14f-4a4a-aec2-ce114d56f7a9/kube-rbac-proxy/0.log"
Nov 28 07:48:12 crc kubenswrapper[4922]: I1128 07:48:12.626744 4922 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_heat-operator-controller-manager-5b77f656f-vggwt_a428f994-d14f-4a4a-aec2-ce114d56f7a9/manager/0.log"
Nov 28 07:48:12 crc kubenswrapper[4922]: I1128 07:48:12.746858 4922 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_horizon-operator-controller-manager-5d494799bf-htnmk_b0972771-f2d5-4f8d-91cb-ef2fde0b536b/kube-rbac-proxy/0.log"
Nov 28 07:48:12 crc kubenswrapper[4922]: I1128 07:48:12.787517 4922 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_horizon-operator-controller-manager-5d494799bf-htnmk_b0972771-f2d5-4f8d-91cb-ef2fde0b536b/manager/0.log"
Nov 28 07:48:12 crc kubenswrapper[4922]: I1128 07:48:12.861612 4922 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_infra-operator-controller-manager-57548d458d-x2bzf_a2b17b7d-a398-4ce6-9e4a-6d80e1e97369/kube-rbac-proxy/0.log"
Nov 28 07:48:13 crc kubenswrapper[4922]: I1128 07:48:13.013153 4922 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ironic-operator-controller-manager-67cb4dc6d4-cdnsq_9c2b26fd-dac9-45ae-b100-b46c85c11506/kube-rbac-proxy/0.log"
Nov 28 07:48:13 crc kubenswrapper[4922]: I1128 07:48:13.062736 4922 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ironic-operator-controller-manager-67cb4dc6d4-cdnsq_9c2b26fd-dac9-45ae-b100-b46c85c11506/manager/0.log"
Nov 28 07:48:13 crc kubenswrapper[4922]: I1128 07:48:13.103757 4922 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_infra-operator-controller-manager-57548d458d-x2bzf_a2b17b7d-a398-4ce6-9e4a-6d80e1e97369/manager/0.log"
Nov 28 07:48:13 crc kubenswrapper[4922]: I1128 07:48:13.230790 4922 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_keystone-operator-controller-manager-7b4567c7cf-5m4mz_1ecf6399-ca02-4591-89cd-1a0731a5a75c/kube-rbac-proxy/0.log"
Nov 28 07:48:13 crc kubenswrapper[4922]: I1128 07:48:13.311391 4922 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_keystone-operator-controller-manager-7b4567c7cf-5m4mz_1ecf6399-ca02-4591-89cd-1a0731a5a75c/manager/0.log"
Nov 28 07:48:13 crc kubenswrapper[4922]: I1128 07:48:13.376411 4922 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_manila-operator-controller-manager-5d499bf58b-4csdv_0a20a7e6-e9a5-433a-bf5e-d8f62053f611/kube-rbac-proxy/0.log"
Nov 28 07:48:13 crc kubenswrapper[4922]: I1128 07:48:13.443308 4922 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_manila-operator-controller-manager-5d499bf58b-4csdv_0a20a7e6-e9a5-433a-bf5e-d8f62053f611/manager/0.log"
Nov 28 07:48:13 crc kubenswrapper[4922]: I1128 07:48:13.515794 4922 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_mariadb-operator-controller-manager-66f4dd4bc7-bthkr_31bc74a9-0e17-4400-bef5-d36ff53831dd/kube-rbac-proxy/0.log"
Nov 28 07:48:13 crc kubenswrapper[4922]: I1128 07:48:13.621690 4922 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_mariadb-operator-controller-manager-66f4dd4bc7-bthkr_31bc74a9-0e17-4400-bef5-d36ff53831dd/manager/0.log"
Nov 28 07:48:13 crc kubenswrapper[4922]: I1128 07:48:13.728887 4922 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_neutron-operator-controller-manager-6fdcddb789-5bl8t_b32f9a0b-4c07-433c-b02a-aed24265ae16/manager/0.log"
Nov 28 07:48:13 crc kubenswrapper[4922]: I1128 07:48:13.765418 4922 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_neutron-operator-controller-manager-6fdcddb789-5bl8t_b32f9a0b-4c07-433c-b02a-aed24265ae16/kube-rbac-proxy/0.log"
Nov 28 07:48:13 crc kubenswrapper[4922]: I1128 07:48:13.857881 4922 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_nova-operator-controller-manager-79556f57fc-jbsc8_c905882d-e743-49a3-b331-f3b7cc0d8649/kube-rbac-proxy/0.log"
Nov 28 07:48:13 crc kubenswrapper[4922]: I1128 07:48:13.982757 4922 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_nova-operator-controller-manager-79556f57fc-jbsc8_c905882d-e743-49a3-b331-f3b7cc0d8649/manager/0.log"
Nov 28 07:48:14 crc kubenswrapper[4922]: I1128 07:48:14.060778 4922 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_octavia-operator-controller-manager-64cdc6ff96-74d6v_fafc81ca-dc80-4ef4-a8e3-9e63f5f45a2d/kube-rbac-proxy/0.log"
Nov 28 07:48:14 crc kubenswrapper[4922]: I1128 07:48:14.132201 4922 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_octavia-operator-controller-manager-64cdc6ff96-74d6v_fafc81ca-dc80-4ef4-a8e3-9e63f5f45a2d/manager/0.log"
Nov 28 07:48:14 crc kubenswrapper[4922]: I1128 07:48:14.178450 4922 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-baremetal-operator-controller-manager-5d9f9695db9c48b_ff496359-1983-4a83-a344-3fb11e4587d9/kube-rbac-proxy/0.log"
Nov 28 07:48:14 crc kubenswrapper[4922]: I1128 07:48:14.260713 4922 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-baremetal-operator-controller-manager-5d9f9695db9c48b_ff496359-1983-4a83-a344-3fb11e4587d9/manager/0.log"
Nov 28 07:48:14 crc kubenswrapper[4922]: I1128 07:48:14.721401 4922 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-controller-operator-67d8f6cc56-bdwdc_0474c723-3f2a-42b6-a1c7-ff0472731025/operator/0.log"
Nov 28 07:48:14 crc kubenswrapper[4922]: I1128 07:48:14.737625 4922 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-index-b6hrw_37166abd-f8e0-4749-8569-c133eda4d74a/registry-server/0.log"
Nov 28 07:48:14 crc kubenswrapper[4922]: I1128 07:48:14.882652 4922 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ovn-operator-controller-manager-56897c768d-rl77z_5ab2d7c3-71a5-4337-acee-8ed5e7f09dbf/kube-rbac-proxy/0.log"
Nov 28 07:48:14 crc kubenswrapper[4922]: I1128 07:48:14.953649 4922 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-controller-manager-66f75ddbcc-b48l9_5f800348-9092-4701-a85d-3bb8f40b51bd/manager/0.log"
Nov 28 07:48:14 crc kubenswrapper[4922]: I1128 07:48:14.961260 4922 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_placement-operator-controller-manager-57988cc5b5-jgwt8_210cb1b4-9abb-454e-9f7e-9dbd52ec0c9b/kube-rbac-proxy/0.log"
Nov 28 07:48:14 crc kubenswrapper[4922]: I1128 07:48:14.989151 4922 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ovn-operator-controller-manager-56897c768d-rl77z_5ab2d7c3-71a5-4337-acee-8ed5e7f09dbf/manager/0.log"
Nov 28 07:48:15 crc kubenswrapper[4922]: I1128 07:48:15.095888 4922 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_placement-operator-controller-manager-57988cc5b5-jgwt8_210cb1b4-9abb-454e-9f7e-9dbd52ec0c9b/manager/0.log"
Nov 28 07:48:15 crc kubenswrapper[4922]: I1128 07:48:15.169119 4922 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_rabbitmq-cluster-operator-manager-668c99d594-zq5v4_7427e034-38e4-460d-aefa-15842840a7c0/operator/0.log"
Nov 28 07:48:15 crc kubenswrapper[4922]: I1128 07:48:15.224691 4922 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_swift-operator-controller-manager-d77b94747-4f6j5_bbf78ced-8521-464d-a517-8ca721301421/kube-rbac-proxy/0.log"
Nov 28 07:48:15 crc kubenswrapper[4922]: I1128 07:48:15.274477 4922 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_swift-operator-controller-manager-d77b94747-4f6j5_bbf78ced-8521-464d-a517-8ca721301421/manager/0.log"
Nov 28 07:48:15 crc kubenswrapper[4922]: I1128 07:48:15.330876 4922 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_telemetry-operator-controller-manager-76cc84c6bb-5hpq8_89dd35b2-343e-4e06-9fac-6474b93b294e/kube-rbac-proxy/0.log"
Nov 28 07:48:15 crc kubenswrapper[4922]: I1128 07:48:15.413346 4922 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_telemetry-operator-controller-manager-76cc84c6bb-5hpq8_89dd35b2-343e-4e06-9fac-6474b93b294e/manager/0.log"
Nov 28 07:48:15 crc kubenswrapper[4922]: I1128 07:48:15.463622 4922 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_test-operator-controller-manager-5cd6c7f4c8-7mrxz_857fa43c-dddb-46c5-88e2-00e359b43c66/kube-rbac-proxy/0.log"
Nov 28 07:48:15 crc kubenswrapper[4922]: I1128 07:48:15.515499 4922 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_test-operator-controller-manager-5cd6c7f4c8-7mrxz_857fa43c-dddb-46c5-88e2-00e359b43c66/manager/0.log"
Nov 28 07:48:15 crc kubenswrapper[4922]: I1128 07:48:15.579304 4922 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_watcher-operator-controller-manager-656dcb59d4-h76jf_b46e91a2-650c-4ae1-afa0-6edc6768c99e/kube-rbac-proxy/0.log"
Nov 28 07:48:15 crc kubenswrapper[4922]: I1128 07:48:15.626574 4922 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_watcher-operator-controller-manager-656dcb59d4-h76jf_b46e91a2-650c-4ae1-afa0-6edc6768c99e/manager/0.log"
Nov 28 07:48:34 crc kubenswrapper[4922]: I1128 07:48:34.216599 4922 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-machine-api_control-plane-machine-set-operator-78cbb6b69f-w94qh_648b4624-2324-4ec1-aa88-5822c9f89034/control-plane-machine-set-operator/0.log"
Nov 28 07:48:34 crc kubenswrapper[4922]: I1128 07:48:34.349905 4922 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-machine-api_machine-api-operator-5694c8668f-rllbn_b5c2983d-2d59-4691-b2cd-130bfcd3e18c/kube-rbac-proxy/0.log"
Nov 28 07:48:34 crc kubenswrapper[4922]: I1128 07:48:34.402878 4922 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-machine-api_machine-api-operator-5694c8668f-rllbn_b5c2983d-2d59-4691-b2cd-130bfcd3e18c/machine-api-operator/0.log"
Nov 28 07:48:47 crc kubenswrapper[4922]: I1128 07:48:47.809818 4922 log.go:25] "Finished parsing log file" path="/var/log/pods/cert-manager_cert-manager-86cb77c54b-dpbll_6716937d-e1d0-4f62-b3bb-8f7102bded6c/cert-manager-controller/0.log"
Nov 28 07:48:47 crc kubenswrapper[4922]: I1128 07:48:47.947747 4922 log.go:25] "Finished parsing log file" path="/var/log/pods/cert-manager_cert-manager-cainjector-855d9ccff4-mqk48_bfc3b5d5-095c-444a-a868-2c7318e4ae1f/cert-manager-cainjector/0.log"
Nov 28 07:48:47 crc kubenswrapper[4922]: I1128 07:48:47.977613 4922 log.go:25] "Finished parsing log file" path="/var/log/pods/cert-manager_cert-manager-webhook-f4fb5df64-r8sg6_9a71a28c-df53-4df0-b3a8-5caae467bb94/cert-manager-webhook/0.log"
Nov 28 07:49:01 crc kubenswrapper[4922]: I1128 07:49:01.498926 4922 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-console-plugin-7fbb5f6569-d7sl7_bb22bb64-4624-4718-9785-1dc7c3c125e4/nmstate-console-plugin/0.log"
Nov 28 07:49:01 crc kubenswrapper[4922]: I1128 07:49:01.652777 4922 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-handler-btcsz_59fb61f1-8020-40e3-b397-d6ffa8fc74e4/nmstate-handler/0.log"
Nov 28 07:49:01 crc kubenswrapper[4922]: I1128 07:49:01.690140 4922 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-metrics-7f946cbc9-fsj5r_bda6cb3f-c411-41a1-8c49-338ac07f8193/kube-rbac-proxy/0.log"
Nov 28 07:49:01 crc kubenswrapper[4922]: I1128 07:49:01.745886 4922 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-metrics-7f946cbc9-fsj5r_bda6cb3f-c411-41a1-8c49-338ac07f8193/nmstate-metrics/0.log"
Nov 28 07:49:01 crc kubenswrapper[4922]: I1128 07:49:01.870942 4922 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-operator-5b5b58f5c8-khmcp_77e47301-a519-4efa-bd65-6530957b27a0/nmstate-operator/0.log"
Nov 28 07:49:01 crc kubenswrapper[4922]: I1128 07:49:01.942511 4922 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-webhook-5f6d4c5ccb-57bpj_13075e30-8b1b-484b-9808-c2912780a009/nmstate-webhook/0.log"
Nov 28 07:49:14 crc kubenswrapper[4922]: I1128 07:49:14.905529 4922 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-2s7g6"]
Nov 28 07:49:14 crc kubenswrapper[4922]: I1128 07:49:14.907790 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-2s7g6"
Nov 28 07:49:14 crc kubenswrapper[4922]: I1128 07:49:14.930352 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-2s7g6"]
Nov 28 07:49:15 crc kubenswrapper[4922]: I1128 07:49:15.074212 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/91ea2172-69d4-4310-970a-376c9e536ed2-catalog-content\") pod \"certified-operators-2s7g6\" (UID: \"91ea2172-69d4-4310-970a-376c9e536ed2\") " pod="openshift-marketplace/certified-operators-2s7g6"
Nov 28 07:49:15 crc kubenswrapper[4922]: I1128 07:49:15.074346 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/91ea2172-69d4-4310-970a-376c9e536ed2-utilities\") pod \"certified-operators-2s7g6\" (UID: \"91ea2172-69d4-4310-970a-376c9e536ed2\") " pod="openshift-marketplace/certified-operators-2s7g6"
Nov 28 07:49:15 crc kubenswrapper[4922]: I1128 07:49:15.074419 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-l52gs\" (UniqueName: \"kubernetes.io/projected/91ea2172-69d4-4310-970a-376c9e536ed2-kube-api-access-l52gs\") pod \"certified-operators-2s7g6\" (UID: \"91ea2172-69d4-4310-970a-376c9e536ed2\") " pod="openshift-marketplace/certified-operators-2s7g6"
Nov 28 07:49:15 crc kubenswrapper[4922]: I1128 07:49:15.176169 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-l52gs\" (UniqueName: \"kubernetes.io/projected/91ea2172-69d4-4310-970a-376c9e536ed2-kube-api-access-l52gs\") pod \"certified-operators-2s7g6\" (UID: \"91ea2172-69d4-4310-970a-376c9e536ed2\") " pod="openshift-marketplace/certified-operators-2s7g6"
Nov 28 07:49:15 crc kubenswrapper[4922]: I1128 07:49:15.176386 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/91ea2172-69d4-4310-970a-376c9e536ed2-catalog-content\") pod \"certified-operators-2s7g6\" (UID: \"91ea2172-69d4-4310-970a-376c9e536ed2\") " pod="openshift-marketplace/certified-operators-2s7g6"
Nov 28 07:49:15 crc kubenswrapper[4922]: I1128 07:49:15.176444 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/91ea2172-69d4-4310-970a-376c9e536ed2-utilities\") pod \"certified-operators-2s7g6\" (UID: \"91ea2172-69d4-4310-970a-376c9e536ed2\") " pod="openshift-marketplace/certified-operators-2s7g6"
Nov 28 07:49:15 crc kubenswrapper[4922]: I1128 07:49:15.177292 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/91ea2172-69d4-4310-970a-376c9e536ed2-catalog-content\") pod \"certified-operators-2s7g6\" (UID: \"91ea2172-69d4-4310-970a-376c9e536ed2\") " pod="openshift-marketplace/certified-operators-2s7g6"
Nov 28 07:49:15 crc kubenswrapper[4922]: I1128 07:49:15.177365 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/91ea2172-69d4-4310-970a-376c9e536ed2-utilities\") pod \"certified-operators-2s7g6\" (UID: \"91ea2172-69d4-4310-970a-376c9e536ed2\") " pod="openshift-marketplace/certified-operators-2s7g6"
Nov 28 07:49:15 crc kubenswrapper[4922]: I1128 07:49:15.202155 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-l52gs\" (UniqueName: \"kubernetes.io/projected/91ea2172-69d4-4310-970a-376c9e536ed2-kube-api-access-l52gs\") pod \"certified-operators-2s7g6\" (UID: \"91ea2172-69d4-4310-970a-376c9e536ed2\") " pod="openshift-marketplace/certified-operators-2s7g6"
Nov 28 07:49:15 crc kubenswrapper[4922]: I1128 07:49:15.236947 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-2s7g6"
Nov 28 07:49:15 crc kubenswrapper[4922]: I1128 07:49:15.684915 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-2s7g6"]
Nov 28 07:49:15 crc kubenswrapper[4922]: I1128 07:49:15.696059 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-2s7g6" event={"ID":"91ea2172-69d4-4310-970a-376c9e536ed2","Type":"ContainerStarted","Data":"20e15e6227fd190ad8db99cf19f51f10fc0d8a4cc42e9bcf1a07036120e33364"}
Nov 28 07:49:16 crc kubenswrapper[4922]: I1128 07:49:16.707277 4922 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-r9jp6"]
Nov 28 07:49:16 crc kubenswrapper[4922]: I1128 07:49:16.711189 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-r9jp6"
Nov 28 07:49:16 crc kubenswrapper[4922]: I1128 07:49:16.726189 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-r9jp6"]
Nov 28 07:49:16 crc kubenswrapper[4922]: I1128 07:49:16.733802 4922 generic.go:334] "Generic (PLEG): container finished" podID="91ea2172-69d4-4310-970a-376c9e536ed2" containerID="2a349536975ee526c740c5872ec7c629cf8cc7c7825a7ff3d004c5b749eb9084" exitCode=0
Nov 28 07:49:16 crc kubenswrapper[4922]: I1128 07:49:16.733867 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-2s7g6" event={"ID":"91ea2172-69d4-4310-970a-376c9e536ed2","Type":"ContainerDied","Data":"2a349536975ee526c740c5872ec7c629cf8cc7c7825a7ff3d004c5b749eb9084"}
Nov 28 07:49:16 crc kubenswrapper[4922]: I1128 07:49:16.898155 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/75037d4c-bc94-4df5-9c07-faf075579935-utilities\") pod \"redhat-operators-r9jp6\" (UID: \"75037d4c-bc94-4df5-9c07-faf075579935\") " pod="openshift-marketplace/redhat-operators-r9jp6"
Nov 28 07:49:16 crc kubenswrapper[4922]: I1128 07:49:16.898499 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/75037d4c-bc94-4df5-9c07-faf075579935-catalog-content\") pod \"redhat-operators-r9jp6\" (UID: \"75037d4c-bc94-4df5-9c07-faf075579935\") " pod="openshift-marketplace/redhat-operators-r9jp6"
Nov 28 07:49:16 crc kubenswrapper[4922]: I1128 07:49:16.898522 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8z45k\" (UniqueName: \"kubernetes.io/projected/75037d4c-bc94-4df5-9c07-faf075579935-kube-api-access-8z45k\") pod \"redhat-operators-r9jp6\" (UID: \"75037d4c-bc94-4df5-9c07-faf075579935\") " pod="openshift-marketplace/redhat-operators-r9jp6"
Nov 28 07:49:16 crc kubenswrapper[4922]: I1128 07:49:16.999271 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/75037d4c-bc94-4df5-9c07-faf075579935-utilities\") pod \"redhat-operators-r9jp6\" (UID: \"75037d4c-bc94-4df5-9c07-faf075579935\") " pod="openshift-marketplace/redhat-operators-r9jp6"
Nov 28 07:49:16 crc kubenswrapper[4922]: I1128 07:49:16.999347 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/75037d4c-bc94-4df5-9c07-faf075579935-catalog-content\") pod \"redhat-operators-r9jp6\" (UID: \"75037d4c-bc94-4df5-9c07-faf075579935\") " pod="openshift-marketplace/redhat-operators-r9jp6"
Nov 28 07:49:16 crc kubenswrapper[4922]: I1128 07:49:16.999368 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8z45k\" (UniqueName: \"kubernetes.io/projected/75037d4c-bc94-4df5-9c07-faf075579935-kube-api-access-8z45k\") pod \"redhat-operators-r9jp6\" (UID: \"75037d4c-bc94-4df5-9c07-faf075579935\") " pod="openshift-marketplace/redhat-operators-r9jp6"
Nov 28 07:49:16 crc kubenswrapper[4922]: I1128 07:49:16.999918 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/75037d4c-bc94-4df5-9c07-faf075579935-catalog-content\") pod \"redhat-operators-r9jp6\" (UID: \"75037d4c-bc94-4df5-9c07-faf075579935\") " pod="openshift-marketplace/redhat-operators-r9jp6"
Nov 28 07:49:17 crc kubenswrapper[4922]: I1128 07:49:16.999971 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/75037d4c-bc94-4df5-9c07-faf075579935-utilities\") pod \"redhat-operators-r9jp6\" (UID: \"75037d4c-bc94-4df5-9c07-faf075579935\") " pod="openshift-marketplace/redhat-operators-r9jp6"
Nov 28 07:49:17 crc kubenswrapper[4922]: I1128 07:49:17.017858 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8z45k\" (UniqueName: \"kubernetes.io/projected/75037d4c-bc94-4df5-9c07-faf075579935-kube-api-access-8z45k\") pod \"redhat-operators-r9jp6\" (UID: \"75037d4c-bc94-4df5-9c07-faf075579935\") " pod="openshift-marketplace/redhat-operators-r9jp6"
Nov 28 07:49:17 crc kubenswrapper[4922]: I1128 07:49:17.039287 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-r9jp6"
Nov 28 07:49:17 crc kubenswrapper[4922]: I1128 07:49:17.258615 4922 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_controller-f8648f98b-5zhcz_20cf8473-8d3f-479b-8126-ecf9370b3b75/kube-rbac-proxy/0.log"
Nov 28 07:49:17 crc kubenswrapper[4922]: I1128 07:49:17.308314 4922 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-dk9mk"]
Nov 28 07:49:17 crc kubenswrapper[4922]: I1128 07:49:17.309772 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-dk9mk"
Nov 28 07:49:17 crc kubenswrapper[4922]: I1128 07:49:17.322176 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-dk9mk"]
Nov 28 07:49:17 crc kubenswrapper[4922]: I1128 07:49:17.405050 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tv44h\" (UniqueName: \"kubernetes.io/projected/03a90ec9-7f0a-4cef-9465-c7deaad99357-kube-api-access-tv44h\") pod \"redhat-marketplace-dk9mk\" (UID: \"03a90ec9-7f0a-4cef-9465-c7deaad99357\") " pod="openshift-marketplace/redhat-marketplace-dk9mk"
Nov 28 07:49:17 crc kubenswrapper[4922]: I1128 07:49:17.405086 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/03a90ec9-7f0a-4cef-9465-c7deaad99357-utilities\") pod \"redhat-marketplace-dk9mk\" (UID: \"03a90ec9-7f0a-4cef-9465-c7deaad99357\") " pod="openshift-marketplace/redhat-marketplace-dk9mk"
Nov 28 07:49:17 crc kubenswrapper[4922]: I1128 07:49:17.405112 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/03a90ec9-7f0a-4cef-9465-c7deaad99357-catalog-content\") pod \"redhat-marketplace-dk9mk\" (UID: \"03a90ec9-7f0a-4cef-9465-c7deaad99357\") " pod="openshift-marketplace/redhat-marketplace-dk9mk"
Nov 28 07:49:17 crc kubenswrapper[4922]: I1128 07:49:17.464308 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-r9jp6"]
Nov 28 07:49:17 crc kubenswrapper[4922]: I1128 07:49:17.506055 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tv44h\" (UniqueName: \"kubernetes.io/projected/03a90ec9-7f0a-4cef-9465-c7deaad99357-kube-api-access-tv44h\") pod \"redhat-marketplace-dk9mk\" (UID: \"03a90ec9-7f0a-4cef-9465-c7deaad99357\") " pod="openshift-marketplace/redhat-marketplace-dk9mk"
Nov 28 07:49:17 crc kubenswrapper[4922]: I1128 07:49:17.506101 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/03a90ec9-7f0a-4cef-9465-c7deaad99357-utilities\") pod \"redhat-marketplace-dk9mk\" (UID: \"03a90ec9-7f0a-4cef-9465-c7deaad99357\") " pod="openshift-marketplace/redhat-marketplace-dk9mk"
Nov 28 07:49:17 crc kubenswrapper[4922]: I1128 07:49:17.506126 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/03a90ec9-7f0a-4cef-9465-c7deaad99357-catalog-content\") pod \"redhat-marketplace-dk9mk\" (UID: \"03a90ec9-7f0a-4cef-9465-c7deaad99357\") " pod="openshift-marketplace/redhat-marketplace-dk9mk"
Nov 28 07:49:17 crc kubenswrapper[4922]: I1128 07:49:17.507432 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/03a90ec9-7f0a-4cef-9465-c7deaad99357-utilities\") pod \"redhat-marketplace-dk9mk\" (UID: \"03a90ec9-7f0a-4cef-9465-c7deaad99357\") " pod="openshift-marketplace/redhat-marketplace-dk9mk"
Nov 28 07:49:17 crc kubenswrapper[4922]: I1128 07:49:17.507642 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/03a90ec9-7f0a-4cef-9465-c7deaad99357-catalog-content\") pod \"redhat-marketplace-dk9mk\" (UID: \"03a90ec9-7f0a-4cef-9465-c7deaad99357\") " pod="openshift-marketplace/redhat-marketplace-dk9mk"
Nov 28 07:49:17 crc kubenswrapper[4922]: I1128 07:49:17.507702 4922 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_controller-f8648f98b-5zhcz_20cf8473-8d3f-479b-8126-ecf9370b3b75/controller/0.log"
Nov 28 07:49:17 crc kubenswrapper[4922]: I1128 07:49:17.531486 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tv44h\" (UniqueName: \"kubernetes.io/projected/03a90ec9-7f0a-4cef-9465-c7deaad99357-kube-api-access-tv44h\") pod \"redhat-marketplace-dk9mk\" (UID: \"03a90ec9-7f0a-4cef-9465-c7deaad99357\") " pod="openshift-marketplace/redhat-marketplace-dk9mk"
Nov 28 07:49:17 crc kubenswrapper[4922]: I1128 07:49:17.589757 4922 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-qgjm8_72bbf902-847b-45f2-9bb3-57de7a0a88ce/cp-frr-files/0.log"
Nov 28 07:49:17 crc kubenswrapper[4922]: I1128 07:49:17.635825 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-dk9mk"
Nov 28 07:49:17 crc kubenswrapper[4922]: I1128 07:49:17.741472 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-2s7g6" event={"ID":"91ea2172-69d4-4310-970a-376c9e536ed2","Type":"ContainerStarted","Data":"903b8f1c6b8e4f36dcc4a676d8c9e83837f9032ed9ed880619aa5bd642796966"}
Nov 28 07:49:17 crc kubenswrapper[4922]: I1128 07:49:17.757829 4922 generic.go:334] "Generic (PLEG): container finished" podID="75037d4c-bc94-4df5-9c07-faf075579935" containerID="bbcc35b889ed8579dd4b8256e88befbfa3314156da18b2f6e5e954d15ef59943" exitCode=0
Nov 28 07:49:17 crc kubenswrapper[4922]: I1128 07:49:17.757874 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-r9jp6" event={"ID":"75037d4c-bc94-4df5-9c07-faf075579935","Type":"ContainerDied","Data":"bbcc35b889ed8579dd4b8256e88befbfa3314156da18b2f6e5e954d15ef59943"}
Nov 28 07:49:17 crc kubenswrapper[4922]: I1128 07:49:17.757896 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-r9jp6" event={"ID":"75037d4c-bc94-4df5-9c07-faf075579935","Type":"ContainerStarted","Data":"cec528950c9c47c4f59004cadd9be1af307e103ffb90a43b689c7d6ce5655d84"}
Nov 28 07:49:17 crc kubenswrapper[4922]: I1128 07:49:17.969179 4922 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-qgjm8_72bbf902-847b-45f2-9bb3-57de7a0a88ce/cp-metrics/0.log"
Nov 28 07:49:18 crc kubenswrapper[4922]: I1128 07:49:18.020343 4922 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-qgjm8_72bbf902-847b-45f2-9bb3-57de7a0a88ce/cp-reloader/0.log"
Nov 28 07:49:18 crc kubenswrapper[4922]: I1128 07:49:18.119634 4922 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-qgjm8_72bbf902-847b-45f2-9bb3-57de7a0a88ce/cp-reloader/0.log"
Nov 28 07:49:18 crc kubenswrapper[4922]: I1128 07:49:18.119999 4922 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-qgjm8_72bbf902-847b-45f2-9bb3-57de7a0a88ce/cp-frr-files/0.log"
Nov 28 07:49:18 crc kubenswrapper[4922]: I1128 07:49:18.129044 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-dk9mk"]
Nov 28 07:49:18 crc kubenswrapper[4922]: I1128 07:49:18.310830 4922 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-qgjm8_72bbf902-847b-45f2-9bb3-57de7a0a88ce/cp-frr-files/0.log"
Nov 28 07:49:18 crc kubenswrapper[4922]: I1128 07:49:18.368148 4922 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-qgjm8_72bbf902-847b-45f2-9bb3-57de7a0a88ce/cp-metrics/0.log"
Nov 28 07:49:18 crc kubenswrapper[4922]: I1128 07:49:18.372611 4922 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-qgjm8_72bbf902-847b-45f2-9bb3-57de7a0a88ce/cp-metrics/0.log"
Nov 28 07:49:18 crc kubenswrapper[4922]: I1128 07:49:18.408638 4922 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-qgjm8_72bbf902-847b-45f2-9bb3-57de7a0a88ce/cp-reloader/0.log"
Nov 28 07:49:18 crc kubenswrapper[4922]: I1128 07:49:18.623031 4922 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-qgjm8_72bbf902-847b-45f2-9bb3-57de7a0a88ce/cp-metrics/0.log"
Nov 28 07:49:18 crc kubenswrapper[4922]: I1128 07:49:18.628879 4922 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-qgjm8_72bbf902-847b-45f2-9bb3-57de7a0a88ce/cp-reloader/0.log"
Nov 28 07:49:18 crc kubenswrapper[4922]: I1128 07:49:18.641642 4922 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-qgjm8_72bbf902-847b-45f2-9bb3-57de7a0a88ce/cp-frr-files/0.log"
Nov 28 07:49:18 crc kubenswrapper[4922]: I1128 07:49:18.675964 4922 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-qgjm8_72bbf902-847b-45f2-9bb3-57de7a0a88ce/controller/0.log"
Nov 28 07:49:18 crc kubenswrapper[4922]: I1128 07:49:18.766799 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-r9jp6" event={"ID":"75037d4c-bc94-4df5-9c07-faf075579935","Type":"ContainerStarted","Data":"448949ef6f070e962c79825c84ee3b6bc77cf98592edaa41e6fa771256e5a9be"}
Nov 28 07:49:18 crc kubenswrapper[4922]: I1128 07:49:18.768468 4922 generic.go:334] "Generic (PLEG): container finished" podID="03a90ec9-7f0a-4cef-9465-c7deaad99357" containerID="121eda8ce6d78f09053db84e301a7799040239571b722d5b6a9c46c090d4c9d2" exitCode=0
Nov 28 07:49:18 crc kubenswrapper[4922]: I1128 07:49:18.768528 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-dk9mk" event={"ID":"03a90ec9-7f0a-4cef-9465-c7deaad99357","Type":"ContainerDied","Data":"121eda8ce6d78f09053db84e301a7799040239571b722d5b6a9c46c090d4c9d2"}
Nov 28 07:49:18 crc kubenswrapper[4922]: I1128 07:49:18.768546 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-dk9mk" event={"ID":"03a90ec9-7f0a-4cef-9465-c7deaad99357","Type":"ContainerStarted","Data":"8b9e3a4c8bf50fc1ea6dacf02ffa0a7cbcae5af357b12207bf50074ffaa7e584"}
Nov 28 07:49:18 crc kubenswrapper[4922]: I1128 07:49:18.770903 4922 generic.go:334] "Generic (PLEG): container finished" podID="91ea2172-69d4-4310-970a-376c9e536ed2" containerID="903b8f1c6b8e4f36dcc4a676d8c9e83837f9032ed9ed880619aa5bd642796966" exitCode=0
Nov 28 07:49:18 crc kubenswrapper[4922]: I1128 07:49:18.770924 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-2s7g6" event={"ID":"91ea2172-69d4-4310-970a-376c9e536ed2","Type":"ContainerDied","Data":"903b8f1c6b8e4f36dcc4a676d8c9e83837f9032ed9ed880619aa5bd642796966"}
Nov 28 07:49:18 crc kubenswrapper[4922]: I1128 07:49:18.861175 4922 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-qgjm8_72bbf902-847b-45f2-9bb3-57de7a0a88ce/frr-metrics/0.log"
Nov 28 07:49:18 crc kubenswrapper[4922]: I1128 07:49:18.879662 4922 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-qgjm8_72bbf902-847b-45f2-9bb3-57de7a0a88ce/kube-rbac-proxy-frr/0.log"
Nov 28 07:49:18 crc kubenswrapper[4922]: I1128 07:49:18.887477 4922 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-qgjm8_72bbf902-847b-45f2-9bb3-57de7a0a88ce/kube-rbac-proxy/0.log"
Nov 28 07:49:19 crc kubenswrapper[4922]: I1128 07:49:19.057759 4922 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-qgjm8_72bbf902-847b-45f2-9bb3-57de7a0a88ce/reloader/0.log"
Nov 28 07:49:19 crc kubenswrapper[4922]: I1128 07:49:19.133341 4922 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-webhook-server-7fcb986d4-g5vcm_70214104-7571-4422-9e5d-85d690d8469f/frr-k8s-webhook-server/0.log"
Nov 28 07:49:19 crc kubenswrapper[4922]: I1128 07:49:19.323117 4922 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_metallb-operator-controller-manager-5cdbbf65c9-tnz7w_60818a80-77e8-4256-a958-a6333c763453/manager/0.log"
Nov 28 07:49:19 crc kubenswrapper[4922]: I1128 07:49:19.469599 4922 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_metallb-operator-webhook-server-74d65d85cd-qs7c6_5de70ea0-722a-4efb-8f49-f005ef4f661c/webhook-server/0.log"
Nov 28 07:49:19 crc kubenswrapper[4922]: I1128 07:49:19.667214 4922 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_speaker-ncff2_33fadc85-85d3-48cc-977c-babb047b9a0c/kube-rbac-proxy/0.log"
Nov 28 07:49:19 crc kubenswrapper[4922]: I1128 07:49:19.764679 4922 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-qgjm8_72bbf902-847b-45f2-9bb3-57de7a0a88ce/frr/0.log"
Nov 28 07:49:19 crc kubenswrapper[4922]: I1128 07:49:19.784415 4922 generic.go:334] "Generic (PLEG): container finished" podID="75037d4c-bc94-4df5-9c07-faf075579935" containerID="448949ef6f070e962c79825c84ee3b6bc77cf98592edaa41e6fa771256e5a9be" exitCode=0
Nov 28 07:49:19 crc kubenswrapper[4922]: I1128 07:49:19.784480 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-r9jp6" event={"ID":"75037d4c-bc94-4df5-9c07-faf075579935","Type":"ContainerDied","Data":"448949ef6f070e962c79825c84ee3b6bc77cf98592edaa41e6fa771256e5a9be"}
Nov 28 07:49:19 crc kubenswrapper[4922]: I1128 07:49:19.786646 4922 generic.go:334] "Generic (PLEG): container finished" podID="03a90ec9-7f0a-4cef-9465-c7deaad99357" containerID="02810b35a38db5607785a33444938486f006f169077d7640c7d8c3b7db2f32e3" exitCode=0
Nov 28 07:49:19 crc kubenswrapper[4922]: I1128 07:49:19.786689 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-dk9mk" event={"ID":"03a90ec9-7f0a-4cef-9465-c7deaad99357","Type":"ContainerDied","Data":"02810b35a38db5607785a33444938486f006f169077d7640c7d8c3b7db2f32e3"}
Nov 28 07:49:19 crc kubenswrapper[4922]: I1128 07:49:19.789631 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-2s7g6" event={"ID":"91ea2172-69d4-4310-970a-376c9e536ed2","Type":"ContainerStarted","Data":"908dad43aeae2916059513282bc62778871fb77173bd88b221382cdd56100c05"}
Nov 28 07:49:19 crc kubenswrapper[4922]: I1128 07:49:19.839910 4922 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-2s7g6" podStartSLOduration=3.14093921 podStartE2EDuration="5.839893535s" podCreationTimestamp="2025-11-28 07:49:14 +0000 UTC" firstStartedPulling="2025-11-28 07:49:16.73761295 +0000 UTC m=+3401.658008542" lastFinishedPulling="2025-11-28 07:49:19.436567285 +0000 UTC m=+3404.356962867" observedRunningTime="2025-11-28 07:49:19.838255721 +0000 UTC m=+3404.758651313" watchObservedRunningTime="2025-11-28 07:49:19.839893535 +0000 UTC m=+3404.760289117"
Nov 28 07:49:19 crc kubenswrapper[4922]: I1128 07:49:19.968911 4922 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_speaker-ncff2_33fadc85-85d3-48cc-977c-babb047b9a0c/speaker/0.log"
Nov 28 07:49:20 crc kubenswrapper[4922]: I1128 07:49:20.799053 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-dk9mk" event={"ID":"03a90ec9-7f0a-4cef-9465-c7deaad99357","Type":"ContainerStarted","Data":"164abd224027a85a5896a710bcb19f4be488e6082b5125659da26370d48ed3fb"}
Nov 28 07:49:20 crc kubenswrapper[4922]: I1128 07:49:20.801454 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-r9jp6" event={"ID":"75037d4c-bc94-4df5-9c07-faf075579935","Type":"ContainerStarted","Data":"a6c2fe381ab2aef2c9cd20c892891b8a5a94603cb365f283a06d80aeb75d6216"}
Nov 28 07:49:20 crc kubenswrapper[4922]: I1128 07:49:20.819458 4922 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-dk9mk" podStartSLOduration=2.159808914 podStartE2EDuration="3.819422951s" podCreationTimestamp="2025-11-28 07:49:17 +0000 UTC" firstStartedPulling="2025-11-28 07:49:18.769664041 +0000 UTC m=+3403.690059623" lastFinishedPulling="2025-11-28 07:49:20.429278078 +0000 UTC m=+3405.349673660" observedRunningTime="2025-11-28 07:49:20.817207091 +0000 UTC m=+3405.737602673" watchObservedRunningTime="2025-11-28 07:49:20.819422951 +0000 UTC m=+3405.739818543"
Nov 28 07:49:20 crc kubenswrapper[4922]: I1128 07:49:20.844003 4922 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-r9jp6" podStartSLOduration=2.402503772 podStartE2EDuration="4.843984047s" podCreationTimestamp="2025-11-28 07:49:16 +0000 UTC" firstStartedPulling="2025-11-28 07:49:17.760260956 +0000 UTC m=+3402.680656538" lastFinishedPulling="2025-11-28 07:49:20.201741231 +0000 UTC m=+3405.122136813" observedRunningTime="2025-11-28 07:49:20.837333816 +0000 UTC m=+3405.757729408" watchObservedRunningTime="2025-11-28 07:49:20.843984047 +0000 UTC m=+3405.764379629"
Nov 28 07:49:25 crc kubenswrapper[4922]: I1128 07:49:25.237983 4922 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-2s7g6"
Nov 28 07:49:25 crc kubenswrapper[4922]: I1128 07:49:25.238392 4922 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-2s7g6"
Nov 28 07:49:25 crc kubenswrapper[4922]: I1128 07:49:25.289037 4922 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-2s7g6"
Nov 28 07:49:25 crc kubenswrapper[4922]: I1128 07:49:25.910698 4922 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-2s7g6"
Nov 28 07:49:27 crc kubenswrapper[4922]: I1128 07:49:27.039549 4922 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-r9jp6"
Nov 28 07:49:27 crc kubenswrapper[4922]: I1128 07:49:27.040069 4922 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-r9jp6"
Nov 28
07:49:27 crc kubenswrapper[4922]: I1128 07:49:27.118193 4922 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-2s7g6"] Nov 28 07:49:27 crc kubenswrapper[4922]: I1128 07:49:27.122319 4922 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-r9jp6" Nov 28 07:49:27 crc kubenswrapper[4922]: I1128 07:49:27.636258 4922 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-dk9mk" Nov 28 07:49:27 crc kubenswrapper[4922]: I1128 07:49:27.636346 4922 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-dk9mk" Nov 28 07:49:27 crc kubenswrapper[4922]: I1128 07:49:27.695324 4922 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-dk9mk" Nov 28 07:49:27 crc kubenswrapper[4922]: I1128 07:49:27.859750 4922 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-2s7g6" podUID="91ea2172-69d4-4310-970a-376c9e536ed2" containerName="registry-server" containerID="cri-o://908dad43aeae2916059513282bc62778871fb77173bd88b221382cdd56100c05" gracePeriod=2 Nov 28 07:49:27 crc kubenswrapper[4922]: I1128 07:49:27.907682 4922 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-r9jp6" Nov 28 07:49:27 crc kubenswrapper[4922]: I1128 07:49:27.912393 4922 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-dk9mk" Nov 28 07:49:29 crc kubenswrapper[4922]: I1128 07:49:29.488729 4922 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-r9jp6"] Nov 28 07:49:29 crc kubenswrapper[4922]: I1128 07:49:29.886186 4922 generic.go:334] "Generic (PLEG): container finished" podID="91ea2172-69d4-4310-970a-376c9e536ed2" containerID="908dad43aeae2916059513282bc62778871fb77173bd88b221382cdd56100c05" exitCode=0 Nov 28 07:49:29 crc kubenswrapper[4922]: I1128 07:49:29.886302 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-2s7g6" event={"ID":"91ea2172-69d4-4310-970a-376c9e536ed2","Type":"ContainerDied","Data":"908dad43aeae2916059513282bc62778871fb77173bd88b221382cdd56100c05"} Nov 28 07:49:30 crc kubenswrapper[4922]: I1128 07:49:30.318447 4922 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-2s7g6" Nov 28 07:49:30 crc kubenswrapper[4922]: I1128 07:49:30.509656 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/91ea2172-69d4-4310-970a-376c9e536ed2-catalog-content\") pod \"91ea2172-69d4-4310-970a-376c9e536ed2\" (UID: \"91ea2172-69d4-4310-970a-376c9e536ed2\") " Nov 28 07:49:30 crc kubenswrapper[4922]: I1128 07:49:30.509806 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/91ea2172-69d4-4310-970a-376c9e536ed2-utilities\") pod \"91ea2172-69d4-4310-970a-376c9e536ed2\" (UID: \"91ea2172-69d4-4310-970a-376c9e536ed2\") " Nov 28 07:49:30 crc kubenswrapper[4922]: I1128 07:49:30.509877 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-l52gs\" (UniqueName: \"kubernetes.io/projected/91ea2172-69d4-4310-970a-376c9e536ed2-kube-api-access-l52gs\") pod \"91ea2172-69d4-4310-970a-376c9e536ed2\" (UID: \"91ea2172-69d4-4310-970a-376c9e536ed2\") " Nov 28 07:49:30 crc kubenswrapper[4922]: I1128 07:49:30.510719 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/91ea2172-69d4-4310-970a-376c9e536ed2-utilities" (OuterVolumeSpecName: "utilities") pod "91ea2172-69d4-4310-970a-376c9e536ed2" (UID: "91ea2172-69d4-4310-970a-376c9e536ed2"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 07:49:30 crc kubenswrapper[4922]: I1128 07:49:30.519470 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/91ea2172-69d4-4310-970a-376c9e536ed2-kube-api-access-l52gs" (OuterVolumeSpecName: "kube-api-access-l52gs") pod "91ea2172-69d4-4310-970a-376c9e536ed2" (UID: "91ea2172-69d4-4310-970a-376c9e536ed2"). InnerVolumeSpecName "kube-api-access-l52gs". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 07:49:30 crc kubenswrapper[4922]: I1128 07:49:30.553897 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/91ea2172-69d4-4310-970a-376c9e536ed2-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "91ea2172-69d4-4310-970a-376c9e536ed2" (UID: "91ea2172-69d4-4310-970a-376c9e536ed2"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 07:49:30 crc kubenswrapper[4922]: I1128 07:49:30.611629 4922 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/91ea2172-69d4-4310-970a-376c9e536ed2-utilities\") on node \"crc\" DevicePath \"\"" Nov 28 07:49:30 crc kubenswrapper[4922]: I1128 07:49:30.611673 4922 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-l52gs\" (UniqueName: \"kubernetes.io/projected/91ea2172-69d4-4310-970a-376c9e536ed2-kube-api-access-l52gs\") on node \"crc\" DevicePath \"\"" Nov 28 07:49:30 crc kubenswrapper[4922]: I1128 07:49:30.611691 4922 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/91ea2172-69d4-4310-970a-376c9e536ed2-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 28 07:49:30 crc kubenswrapper[4922]: I1128 07:49:30.896265 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-2s7g6" event={"ID":"91ea2172-69d4-4310-970a-376c9e536ed2","Type":"ContainerDied","Data":"20e15e6227fd190ad8db99cf19f51f10fc0d8a4cc42e9bcf1a07036120e33364"} Nov 28 07:49:30 crc kubenswrapper[4922]: I1128 07:49:30.896320 4922 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-2s7g6" Nov 28 07:49:30 crc kubenswrapper[4922]: I1128 07:49:30.896328 4922 scope.go:117] "RemoveContainer" containerID="908dad43aeae2916059513282bc62778871fb77173bd88b221382cdd56100c05" Nov 28 07:49:30 crc kubenswrapper[4922]: I1128 07:49:30.896737 4922 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-r9jp6" podUID="75037d4c-bc94-4df5-9c07-faf075579935" containerName="registry-server" containerID="cri-o://a6c2fe381ab2aef2c9cd20c892891b8a5a94603cb365f283a06d80aeb75d6216" gracePeriod=2 Nov 28 07:49:30 crc kubenswrapper[4922]: I1128 07:49:30.946333 4922 scope.go:117] "RemoveContainer" containerID="903b8f1c6b8e4f36dcc4a676d8c9e83837f9032ed9ed880619aa5bd642796966" Nov 28 07:49:30 crc kubenswrapper[4922]: I1128 07:49:30.972732 4922 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-2s7g6"] Nov 28 07:49:30 crc kubenswrapper[4922]: I1128 07:49:30.978996 4922 scope.go:117] "RemoveContainer" containerID="2a349536975ee526c740c5872ec7c629cf8cc7c7825a7ff3d004c5b749eb9084" Nov 28 07:49:30 crc kubenswrapper[4922]: I1128 07:49:30.984271 4922 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-2s7g6"] Nov 28 07:49:31 crc kubenswrapper[4922]: E1128 07:49:31.102307 4922 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod91ea2172_69d4_4310_970a_376c9e536ed2.slice/crio-20e15e6227fd190ad8db99cf19f51f10fc0d8a4cc42e9bcf1a07036120e33364\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod75037d4c_bc94_4df5_9c07_faf075579935.slice/crio-a6c2fe381ab2aef2c9cd20c892891b8a5a94603cb365f283a06d80aeb75d6216.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod91ea2172_69d4_4310_970a_376c9e536ed2.slice\": RecentStats: unable to find data in memory cache]" Nov 28 07:49:31 crc kubenswrapper[4922]: I1128 07:49:31.409410 4922 kubelet_volumes.go:163] "Cleaned up 
orphaned pod volumes dir" podUID="91ea2172-69d4-4310-970a-376c9e536ed2" path="/var/lib/kubelet/pods/91ea2172-69d4-4310-970a-376c9e536ed2/volumes" Nov 28 07:49:31 crc kubenswrapper[4922]: I1128 07:49:31.691219 4922 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-dk9mk"] Nov 28 07:49:31 crc kubenswrapper[4922]: I1128 07:49:31.691452 4922 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-dk9mk" podUID="03a90ec9-7f0a-4cef-9465-c7deaad99357" containerName="registry-server" containerID="cri-o://164abd224027a85a5896a710bcb19f4be488e6082b5125659da26370d48ed3fb" gracePeriod=2 Nov 28 07:49:31 crc kubenswrapper[4922]: I1128 07:49:31.907516 4922 generic.go:334] "Generic (PLEG): container finished" podID="03a90ec9-7f0a-4cef-9465-c7deaad99357" containerID="164abd224027a85a5896a710bcb19f4be488e6082b5125659da26370d48ed3fb" exitCode=0 Nov 28 07:49:31 crc kubenswrapper[4922]: I1128 07:49:31.907621 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-dk9mk" event={"ID":"03a90ec9-7f0a-4cef-9465-c7deaad99357","Type":"ContainerDied","Data":"164abd224027a85a5896a710bcb19f4be488e6082b5125659da26370d48ed3fb"} Nov 28 07:49:31 crc kubenswrapper[4922]: I1128 07:49:31.912094 4922 generic.go:334] "Generic (PLEG): container finished" podID="75037d4c-bc94-4df5-9c07-faf075579935" containerID="a6c2fe381ab2aef2c9cd20c892891b8a5a94603cb365f283a06d80aeb75d6216" exitCode=0 Nov 28 07:49:31 crc kubenswrapper[4922]: I1128 07:49:31.912137 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-r9jp6" event={"ID":"75037d4c-bc94-4df5-9c07-faf075579935","Type":"ContainerDied","Data":"a6c2fe381ab2aef2c9cd20c892891b8a5a94603cb365f283a06d80aeb75d6216"} Nov 28 07:49:32 crc kubenswrapper[4922]: I1128 07:49:32.162633 4922 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-dk9mk" Nov 28 07:49:32 crc kubenswrapper[4922]: I1128 07:49:32.236439 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/03a90ec9-7f0a-4cef-9465-c7deaad99357-catalog-content\") pod \"03a90ec9-7f0a-4cef-9465-c7deaad99357\" (UID: \"03a90ec9-7f0a-4cef-9465-c7deaad99357\") " Nov 28 07:49:32 crc kubenswrapper[4922]: I1128 07:49:32.236523 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/03a90ec9-7f0a-4cef-9465-c7deaad99357-utilities\") pod \"03a90ec9-7f0a-4cef-9465-c7deaad99357\" (UID: \"03a90ec9-7f0a-4cef-9465-c7deaad99357\") " Nov 28 07:49:32 crc kubenswrapper[4922]: I1128 07:49:32.236578 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tv44h\" (UniqueName: \"kubernetes.io/projected/03a90ec9-7f0a-4cef-9465-c7deaad99357-kube-api-access-tv44h\") pod \"03a90ec9-7f0a-4cef-9465-c7deaad99357\" (UID: \"03a90ec9-7f0a-4cef-9465-c7deaad99357\") " Nov 28 07:49:32 crc kubenswrapper[4922]: I1128 07:49:32.242360 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/03a90ec9-7f0a-4cef-9465-c7deaad99357-utilities" (OuterVolumeSpecName: "utilities") pod "03a90ec9-7f0a-4cef-9465-c7deaad99357" (UID: "03a90ec9-7f0a-4cef-9465-c7deaad99357"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 07:49:32 crc kubenswrapper[4922]: I1128 07:49:32.253781 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/03a90ec9-7f0a-4cef-9465-c7deaad99357-kube-api-access-tv44h" (OuterVolumeSpecName: "kube-api-access-tv44h") pod "03a90ec9-7f0a-4cef-9465-c7deaad99357" (UID: "03a90ec9-7f0a-4cef-9465-c7deaad99357"). InnerVolumeSpecName "kube-api-access-tv44h". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 07:49:32 crc kubenswrapper[4922]: I1128 07:49:32.258645 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/03a90ec9-7f0a-4cef-9465-c7deaad99357-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "03a90ec9-7f0a-4cef-9465-c7deaad99357" (UID: "03a90ec9-7f0a-4cef-9465-c7deaad99357"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 07:49:32 crc kubenswrapper[4922]: I1128 07:49:32.338441 4922 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/03a90ec9-7f0a-4cef-9465-c7deaad99357-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 28 07:49:32 crc kubenswrapper[4922]: I1128 07:49:32.338468 4922 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/03a90ec9-7f0a-4cef-9465-c7deaad99357-utilities\") on node \"crc\" DevicePath \"\"" Nov 28 07:49:32 crc kubenswrapper[4922]: I1128 07:49:32.338477 4922 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tv44h\" (UniqueName: \"kubernetes.io/projected/03a90ec9-7f0a-4cef-9465-c7deaad99357-kube-api-access-tv44h\") on node \"crc\" DevicePath \"\"" Nov 28 07:49:32 crc kubenswrapper[4922]: I1128 07:49:32.380137 4922 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-r9jp6" Nov 28 07:49:32 crc kubenswrapper[4922]: I1128 07:49:32.439354 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/75037d4c-bc94-4df5-9c07-faf075579935-utilities\") pod \"75037d4c-bc94-4df5-9c07-faf075579935\" (UID: \"75037d4c-bc94-4df5-9c07-faf075579935\") " Nov 28 07:49:32 crc kubenswrapper[4922]: I1128 07:49:32.439459 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8z45k\" (UniqueName: \"kubernetes.io/projected/75037d4c-bc94-4df5-9c07-faf075579935-kube-api-access-8z45k\") pod \"75037d4c-bc94-4df5-9c07-faf075579935\" (UID: \"75037d4c-bc94-4df5-9c07-faf075579935\") " Nov 28 07:49:32 crc kubenswrapper[4922]: I1128 07:49:32.439490 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/75037d4c-bc94-4df5-9c07-faf075579935-catalog-content\") pod \"75037d4c-bc94-4df5-9c07-faf075579935\" (UID: \"75037d4c-bc94-4df5-9c07-faf075579935\") " Nov 28 07:49:32 crc kubenswrapper[4922]: I1128 07:49:32.441821 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/75037d4c-bc94-4df5-9c07-faf075579935-utilities" (OuterVolumeSpecName: "utilities") pod "75037d4c-bc94-4df5-9c07-faf075579935" (UID: "75037d4c-bc94-4df5-9c07-faf075579935"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 07:49:32 crc kubenswrapper[4922]: I1128 07:49:32.447293 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/75037d4c-bc94-4df5-9c07-faf075579935-kube-api-access-8z45k" (OuterVolumeSpecName: "kube-api-access-8z45k") pod "75037d4c-bc94-4df5-9c07-faf075579935" (UID: "75037d4c-bc94-4df5-9c07-faf075579935"). InnerVolumeSpecName "kube-api-access-8z45k". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 07:49:32 crc kubenswrapper[4922]: I1128 07:49:32.540952 4922 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8z45k\" (UniqueName: \"kubernetes.io/projected/75037d4c-bc94-4df5-9c07-faf075579935-kube-api-access-8z45k\") on node \"crc\" DevicePath \"\"" Nov 28 07:49:32 crc kubenswrapper[4922]: I1128 07:49:32.540995 4922 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/75037d4c-bc94-4df5-9c07-faf075579935-utilities\") on node \"crc\" DevicePath \"\"" Nov 28 07:49:32 crc kubenswrapper[4922]: I1128 07:49:32.547777 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/75037d4c-bc94-4df5-9c07-faf075579935-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "75037d4c-bc94-4df5-9c07-faf075579935" (UID: "75037d4c-bc94-4df5-9c07-faf075579935"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 07:49:32 crc kubenswrapper[4922]: I1128 07:49:32.642687 4922 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/75037d4c-bc94-4df5-9c07-faf075579935-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 28 07:49:32 crc kubenswrapper[4922]: I1128 07:49:32.921141 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-r9jp6" event={"ID":"75037d4c-bc94-4df5-9c07-faf075579935","Type":"ContainerDied","Data":"cec528950c9c47c4f59004cadd9be1af307e103ffb90a43b689c7d6ce5655d84"} Nov 28 07:49:32 crc kubenswrapper[4922]: I1128 07:49:32.921208 4922 scope.go:117] "RemoveContainer" containerID="a6c2fe381ab2aef2c9cd20c892891b8a5a94603cb365f283a06d80aeb75d6216" Nov 28 07:49:32 crc kubenswrapper[4922]: I1128 07:49:32.921213 4922 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-r9jp6" Nov 28 07:49:32 crc kubenswrapper[4922]: I1128 07:49:32.923948 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-dk9mk" event={"ID":"03a90ec9-7f0a-4cef-9465-c7deaad99357","Type":"ContainerDied","Data":"8b9e3a4c8bf50fc1ea6dacf02ffa0a7cbcae5af357b12207bf50074ffaa7e584"} Nov 28 07:49:32 crc kubenswrapper[4922]: I1128 07:49:32.924023 4922 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-dk9mk" Nov 28 07:49:32 crc kubenswrapper[4922]: I1128 07:49:32.944588 4922 scope.go:117] "RemoveContainer" containerID="448949ef6f070e962c79825c84ee3b6bc77cf98592edaa41e6fa771256e5a9be" Nov 28 07:49:32 crc kubenswrapper[4922]: I1128 07:49:32.975919 4922 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-r9jp6"] Nov 28 07:49:32 crc kubenswrapper[4922]: I1128 07:49:32.980000 4922 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-r9jp6"] Nov 28 07:49:32 crc kubenswrapper[4922]: I1128 07:49:32.987509 4922 scope.go:117] "RemoveContainer" containerID="bbcc35b889ed8579dd4b8256e88befbfa3314156da18b2f6e5e954d15ef59943" Nov 28 07:49:32 crc kubenswrapper[4922]: I1128 07:49:32.990641 4922 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-dk9mk"] Nov 28 07:49:32 crc kubenswrapper[4922]: I1128 07:49:32.999354 4922 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-dk9mk"] Nov 28 07:49:33 crc kubenswrapper[4922]: I1128 07:49:33.008730 4922 scope.go:117] "RemoveContainer" containerID="164abd224027a85a5896a710bcb19f4be488e6082b5125659da26370d48ed3fb" Nov 28 07:49:33 crc kubenswrapper[4922]: I1128 07:49:33.031487 4922 scope.go:117] "RemoveContainer" containerID="02810b35a38db5607785a33444938486f006f169077d7640c7d8c3b7db2f32e3" Nov 28 07:49:33 crc kubenswrapper[4922]: I1128 07:49:33.051580 4922 scope.go:117] "RemoveContainer" containerID="121eda8ce6d78f09053db84e301a7799040239571b722d5b6a9c46c090d4c9d2" Nov 28 07:49:33 crc kubenswrapper[4922]: I1128 07:49:33.407919 4922 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="03a90ec9-7f0a-4cef-9465-c7deaad99357" path="/var/lib/kubelet/pods/03a90ec9-7f0a-4cef-9465-c7deaad99357/volumes" Nov 28 07:49:33 crc kubenswrapper[4922]: I1128 07:49:33.408858 4922 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="75037d4c-bc94-4df5-9c07-faf075579935" path="/var/lib/kubelet/pods/75037d4c-bc94-4df5-9c07-faf075579935/volumes" Nov 28 07:49:34 crc kubenswrapper[4922]: I1128 07:49:34.101777 4922 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931ah62vp_560410b8-61f1-465c-a05e-edc9b25f15c5/util/0.log" Nov 28 07:49:34 crc kubenswrapper[4922]: I1128 07:49:34.188450 4922 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931ah62vp_560410b8-61f1-465c-a05e-edc9b25f15c5/util/0.log" Nov 28 07:49:34 crc kubenswrapper[4922]: I1128 07:49:34.218688 4922 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931ah62vp_560410b8-61f1-465c-a05e-edc9b25f15c5/pull/0.log" Nov 28 07:49:34 crc kubenswrapper[4922]: I1128 07:49:34.222885 4922 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931ah62vp_560410b8-61f1-465c-a05e-edc9b25f15c5/pull/0.log" Nov 28 07:49:34 crc kubenswrapper[4922]: I1128 07:49:34.345499 4922 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931ah62vp_560410b8-61f1-465c-a05e-edc9b25f15c5/util/0.log" Nov 28 07:49:34 crc kubenswrapper[4922]: I1128 07:49:34.374844 4922 log.go:25] 
"Finished parsing log file" path="/var/log/pods/openshift-marketplace_1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931ah62vp_560410b8-61f1-465c-a05e-edc9b25f15c5/pull/0.log" Nov 28 07:49:34 crc kubenswrapper[4922]: I1128 07:49:34.401828 4922 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931ah62vp_560410b8-61f1-465c-a05e-edc9b25f15c5/extract/0.log" Nov 28 07:49:34 crc kubenswrapper[4922]: I1128 07:49:34.539343 4922 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212fctzkw_dd39c4e1-e433-4bd0-89e5-4c3fd87987e7/util/0.log" Nov 28 07:49:34 crc kubenswrapper[4922]: I1128 07:49:34.668959 4922 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212fctzkw_dd39c4e1-e433-4bd0-89e5-4c3fd87987e7/util/0.log" Nov 28 07:49:34 crc kubenswrapper[4922]: I1128 07:49:34.691724 4922 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212fctzkw_dd39c4e1-e433-4bd0-89e5-4c3fd87987e7/pull/0.log" Nov 28 07:49:34 crc kubenswrapper[4922]: I1128 07:49:34.695247 4922 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212fctzkw_dd39c4e1-e433-4bd0-89e5-4c3fd87987e7/pull/0.log" Nov 28 07:49:34 crc kubenswrapper[4922]: I1128 07:49:34.977434 4922 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212fctzkw_dd39c4e1-e433-4bd0-89e5-4c3fd87987e7/extract/0.log" Nov 28 07:49:34 crc kubenswrapper[4922]: I1128 07:49:34.996762 4922 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212fctzkw_dd39c4e1-e433-4bd0-89e5-4c3fd87987e7/util/0.log" Nov 28 07:49:35 crc kubenswrapper[4922]: I1128 07:49:35.012429 4922 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212fctzkw_dd39c4e1-e433-4bd0-89e5-4c3fd87987e7/pull/0.log" Nov 28 07:49:35 crc kubenswrapper[4922]: I1128 07:49:35.144536 4922 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83jmktf_57ced773-180b-4ccd-a494-a78a39e66083/util/0.log" Nov 28 07:49:35 crc kubenswrapper[4922]: I1128 07:49:35.306464 4922 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83jmktf_57ced773-180b-4ccd-a494-a78a39e66083/util/0.log" Nov 28 07:49:35 crc kubenswrapper[4922]: I1128 07:49:35.309652 4922 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83jmktf_57ced773-180b-4ccd-a494-a78a39e66083/pull/0.log" Nov 28 07:49:35 crc kubenswrapper[4922]: I1128 07:49:35.328516 4922 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83jmktf_57ced773-180b-4ccd-a494-a78a39e66083/pull/0.log" Nov 28 07:49:35 crc kubenswrapper[4922]: I1128 07:49:35.500146 4922 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openshift-marketplace_af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83jmktf_57ced773-180b-4ccd-a494-a78a39e66083/util/0.log" Nov 28 07:49:35 crc kubenswrapper[4922]: I1128 07:49:35.500990 4922 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83jmktf_57ced773-180b-4ccd-a494-a78a39e66083/pull/0.log" Nov 28 07:49:35 crc kubenswrapper[4922]: I1128 07:49:35.526703 4922 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83jmktf_57ced773-180b-4ccd-a494-a78a39e66083/extract/0.log" Nov 28 07:49:35 crc kubenswrapper[4922]: I1128 07:49:35.646776 4922 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-g5rdz_80be7cf9-c24f-4f7d-a6df-49ba99b04994/extract-utilities/0.log" Nov 28 07:49:35 crc kubenswrapper[4922]: I1128 07:49:35.815069 4922 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-g5rdz_80be7cf9-c24f-4f7d-a6df-49ba99b04994/extract-utilities/0.log" Nov 28 07:49:35 crc kubenswrapper[4922]: I1128 07:49:35.829574 4922 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-g5rdz_80be7cf9-c24f-4f7d-a6df-49ba99b04994/extract-content/0.log" Nov 28 07:49:35 crc kubenswrapper[4922]: I1128 07:49:35.844137 4922 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-g5rdz_80be7cf9-c24f-4f7d-a6df-49ba99b04994/extract-content/0.log" Nov 28 07:49:35 crc kubenswrapper[4922]: I1128 07:49:35.994428 4922 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-g5rdz_80be7cf9-c24f-4f7d-a6df-49ba99b04994/extract-utilities/0.log" Nov 28 07:49:35 crc kubenswrapper[4922]: I1128 07:49:35.999776 4922 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-g5rdz_80be7cf9-c24f-4f7d-a6df-49ba99b04994/extract-content/0.log" Nov 28 07:49:36 crc kubenswrapper[4922]: I1128 07:49:36.203213 4922 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-gwrfc_a991fe40-8b80-453f-ade4-588ea459cf0b/extract-utilities/0.log" Nov 28 07:49:36 crc kubenswrapper[4922]: I1128 07:49:36.394208 4922 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-gwrfc_a991fe40-8b80-453f-ade4-588ea459cf0b/extract-content/0.log" Nov 28 07:49:36 crc kubenswrapper[4922]: I1128 07:49:36.409133 4922 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-gwrfc_a991fe40-8b80-453f-ade4-588ea459cf0b/extract-content/0.log" Nov 28 07:49:36 crc kubenswrapper[4922]: I1128 07:49:36.437088 4922 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-gwrfc_a991fe40-8b80-453f-ade4-588ea459cf0b/extract-utilities/0.log" Nov 28 07:49:36 crc kubenswrapper[4922]: I1128 07:49:36.467665 4922 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-g5rdz_80be7cf9-c24f-4f7d-a6df-49ba99b04994/registry-server/0.log" Nov 28 07:49:36 crc kubenswrapper[4922]: I1128 07:49:36.606690 4922 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-gwrfc_a991fe40-8b80-453f-ade4-588ea459cf0b/extract-content/0.log" Nov 28 07:49:36 crc kubenswrapper[4922]: 
I1128 07:49:36.634550 4922 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-gwrfc_a991fe40-8b80-453f-ade4-588ea459cf0b/extract-utilities/0.log" Nov 28 07:49:36 crc kubenswrapper[4922]: I1128 07:49:36.789807 4922 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_marketplace-operator-79b997595-vpzh2_75a072db-ed75-4f86-8fef-e5c9de393433/marketplace-operator/0.log" Nov 28 07:49:36 crc kubenswrapper[4922]: I1128 07:49:36.968118 4922 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-rtjdf_c6c2bb80-f2be-424e-92ce-4e3b1e9ce558/extract-utilities/0.log" Nov 28 07:49:37 crc kubenswrapper[4922]: I1128 07:49:37.123746 4922 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-gwrfc_a991fe40-8b80-453f-ade4-588ea459cf0b/registry-server/0.log" Nov 28 07:49:37 crc kubenswrapper[4922]: I1128 07:49:37.142620 4922 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-rtjdf_c6c2bb80-f2be-424e-92ce-4e3b1e9ce558/extract-utilities/0.log" Nov 28 07:49:37 crc kubenswrapper[4922]: I1128 07:49:37.184200 4922 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-rtjdf_c6c2bb80-f2be-424e-92ce-4e3b1e9ce558/extract-content/0.log" Nov 28 07:49:37 crc kubenswrapper[4922]: I1128 07:49:37.186184 4922 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-rtjdf_c6c2bb80-f2be-424e-92ce-4e3b1e9ce558/extract-content/0.log" Nov 28 07:49:37 crc kubenswrapper[4922]: I1128 07:49:37.343404 4922 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-rtjdf_c6c2bb80-f2be-424e-92ce-4e3b1e9ce558/extract-content/0.log" Nov 28 07:49:37 crc kubenswrapper[4922]: I1128 07:49:37.361015 4922 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-rtjdf_c6c2bb80-f2be-424e-92ce-4e3b1e9ce558/extract-utilities/0.log" Nov 28 07:49:37 crc kubenswrapper[4922]: I1128 07:49:37.392989 4922 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-nxv9j_a3162404-2375-4cd0-8d8d-73520e721f53/extract-utilities/0.log" Nov 28 07:49:37 crc kubenswrapper[4922]: I1128 07:49:37.473160 4922 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-rtjdf_c6c2bb80-f2be-424e-92ce-4e3b1e9ce558/registry-server/0.log" Nov 28 07:49:37 crc kubenswrapper[4922]: I1128 07:49:37.556456 4922 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-nxv9j_a3162404-2375-4cd0-8d8d-73520e721f53/extract-content/0.log" Nov 28 07:49:37 crc kubenswrapper[4922]: I1128 07:49:37.584906 4922 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-nxv9j_a3162404-2375-4cd0-8d8d-73520e721f53/extract-content/0.log" Nov 28 07:49:37 crc kubenswrapper[4922]: I1128 07:49:37.586795 4922 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-nxv9j_a3162404-2375-4cd0-8d8d-73520e721f53/extract-utilities/0.log" Nov 28 07:49:37 crc kubenswrapper[4922]: I1128 07:49:37.688291 4922 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-nxv9j_a3162404-2375-4cd0-8d8d-73520e721f53/extract-utilities/0.log" Nov 28 07:49:37 crc kubenswrapper[4922]: I1128 07:49:37.712382 
4922 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-nxv9j_a3162404-2375-4cd0-8d8d-73520e721f53/extract-content/0.log" Nov 28 07:49:38 crc kubenswrapper[4922]: I1128 07:49:38.125598 4922 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-nxv9j_a3162404-2375-4cd0-8d8d-73520e721f53/registry-server/0.log" Nov 28 07:49:57 crc kubenswrapper[4922]: I1128 07:49:57.312071 4922 patch_prober.go:28] interesting pod/machine-config-daemon-h8wk6 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 07:49:57 crc kubenswrapper[4922]: I1128 07:49:57.312935 4922 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-h8wk6" podUID="0498340a-5e95-42bf-a0a6-8ac89a6b8858" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 07:50:20 crc kubenswrapper[4922]: I1128 07:50:20.928827 4922 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-82rzm"] Nov 28 07:50:20 crc kubenswrapper[4922]: E1128 07:50:20.929585 4922 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="91ea2172-69d4-4310-970a-376c9e536ed2" containerName="extract-content" Nov 28 07:50:20 crc kubenswrapper[4922]: I1128 07:50:20.929599 4922 state_mem.go:107] "Deleted CPUSet assignment" podUID="91ea2172-69d4-4310-970a-376c9e536ed2" containerName="extract-content" Nov 28 07:50:20 crc kubenswrapper[4922]: E1128 07:50:20.929613 4922 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="75037d4c-bc94-4df5-9c07-faf075579935" containerName="extract-content" Nov 28 07:50:20 crc kubenswrapper[4922]: I1128 07:50:20.929621 4922 state_mem.go:107] "Deleted CPUSet assignment" podUID="75037d4c-bc94-4df5-9c07-faf075579935" containerName="extract-content" Nov 28 07:50:20 crc kubenswrapper[4922]: E1128 07:50:20.929634 4922 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="91ea2172-69d4-4310-970a-376c9e536ed2" containerName="registry-server" Nov 28 07:50:20 crc kubenswrapper[4922]: I1128 07:50:20.929642 4922 state_mem.go:107] "Deleted CPUSet assignment" podUID="91ea2172-69d4-4310-970a-376c9e536ed2" containerName="registry-server" Nov 28 07:50:20 crc kubenswrapper[4922]: E1128 07:50:20.929655 4922 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="75037d4c-bc94-4df5-9c07-faf075579935" containerName="extract-utilities" Nov 28 07:50:20 crc kubenswrapper[4922]: I1128 07:50:20.929663 4922 state_mem.go:107] "Deleted CPUSet assignment" podUID="75037d4c-bc94-4df5-9c07-faf075579935" containerName="extract-utilities" Nov 28 07:50:20 crc kubenswrapper[4922]: E1128 07:50:20.929678 4922 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="03a90ec9-7f0a-4cef-9465-c7deaad99357" containerName="extract-utilities" Nov 28 07:50:20 crc kubenswrapper[4922]: I1128 07:50:20.929688 4922 state_mem.go:107] "Deleted CPUSet assignment" podUID="03a90ec9-7f0a-4cef-9465-c7deaad99357" containerName="extract-utilities" Nov 28 07:50:20 crc kubenswrapper[4922]: E1128 07:50:20.929702 4922 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="91ea2172-69d4-4310-970a-376c9e536ed2" containerName="extract-utilities" Nov 28 07:50:20 crc 
kubenswrapper[4922]: I1128 07:50:20.929709 4922 state_mem.go:107] "Deleted CPUSet assignment" podUID="91ea2172-69d4-4310-970a-376c9e536ed2" containerName="extract-utilities" Nov 28 07:50:20 crc kubenswrapper[4922]: E1128 07:50:20.929722 4922 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="03a90ec9-7f0a-4cef-9465-c7deaad99357" containerName="extract-content" Nov 28 07:50:20 crc kubenswrapper[4922]: I1128 07:50:20.929729 4922 state_mem.go:107] "Deleted CPUSet assignment" podUID="03a90ec9-7f0a-4cef-9465-c7deaad99357" containerName="extract-content" Nov 28 07:50:20 crc kubenswrapper[4922]: E1128 07:50:20.929742 4922 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="03a90ec9-7f0a-4cef-9465-c7deaad99357" containerName="registry-server" Nov 28 07:50:20 crc kubenswrapper[4922]: I1128 07:50:20.929750 4922 state_mem.go:107] "Deleted CPUSet assignment" podUID="03a90ec9-7f0a-4cef-9465-c7deaad99357" containerName="registry-server" Nov 28 07:50:20 crc kubenswrapper[4922]: E1128 07:50:20.929768 4922 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="75037d4c-bc94-4df5-9c07-faf075579935" containerName="registry-server" Nov 28 07:50:20 crc kubenswrapper[4922]: I1128 07:50:20.929776 4922 state_mem.go:107] "Deleted CPUSet assignment" podUID="75037d4c-bc94-4df5-9c07-faf075579935" containerName="registry-server" Nov 28 07:50:20 crc kubenswrapper[4922]: I1128 07:50:20.929951 4922 memory_manager.go:354] "RemoveStaleState removing state" podUID="91ea2172-69d4-4310-970a-376c9e536ed2" containerName="registry-server" Nov 28 07:50:20 crc kubenswrapper[4922]: I1128 07:50:20.929966 4922 memory_manager.go:354] "RemoveStaleState removing state" podUID="75037d4c-bc94-4df5-9c07-faf075579935" containerName="registry-server" Nov 28 07:50:20 crc kubenswrapper[4922]: I1128 07:50:20.929977 4922 memory_manager.go:354] "RemoveStaleState removing state" podUID="03a90ec9-7f0a-4cef-9465-c7deaad99357" containerName="registry-server" Nov 28 07:50:20 crc kubenswrapper[4922]: I1128 07:50:20.931177 4922 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-82rzm" Nov 28 07:50:20 crc kubenswrapper[4922]: I1128 07:50:20.953130 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-82rzm"] Nov 28 07:50:21 crc kubenswrapper[4922]: I1128 07:50:21.039369 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-z5g7f\" (UniqueName: \"kubernetes.io/projected/6f76745a-e3fe-4b68-9bd5-4bd23793be94-kube-api-access-z5g7f\") pod \"community-operators-82rzm\" (UID: \"6f76745a-e3fe-4b68-9bd5-4bd23793be94\") " pod="openshift-marketplace/community-operators-82rzm" Nov 28 07:50:21 crc kubenswrapper[4922]: I1128 07:50:21.039494 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6f76745a-e3fe-4b68-9bd5-4bd23793be94-utilities\") pod \"community-operators-82rzm\" (UID: \"6f76745a-e3fe-4b68-9bd5-4bd23793be94\") " pod="openshift-marketplace/community-operators-82rzm" Nov 28 07:50:21 crc kubenswrapper[4922]: I1128 07:50:21.039796 4922 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6f76745a-e3fe-4b68-9bd5-4bd23793be94-catalog-content\") pod \"community-operators-82rzm\" (UID: \"6f76745a-e3fe-4b68-9bd5-4bd23793be94\") " pod="openshift-marketplace/community-operators-82rzm" Nov 28 07:50:21 crc kubenswrapper[4922]: I1128 07:50:21.141169 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6f76745a-e3fe-4b68-9bd5-4bd23793be94-catalog-content\") pod \"community-operators-82rzm\" (UID: \"6f76745a-e3fe-4b68-9bd5-4bd23793be94\") " pod="openshift-marketplace/community-operators-82rzm" Nov 28 07:50:21 crc kubenswrapper[4922]: I1128 07:50:21.141547 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-z5g7f\" (UniqueName: \"kubernetes.io/projected/6f76745a-e3fe-4b68-9bd5-4bd23793be94-kube-api-access-z5g7f\") pod \"community-operators-82rzm\" (UID: \"6f76745a-e3fe-4b68-9bd5-4bd23793be94\") " pod="openshift-marketplace/community-operators-82rzm" Nov 28 07:50:21 crc kubenswrapper[4922]: I1128 07:50:21.141720 4922 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6f76745a-e3fe-4b68-9bd5-4bd23793be94-utilities\") pod \"community-operators-82rzm\" (UID: \"6f76745a-e3fe-4b68-9bd5-4bd23793be94\") " pod="openshift-marketplace/community-operators-82rzm" Nov 28 07:50:21 crc kubenswrapper[4922]: I1128 07:50:21.141968 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6f76745a-e3fe-4b68-9bd5-4bd23793be94-catalog-content\") pod \"community-operators-82rzm\" (UID: \"6f76745a-e3fe-4b68-9bd5-4bd23793be94\") " pod="openshift-marketplace/community-operators-82rzm" Nov 28 07:50:21 crc kubenswrapper[4922]: I1128 07:50:21.142173 4922 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6f76745a-e3fe-4b68-9bd5-4bd23793be94-utilities\") pod \"community-operators-82rzm\" (UID: \"6f76745a-e3fe-4b68-9bd5-4bd23793be94\") " pod="openshift-marketplace/community-operators-82rzm" Nov 28 07:50:21 crc kubenswrapper[4922]: I1128 07:50:21.165150 4922 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-z5g7f\" (UniqueName: \"kubernetes.io/projected/6f76745a-e3fe-4b68-9bd5-4bd23793be94-kube-api-access-z5g7f\") pod \"community-operators-82rzm\" (UID: \"6f76745a-e3fe-4b68-9bd5-4bd23793be94\") " pod="openshift-marketplace/community-operators-82rzm" Nov 28 07:50:21 crc kubenswrapper[4922]: I1128 07:50:21.260461 4922 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-82rzm" Nov 28 07:50:21 crc kubenswrapper[4922]: I1128 07:50:21.721239 4922 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-82rzm"] Nov 28 07:50:22 crc kubenswrapper[4922]: I1128 07:50:22.345721 4922 generic.go:334] "Generic (PLEG): container finished" podID="6f76745a-e3fe-4b68-9bd5-4bd23793be94" containerID="18c96ddc9a7582ddf0bd8167b8dfc1be76dc8465f65fdca7a72023e787519f02" exitCode=0 Nov 28 07:50:22 crc kubenswrapper[4922]: I1128 07:50:22.345900 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-82rzm" event={"ID":"6f76745a-e3fe-4b68-9bd5-4bd23793be94","Type":"ContainerDied","Data":"18c96ddc9a7582ddf0bd8167b8dfc1be76dc8465f65fdca7a72023e787519f02"} Nov 28 07:50:22 crc kubenswrapper[4922]: I1128 07:50:22.346108 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-82rzm" event={"ID":"6f76745a-e3fe-4b68-9bd5-4bd23793be94","Type":"ContainerStarted","Data":"5bbdf80202b9f117455ca02beaebded30a319b8d97972e21d4631905052fc19d"} Nov 28 07:50:23 crc kubenswrapper[4922]: I1128 07:50:23.357451 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-82rzm" event={"ID":"6f76745a-e3fe-4b68-9bd5-4bd23793be94","Type":"ContainerStarted","Data":"65ed182fafaa8944528c164521d5da47e1aebf5d0e22a82be9164f27ec6bbd2c"} Nov 28 07:50:24 crc kubenswrapper[4922]: I1128 07:50:24.370497 4922 generic.go:334] "Generic (PLEG): container finished" podID="6f76745a-e3fe-4b68-9bd5-4bd23793be94" containerID="65ed182fafaa8944528c164521d5da47e1aebf5d0e22a82be9164f27ec6bbd2c" exitCode=0 Nov 28 07:50:24 crc kubenswrapper[4922]: I1128 07:50:24.370632 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-82rzm" event={"ID":"6f76745a-e3fe-4b68-9bd5-4bd23793be94","Type":"ContainerDied","Data":"65ed182fafaa8944528c164521d5da47e1aebf5d0e22a82be9164f27ec6bbd2c"} Nov 28 07:50:25 crc kubenswrapper[4922]: I1128 07:50:25.389577 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-82rzm" event={"ID":"6f76745a-e3fe-4b68-9bd5-4bd23793be94","Type":"ContainerStarted","Data":"c7f836fe575d63548927a979a0e0c035125f6bbafca0c296739cc89d432998b9"} Nov 28 07:50:25 crc kubenswrapper[4922]: I1128 07:50:25.435363 4922 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-82rzm" podStartSLOduration=2.880155002 podStartE2EDuration="5.435329183s" podCreationTimestamp="2025-11-28 07:50:20 +0000 UTC" firstStartedPulling="2025-11-28 07:50:22.347772363 +0000 UTC m=+3467.268167995" lastFinishedPulling="2025-11-28 07:50:24.902946554 +0000 UTC m=+3469.823342176" observedRunningTime="2025-11-28 07:50:25.426915575 +0000 UTC m=+3470.347311177" watchObservedRunningTime="2025-11-28 07:50:25.435329183 +0000 UTC m=+3470.355724805" Nov 28 07:50:27 crc kubenswrapper[4922]: I1128 07:50:27.311579 4922 patch_prober.go:28] interesting 
pod/machine-config-daemon-h8wk6 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 28 07:50:27 crc kubenswrapper[4922]: I1128 07:50:27.312057 4922 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-h8wk6" podUID="0498340a-5e95-42bf-a0a6-8ac89a6b8858" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 28 07:50:31 crc kubenswrapper[4922]: I1128 07:50:31.261538 4922 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-82rzm"
Nov 28 07:50:31 crc kubenswrapper[4922]: I1128 07:50:31.262144 4922 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-82rzm"
Nov 28 07:50:31 crc kubenswrapper[4922]: I1128 07:50:31.352930 4922 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-82rzm"
Nov 28 07:50:31 crc kubenswrapper[4922]: I1128 07:50:31.557210 4922 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-82rzm"
Nov 28 07:50:31 crc kubenswrapper[4922]: I1128 07:50:31.622692 4922 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-82rzm"]
Nov 28 07:50:33 crc kubenswrapper[4922]: I1128 07:50:33.464506 4922 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-82rzm" podUID="6f76745a-e3fe-4b68-9bd5-4bd23793be94" containerName="registry-server" containerID="cri-o://c7f836fe575d63548927a979a0e0c035125f6bbafca0c296739cc89d432998b9" gracePeriod=2
Nov 28 07:50:34 crc kubenswrapper[4922]: I1128 07:50:34.488443 4922 generic.go:334] "Generic (PLEG): container finished" podID="6f76745a-e3fe-4b68-9bd5-4bd23793be94" containerID="c7f836fe575d63548927a979a0e0c035125f6bbafca0c296739cc89d432998b9" exitCode=0
Nov 28 07:50:34 crc kubenswrapper[4922]: I1128 07:50:34.488536 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-82rzm" event={"ID":"6f76745a-e3fe-4b68-9bd5-4bd23793be94","Type":"ContainerDied","Data":"c7f836fe575d63548927a979a0e0c035125f6bbafca0c296739cc89d432998b9"}
Nov 28 07:50:34 crc kubenswrapper[4922]: I1128 07:50:34.488774 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-82rzm" event={"ID":"6f76745a-e3fe-4b68-9bd5-4bd23793be94","Type":"ContainerDied","Data":"5bbdf80202b9f117455ca02beaebded30a319b8d97972e21d4631905052fc19d"}
Nov 28 07:50:34 crc kubenswrapper[4922]: I1128 07:50:34.488793 4922 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="5bbdf80202b9f117455ca02beaebded30a319b8d97972e21d4631905052fc19d"
Nov 28 07:50:34 crc kubenswrapper[4922]: I1128 07:50:34.489205 4922 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-82rzm"
Nov 28 07:50:34 crc kubenswrapper[4922]: I1128 07:50:34.579850 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-z5g7f\" (UniqueName: \"kubernetes.io/projected/6f76745a-e3fe-4b68-9bd5-4bd23793be94-kube-api-access-z5g7f\") pod \"6f76745a-e3fe-4b68-9bd5-4bd23793be94\" (UID: \"6f76745a-e3fe-4b68-9bd5-4bd23793be94\") "
Nov 28 07:50:34 crc kubenswrapper[4922]: I1128 07:50:34.579935 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6f76745a-e3fe-4b68-9bd5-4bd23793be94-catalog-content\") pod \"6f76745a-e3fe-4b68-9bd5-4bd23793be94\" (UID: \"6f76745a-e3fe-4b68-9bd5-4bd23793be94\") "
Nov 28 07:50:34 crc kubenswrapper[4922]: I1128 07:50:34.580001 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6f76745a-e3fe-4b68-9bd5-4bd23793be94-utilities\") pod \"6f76745a-e3fe-4b68-9bd5-4bd23793be94\" (UID: \"6f76745a-e3fe-4b68-9bd5-4bd23793be94\") "
Nov 28 07:50:34 crc kubenswrapper[4922]: I1128 07:50:34.580986 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6f76745a-e3fe-4b68-9bd5-4bd23793be94-utilities" (OuterVolumeSpecName: "utilities") pod "6f76745a-e3fe-4b68-9bd5-4bd23793be94" (UID: "6f76745a-e3fe-4b68-9bd5-4bd23793be94"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 28 07:50:34 crc kubenswrapper[4922]: I1128 07:50:34.584414 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6f76745a-e3fe-4b68-9bd5-4bd23793be94-kube-api-access-z5g7f" (OuterVolumeSpecName: "kube-api-access-z5g7f") pod "6f76745a-e3fe-4b68-9bd5-4bd23793be94" (UID: "6f76745a-e3fe-4b68-9bd5-4bd23793be94"). InnerVolumeSpecName "kube-api-access-z5g7f". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 28 07:50:34 crc kubenswrapper[4922]: I1128 07:50:34.660536 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6f76745a-e3fe-4b68-9bd5-4bd23793be94-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "6f76745a-e3fe-4b68-9bd5-4bd23793be94" (UID: "6f76745a-e3fe-4b68-9bd5-4bd23793be94"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 28 07:50:34 crc kubenswrapper[4922]: I1128 07:50:34.681937 4922 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-z5g7f\" (UniqueName: \"kubernetes.io/projected/6f76745a-e3fe-4b68-9bd5-4bd23793be94-kube-api-access-z5g7f\") on node \"crc\" DevicePath \"\""
Nov 28 07:50:34 crc kubenswrapper[4922]: I1128 07:50:34.681974 4922 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6f76745a-e3fe-4b68-9bd5-4bd23793be94-catalog-content\") on node \"crc\" DevicePath \"\""
Nov 28 07:50:34 crc kubenswrapper[4922]: I1128 07:50:34.681983 4922 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6f76745a-e3fe-4b68-9bd5-4bd23793be94-utilities\") on node \"crc\" DevicePath \"\""
Nov 28 07:50:35 crc kubenswrapper[4922]: I1128 07:50:35.499896 4922 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-82rzm"
Nov 28 07:50:35 crc kubenswrapper[4922]: I1128 07:50:35.535016 4922 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-82rzm"]
Nov 28 07:50:35 crc kubenswrapper[4922]: I1128 07:50:35.541635 4922 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-82rzm"]
Nov 28 07:50:37 crc kubenswrapper[4922]: I1128 07:50:37.426564 4922 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6f76745a-e3fe-4b68-9bd5-4bd23793be94" path="/var/lib/kubelet/pods/6f76745a-e3fe-4b68-9bd5-4bd23793be94/volumes"
Nov 28 07:50:42 crc kubenswrapper[4922]: I1128 07:50:42.574501 4922 generic.go:334] "Generic (PLEG): container finished" podID="3f397631-c6cf-4382-8761-fe6c1c3d1eba" containerID="b68b3684879f21d363b41ce50a176601da5f515c8fdb708f92552a1db98e227a" exitCode=0
Nov 28 07:50:42 crc kubenswrapper[4922]: I1128 07:50:42.574606 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-ftntt/must-gather-285z7" event={"ID":"3f397631-c6cf-4382-8761-fe6c1c3d1eba","Type":"ContainerDied","Data":"b68b3684879f21d363b41ce50a176601da5f515c8fdb708f92552a1db98e227a"}
Nov 28 07:50:42 crc kubenswrapper[4922]: I1128 07:50:42.576347 4922 scope.go:117] "RemoveContainer" containerID="b68b3684879f21d363b41ce50a176601da5f515c8fdb708f92552a1db98e227a"
Nov 28 07:50:42 crc kubenswrapper[4922]: E1128 07:50:42.615425 4922 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod6f76745a_e3fe_4b68_9bd5_4bd23793be94.slice/crio-5bbdf80202b9f117455ca02beaebded30a319b8d97972e21d4631905052fc19d\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod6f76745a_e3fe_4b68_9bd5_4bd23793be94.slice\": RecentStats: unable to find data in memory cache]"
Nov 28 07:50:43 crc kubenswrapper[4922]: I1128 07:50:43.261546 4922 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-must-gather-ftntt_must-gather-285z7_3f397631-c6cf-4382-8761-fe6c1c3d1eba/gather/0.log"
Nov 28 07:50:50 crc kubenswrapper[4922]: I1128 07:50:50.387298 4922 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-must-gather-ftntt/must-gather-285z7"]
Nov 28 07:50:50 crc kubenswrapper[4922]: I1128 07:50:50.387890 4922 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-must-gather-ftntt/must-gather-285z7"]
Nov 28 07:50:50 crc kubenswrapper[4922]: I1128 07:50:50.388063 4922 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-must-gather-ftntt/must-gather-285z7" podUID="3f397631-c6cf-4382-8761-fe6c1c3d1eba" containerName="copy" containerID="cri-o://fd472eebc3f8062dd87907d673c1bf80cd8a36f314a541838af56551a88f239b" gracePeriod=2
Nov 28 07:50:50 crc kubenswrapper[4922]: I1128 07:50:50.654123 4922 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-must-gather-ftntt_must-gather-285z7_3f397631-c6cf-4382-8761-fe6c1c3d1eba/copy/0.log"
Nov 28 07:50:50 crc kubenswrapper[4922]: I1128 07:50:50.654862 4922 generic.go:334] "Generic (PLEG): container finished" podID="3f397631-c6cf-4382-8761-fe6c1c3d1eba" containerID="fd472eebc3f8062dd87907d673c1bf80cd8a36f314a541838af56551a88f239b" exitCode=143
Nov 28 07:50:50 crc kubenswrapper[4922]: I1128 07:50:50.801253 4922 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-must-gather-ftntt_must-gather-285z7_3f397631-c6cf-4382-8761-fe6c1c3d1eba/copy/0.log"
Nov 28 07:50:50 crc kubenswrapper[4922]: I1128 07:50:50.801943 4922 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-ftntt/must-gather-285z7"
Nov 28 07:50:50 crc kubenswrapper[4922]: I1128 07:50:50.940749 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jn9b4\" (UniqueName: \"kubernetes.io/projected/3f397631-c6cf-4382-8761-fe6c1c3d1eba-kube-api-access-jn9b4\") pod \"3f397631-c6cf-4382-8761-fe6c1c3d1eba\" (UID: \"3f397631-c6cf-4382-8761-fe6c1c3d1eba\") "
Nov 28 07:50:50 crc kubenswrapper[4922]: I1128 07:50:50.940873 4922 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/3f397631-c6cf-4382-8761-fe6c1c3d1eba-must-gather-output\") pod \"3f397631-c6cf-4382-8761-fe6c1c3d1eba\" (UID: \"3f397631-c6cf-4382-8761-fe6c1c3d1eba\") "
Nov 28 07:50:50 crc kubenswrapper[4922]: I1128 07:50:50.955518 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3f397631-c6cf-4382-8761-fe6c1c3d1eba-kube-api-access-jn9b4" (OuterVolumeSpecName: "kube-api-access-jn9b4") pod "3f397631-c6cf-4382-8761-fe6c1c3d1eba" (UID: "3f397631-c6cf-4382-8761-fe6c1c3d1eba"). InnerVolumeSpecName "kube-api-access-jn9b4". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 28 07:50:51 crc kubenswrapper[4922]: I1128 07:50:51.043081 4922 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jn9b4\" (UniqueName: \"kubernetes.io/projected/3f397631-c6cf-4382-8761-fe6c1c3d1eba-kube-api-access-jn9b4\") on node \"crc\" DevicePath \"\""
Nov 28 07:50:51 crc kubenswrapper[4922]: I1128 07:50:51.057274 4922 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/3f397631-c6cf-4382-8761-fe6c1c3d1eba-must-gather-output" (OuterVolumeSpecName: "must-gather-output") pod "3f397631-c6cf-4382-8761-fe6c1c3d1eba" (UID: "3f397631-c6cf-4382-8761-fe6c1c3d1eba"). InnerVolumeSpecName "must-gather-output". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 28 07:50:51 crc kubenswrapper[4922]: I1128 07:50:51.144814 4922 reconciler_common.go:293] "Volume detached for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/3f397631-c6cf-4382-8761-fe6c1c3d1eba-must-gather-output\") on node \"crc\" DevicePath \"\""
Nov 28 07:50:51 crc kubenswrapper[4922]: I1128 07:50:51.412970 4922 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3f397631-c6cf-4382-8761-fe6c1c3d1eba" path="/var/lib/kubelet/pods/3f397631-c6cf-4382-8761-fe6c1c3d1eba/volumes"
Nov 28 07:50:51 crc kubenswrapper[4922]: I1128 07:50:51.665651 4922 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-must-gather-ftntt_must-gather-285z7_3f397631-c6cf-4382-8761-fe6c1c3d1eba/copy/0.log"
Nov 28 07:50:51 crc kubenswrapper[4922]: I1128 07:50:51.666301 4922 scope.go:117] "RemoveContainer" containerID="fd472eebc3f8062dd87907d673c1bf80cd8a36f314a541838af56551a88f239b"
Nov 28 07:50:51 crc kubenswrapper[4922]: I1128 07:50:51.666332 4922 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-ftntt/must-gather-285z7"
Nov 28 07:50:51 crc kubenswrapper[4922]: I1128 07:50:51.692155 4922 scope.go:117] "RemoveContainer" containerID="b68b3684879f21d363b41ce50a176601da5f515c8fdb708f92552a1db98e227a"
Nov 28 07:50:52 crc kubenswrapper[4922]: E1128 07:50:52.841629 4922 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod6f76745a_e3fe_4b68_9bd5_4bd23793be94.slice/crio-5bbdf80202b9f117455ca02beaebded30a319b8d97972e21d4631905052fc19d\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod6f76745a_e3fe_4b68_9bd5_4bd23793be94.slice\": RecentStats: unable to find data in memory cache]"
Nov 28 07:50:57 crc kubenswrapper[4922]: I1128 07:50:57.311296 4922 patch_prober.go:28] interesting pod/machine-config-daemon-h8wk6 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 28 07:50:57 crc kubenswrapper[4922]: I1128 07:50:57.311914 4922 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-h8wk6" podUID="0498340a-5e95-42bf-a0a6-8ac89a6b8858" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 28 07:50:57 crc kubenswrapper[4922]: I1128 07:50:57.311967 4922 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-h8wk6"
Nov 28 07:50:57 crc kubenswrapper[4922]: I1128 07:50:57.312601 4922 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"3f72ca2cbcc03c66bce9f3d447e83761baf9c3a351e079449e2d091afcaa328e"} pod="openshift-machine-config-operator/machine-config-daemon-h8wk6" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted"
Nov 28 07:50:57 crc kubenswrapper[4922]: I1128 07:50:57.312658 4922 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-h8wk6" podUID="0498340a-5e95-42bf-a0a6-8ac89a6b8858" containerName="machine-config-daemon" containerID="cri-o://3f72ca2cbcc03c66bce9f3d447e83761baf9c3a351e079449e2d091afcaa328e" gracePeriod=600
Nov 28 07:50:57 crc kubenswrapper[4922]: I1128 07:50:57.718629 4922 generic.go:334] "Generic (PLEG): container finished" podID="0498340a-5e95-42bf-a0a6-8ac89a6b8858" containerID="3f72ca2cbcc03c66bce9f3d447e83761baf9c3a351e079449e2d091afcaa328e" exitCode=0
Nov 28 07:50:57 crc kubenswrapper[4922]: I1128 07:50:57.718694 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-h8wk6" event={"ID":"0498340a-5e95-42bf-a0a6-8ac89a6b8858","Type":"ContainerDied","Data":"3f72ca2cbcc03c66bce9f3d447e83761baf9c3a351e079449e2d091afcaa328e"}
Nov 28 07:50:57 crc kubenswrapper[4922]: I1128 07:50:57.719053 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-h8wk6" event={"ID":"0498340a-5e95-42bf-a0a6-8ac89a6b8858","Type":"ContainerStarted","Data":"1c9d0a63bfeea22d8df3da84f0408710c183fabaab68dc3dce0405df0f3587d4"}
Nov 28 07:50:57 crc kubenswrapper[4922]: I1128 07:50:57.719071 4922 scope.go:117] "RemoveContainer" containerID="fd445a80f0133782577e38815133e01b704316845be72e79195e07ba29f4777b"
Nov 28 07:51:03 crc kubenswrapper[4922]: E1128 07:51:03.041260 4922 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod6f76745a_e3fe_4b68_9bd5_4bd23793be94.slice\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod6f76745a_e3fe_4b68_9bd5_4bd23793be94.slice/crio-5bbdf80202b9f117455ca02beaebded30a319b8d97972e21d4631905052fc19d\": RecentStats: unable to find data in memory cache]"
Nov 28 07:51:13 crc kubenswrapper[4922]: E1128 07:51:13.276528 4922 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod6f76745a_e3fe_4b68_9bd5_4bd23793be94.slice\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod6f76745a_e3fe_4b68_9bd5_4bd23793be94.slice/crio-5bbdf80202b9f117455ca02beaebded30a319b8d97972e21d4631905052fc19d\": RecentStats: unable to find data in memory cache]"
Nov 28 07:51:23 crc kubenswrapper[4922]: E1128 07:51:23.511003 4922 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod6f76745a_e3fe_4b68_9bd5_4bd23793be94.slice\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod6f76745a_e3fe_4b68_9bd5_4bd23793be94.slice/crio-5bbdf80202b9f117455ca02beaebded30a319b8d97972e21d4631905052fc19d\": RecentStats: unable to find data in memory cache]"
Nov 28 07:51:33 crc kubenswrapper[4922]: E1128 07:51:33.739610 4922 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod6f76745a_e3fe_4b68_9bd5_4bd23793be94.slice/crio-5bbdf80202b9f117455ca02beaebded30a319b8d97972e21d4631905052fc19d\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod6f76745a_e3fe_4b68_9bd5_4bd23793be94.slice\": RecentStats: unable to find data in memory cache]"
Nov 28 07:52:57 crc kubenswrapper[4922]: I1128 07:52:57.312614 4922 patch_prober.go:28] interesting pod/machine-config-daemon-h8wk6 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 28 07:52:57 crc kubenswrapper[4922]: I1128 07:52:57.313388 4922 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-h8wk6" podUID="0498340a-5e95-42bf-a0a6-8ac89a6b8858" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 28 07:53:27 crc kubenswrapper[4922]: I1128 07:53:27.312621 4922 patch_prober.go:28] interesting pod/machine-config-daemon-h8wk6 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 28 07:53:27 crc kubenswrapper[4922]: I1128 07:53:27.313371 4922 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-h8wk6" podUID="0498340a-5e95-42bf-a0a6-8ac89a6b8858" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 28 07:53:57 crc kubenswrapper[4922]: I1128 07:53:57.312001 4922 patch_prober.go:28] interesting pod/machine-config-daemon-h8wk6 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 28 07:53:57 crc kubenswrapper[4922]: I1128 07:53:57.313494 4922 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-h8wk6" podUID="0498340a-5e95-42bf-a0a6-8ac89a6b8858" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 28 07:53:57 crc kubenswrapper[4922]: I1128 07:53:57.313601 4922 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-h8wk6"
Nov 28 07:53:57 crc kubenswrapper[4922]: I1128 07:53:57.314430 4922 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"1c9d0a63bfeea22d8df3da84f0408710c183fabaab68dc3dce0405df0f3587d4"} pod="openshift-machine-config-operator/machine-config-daemon-h8wk6" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted"
Nov 28 07:53:57 crc kubenswrapper[4922]: I1128 07:53:57.314530 4922 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-h8wk6" podUID="0498340a-5e95-42bf-a0a6-8ac89a6b8858" containerName="machine-config-daemon" containerID="cri-o://1c9d0a63bfeea22d8df3da84f0408710c183fabaab68dc3dce0405df0f3587d4" gracePeriod=600
Nov 28 07:53:57 crc kubenswrapper[4922]: E1128 07:53:57.461857 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-h8wk6_openshift-machine-config-operator(0498340a-5e95-42bf-a0a6-8ac89a6b8858)\"" pod="openshift-machine-config-operator/machine-config-daemon-h8wk6" podUID="0498340a-5e95-42bf-a0a6-8ac89a6b8858"
Nov 28 07:53:57 crc kubenswrapper[4922]: I1128 07:53:57.542761 4922 generic.go:334] "Generic (PLEG): container finished" podID="0498340a-5e95-42bf-a0a6-8ac89a6b8858" containerID="1c9d0a63bfeea22d8df3da84f0408710c183fabaab68dc3dce0405df0f3587d4" exitCode=0
Nov 28 07:53:57 crc kubenswrapper[4922]: I1128 07:53:57.542825 4922 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-h8wk6" event={"ID":"0498340a-5e95-42bf-a0a6-8ac89a6b8858","Type":"ContainerDied","Data":"1c9d0a63bfeea22d8df3da84f0408710c183fabaab68dc3dce0405df0f3587d4"}
Nov 28 07:53:57 crc kubenswrapper[4922]: I1128 07:53:57.542919 4922 scope.go:117] "RemoveContainer" containerID="3f72ca2cbcc03c66bce9f3d447e83761baf9c3a351e079449e2d091afcaa328e"
Nov 28 07:53:57 crc kubenswrapper[4922]: I1128 07:53:57.544278 4922 scope.go:117] "RemoveContainer" containerID="1c9d0a63bfeea22d8df3da84f0408710c183fabaab68dc3dce0405df0f3587d4"
containerID="1c9d0a63bfeea22d8df3da84f0408710c183fabaab68dc3dce0405df0f3587d4" Nov 28 07:53:57 crc kubenswrapper[4922]: E1128 07:53:57.546667 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-h8wk6_openshift-machine-config-operator(0498340a-5e95-42bf-a0a6-8ac89a6b8858)\"" pod="openshift-machine-config-operator/machine-config-daemon-h8wk6" podUID="0498340a-5e95-42bf-a0a6-8ac89a6b8858" Nov 28 07:54:09 crc kubenswrapper[4922]: I1128 07:54:09.398738 4922 scope.go:117] "RemoveContainer" containerID="1c9d0a63bfeea22d8df3da84f0408710c183fabaab68dc3dce0405df0f3587d4" Nov 28 07:54:09 crc kubenswrapper[4922]: E1128 07:54:09.399971 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-h8wk6_openshift-machine-config-operator(0498340a-5e95-42bf-a0a6-8ac89a6b8858)\"" pod="openshift-machine-config-operator/machine-config-daemon-h8wk6" podUID="0498340a-5e95-42bf-a0a6-8ac89a6b8858" Nov 28 07:54:23 crc kubenswrapper[4922]: I1128 07:54:23.399831 4922 scope.go:117] "RemoveContainer" containerID="1c9d0a63bfeea22d8df3da84f0408710c183fabaab68dc3dce0405df0f3587d4" Nov 28 07:54:23 crc kubenswrapper[4922]: E1128 07:54:23.401190 4922 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-h8wk6_openshift-machine-config-operator(0498340a-5e95-42bf-a0a6-8ac89a6b8858)\"" pod="openshift-machine-config-operator/machine-config-daemon-h8wk6" podUID="0498340a-5e95-42bf-a0a6-8ac89a6b8858" var/home/core/zuul-output/logs/crc-cloud-workdir-crc-all-logs.tar.gz0000644000175000000000000000005515112252503024441 0ustar coreroot‹íÁ  ÷Om7 €7šÞ'(var/home/core/zuul-output/logs/crc-cloud/0000755000175000000000000000000015112252503017356 5ustar corerootvar/home/core/zuul-output/artifacts/0000755000175000017500000000000015112242312016476 5ustar corecorevar/home/core/zuul-output/docs/0000755000175000017500000000000015112242312015446 5ustar corecore